Ignore:
Files:
1 added
1 deleted
11 edited

Legend:

Unmodified
Added
Removed
  • MANIFEST.in

    r10344 r10365  
    77 
    88recursive-include source *.bat *.c *.cpp *.h *.hpp *.mak COPYRIGHT *.py *.txt *.sip *.defs *.cmake 
     9prune source/orangeqt/build 
     10prune source/*/px 
     11prune source/*/ppp 
     12 
    913recursive-include docs *.rst *.py *.png *.css *.txt Makefile 
    1014 
     
    1519include LICENSES 
    1620include setup-site.cfg 
    17 include setupegg.py 
    1821include setup.py 
  • Orange/OrangeWidgets/Data/OWEditDomain.py

    r10046 r10358  
    2222 
    2323def get_qualified(module, name): 
    24     """ Return a qualified module member ``name`` inside the named  
     24    """Return a qualified module member ``name`` inside the named  
    2525    ``module``. 
    2626     
    27     The module (or package) firts gets imported and the name 
     27    The module (or package) first gets imported and the name 
    2828    is retrieved from the module's global namespace. 
    2929      
    3030    """ 
    31     module = __import__(module) 
     31    # see __import__.__doc__ for why 'fromlist' is used 
     32    module = __import__(module, fromlist=[name]) 
    3233    return getattr(module, name) 
    3334 
    3435def variable_description(var): 
    35     """ Return a variable descriptor. 
     36    """Return a variable descriptor. 
    3637     
    3738    A descriptor is a hashable tuple which should uniquely define  
    3839    the variable i.e. (module, type_name, variable_name,  
    39     any_kwargs, attributes-labels). 
     40    any_kwargs, sorted-attributes-items). 
    4041     
    4142    """ 
     
    5556 
    5657def variable_from_description(description): 
    57     """ Construct a variable from its description 
     58    """Construct a variable from its description 
    5859    (:ref:`variable_description`). 
    5960     
    6061    """ 
    6162    module, type_name, name, kwargs, attrs = description 
    62     type = get_qualified(module, type_name) 
     63    try: 
     64        type = get_qualified(module, type_name) 
     65    except (ImportError, AttributeError), ex: 
     66        raise ValueError("""Invalid descriptor type '{}.{}\ 
     67        """.format(module, type_name)) 
     68         
    6369    var = type(name, **dict(list(kwargs))) 
    6470    var.attributes.update(attrs) 
     
    7581     
    7682class DictItemsModel(QStandardItemModel): 
    77     """ A Qt Item Model class displaying the contents of a python 
     83    """A Qt Item Model class displaying the contents of a python 
    7884    dictionary. 
    7985     
     
    106112 
    107113class VariableEditor(QWidget): 
    108     """ An editor widget for a variable. 
     114    """An editor widget for a variable. 
    109115     
    110116    Can edit the variable name, and its attributes dictionary. 
     
    186192         
    187193    def set_data(self, var): 
    188         """ Set the variable to edit. 
     194        """Set the variable to edit. 
    189195        """ 
    190196        self.clear() 
     
    200206             
    201207    def get_data(self): 
    202         """ Retrieve the modified variable. 
     208        """Retrieve the modified variable. 
    203209        """ 
    204210        name = str(self.name_edit.text()) 
     
    216222     
    217223    def is_same(self): 
    218         """ Is the current model state the same as the input.  
     224        """Is the current model state the same as the input.  
    219225        """ 
    220226        name = str(self.name_edit.text()) 
     
    224230             
    225231    def clear(self): 
    226         """ Clear the editor state. 
     232        """Clear the editor state. 
    227233        """ 
    228234        self.var = None 
     
    235241             
    236242    def commit(self): 
    237         """ Emit a ``variable_changed()`` signal. 
     243        """Emit a ``variable_changed()`` signal. 
    238244        """ 
    239245        self.emit(SIGNAL("variable_changed()")) 
     
    268274         
    269275class DiscreteVariableEditor(VariableEditor): 
    270     """ An editor widget for editing a discrete variable. 
     276    """An editor widget for editing a discrete variable. 
    271277     
    272278    Extends the :class:`VariableEditor` to enable editing of 
     
    298304 
    299305    def set_data(self, var): 
    300         """ Set the variable to edit 
     306        """Set the variable to edit 
    301307        """ 
    302308        VariableEditor.set_data(self, var) 
     
    307313                 
    308314    def get_data(self): 
    309         """ Retrieve the modified variable 
     315        """Retrieve the modified variable 
    310316        """ 
    311317        name = str(self.name_edit.text()) 
     
    323329             
    324330    def is_same(self): 
    325         """ Is the current model state the same as the input.  
     331        """Is the current model state the same as the input.  
    326332        """ 
    327333        values = map(str, self.values_model) 
     
    329335     
    330336    def clear(self): 
    331         """ Clear the model state. 
     337        """Clear the model state. 
    332338        """ 
    333339        VariableEditor.clear(self) 
     
    423429         
    424430    def clear(self): 
    425         """ Clear the widget state. 
     431        """Clear the widget state. 
    426432        """ 
    427433        self.data = None 
     
    431437         
    432438    def clear_editor(self): 
    433         """ Clear the current editor widget 
     439        """Clear the current editor widget 
    434440        """ 
    435441        current = self.editor_stack.currentWidget() 
     
    459465                changed = self.domain_change_hints.get(desc, None) 
    460466                if changed is not None: 
    461                     new = variable_from_description(changed) 
    462                      
    463                     # Make sure orange's domain transformations will work. 
    464                     new.source_variable = var 
    465                     new.get_value_from = Orange.core.ClassifierFromVar(whichVar=var) 
    466                     var = new 
     467                    try: 
     468                        new = variable_from_description(changed) 
     469                    except ValueError, ex: 
     470#                        print ex 
     471                        new = None 
     472                         
     473                    if new is not None: 
     474                        # Make sure orange's domain transformations will work. 
     475                        new.source_variable = var 
     476                        new.get_value_from = Orange.core.ClassifierFromVar(whichVar=var) 
     477                        var = new 
     478                         
    467479                edited_vars.append(var) 
    468480             
     
    487499             
    488500    def on_selection_changed(self, *args): 
    489         """ When selection in 'Domain Features' view changes.  
     501        """When selection in 'Domain Features' view changes.  
    490502        """ 
    491503        i = self.selected_var_index() 
     
    495507         
    496508    def selected_var_index(self): 
    497         """ Return the selected row in 'Domain Features' view or None  
     509        """Return the selected row in 'Domain Features' view or None  
    498510        if no row is selected. 
    499511         
     
    506518         
    507519    def select_variable(self, index): 
    508         """ Select the variable with ``index`` in the 'Domain Features' 
     520        """Select the variable with ``index`` in the 'Domain Features' 
    509521        view. 
    510522         
     
    515527         
    516528    def open_editor(self, index): 
    517         """ Open the editor for variable at ``index`` and move it 
     529        """Open the editor for variable at ``index`` and move it 
    518530        to the top if the stack. 
    519531         
     
    533545     
    534546    def editor_for_variable(self, var): 
    535         """ Return the editor for ``var``'s variable type. 
     547        """Return the editor for ``var``'s variable type. 
    536548         
    537549        The editors are cached and reused by type. 
     
    554566     
    555567    def on_variable_changed(self): 
    556         """ When the user edited the current variable in editor. 
     568        """When the user edited the current variable in editor. 
    557569        """ 
    558570        var = self.domain_model[self.edited_variable_index] 
     
    575587          
    576588    def reset_all(self): 
    577         """ Reset all variables to the input state. 
     589        """Reset all variables to the input state. 
    578590        """ 
    579591        self.domain_change_hints = {} 
     
    587599             
    588600    def reset_selected(self): 
    589         """ Reset the currently selected variable to its original 
     601        """Reset the currently selected variable to its original 
    590602        state. 
    591603           
     
    612624         
    613625    def commit(self): 
    614         """ Commit the changed data to output.  
     626        """Commit the changed data to output.  
    615627        """ 
    616628        new_data = None 
  • Orange/__init__.py

    r10333 r10348  
     1from __future__ import absolute_import 
    12__version__ = "2.5a3" 
    23 
    3 import orange 
     4from . import orange 
    45 
    56# Definitely ugly, but I see no other workaround. 
  • Orange/evaluation/scoring.py

    r10343 r10367  
    1 import  math, functools 
     1import math 
     2import functools 
    23from operator import add 
     4 
    35import numpy 
    46 
     
    2022def log2(x): 
    2123    """Calculate logarithm in base 2.""" 
    22     return math.log(x)/math.log(2) 
     24    return math.log(x) / math.log(2) 
    2325 
    2426def check_non_zero(x): 
    25     """Throw Value Error when x = 0.0.""" 
    26     if x==0.0: 
    27         raise ValueError, "Cannot compute the score: no examples or sum of weights is 0.0." 
     27    """Throw Value Error when x = 0.""" 
     28    if x == 0.: 
     29        raise ValueError, "Cannot compute the score: no examples or sum of weights is 0." 
    2830 
    2931def gettotweight(res): 
    30     """Sum all the weights""" 
    31     totweight = reduce(lambda x, y: x+y.weight, res.results, 0) 
    32     if totweight==0.0: 
    33         raise ValueError, "Cannot compute the score: sum of weights is 0.0." 
     32    """Sum all the weights.""" 
     33    totweight = reduce(lambda x, y: x + y.weight, res.results, 0) 
     34    if totweight == 0.: 
     35        raise ValueError, "Cannot compute the score: sum of weights is 0." 
    3436    return totweight 
    3537 
    3638def gettotsize(res): 
    37     """ Get number of result instances """ 
     39    """Get number of result instances.""" 
    3840    if len(res.results): 
    3941        return len(res.results) 
     
    4345 
    4446def split_by_iterations(res): 
    45     """ Splits ExperimentResults of multiple iteratation test into a list 
     47    """Split ExperimentResults of a multiple iteratation test into a list 
    4648    of ExperimentResults, one for each iteration. 
    4749    """ 
     
    5961 
    6062def split_by_classifiers(res): 
    61     """ Splites an instance of :obj:`ExperimentResults` into a list of 
     63    """Split an instance of :obj:`ExperimentResults` into a list of 
    6264    :obj:`ExperimentResults`, one for each classifier.  
    6365    """ 
     
    6870                    weights=res.weights, baseClass=res.baseClass, 
    6971                    classifiers=[res.classifiers[i]] if res.classifiers else [], 
    70                     test_type = res.test_type, labels = res.labels) 
     72                    test_type=res.test_type, labels=res.labels) 
    7173        r.results = [] 
    7274        for te in res.results: 
    73             r.results.append(Orange.evaluation.testing.TestedExample(te.iterationNumber, 
    74                                 te.actualClass, n=1, weight=te.weight)) 
     75            r.results.append(Orange.evaluation.testing.TestedExample( 
     76                te.iterationNumber, te.actualClass, n=1, weight=te.weight)) 
    7577            r.results[-1].classes = [te.classes[i]] 
    7678            r.results[-1].probabilities = [te.probabilities[i]] 
     
    8082 
    8183def class_probabilities_from_res(res, **argkw): 
    82     """Calculate class probabilities""" 
    83     probs = [0.0] * len(res.class_values) 
     84    """Calculate class probabilities.""" 
     85    probs = [0.] * len(res.class_values) 
    8486    if argkw.get("unweighted", 0) or not res.weights: 
    8587        for tex in res.results: 
    86             probs[int(tex.actual_class)] += 1.0 
     88            probs[int(tex.actual_class)] += 1. 
    8789        totweight = gettotsize(res) 
    8890    else: 
    89         totweight = 0.0 
     91        totweight = 0. 
    9092        for tex in res.results: 
    9193            probs[tex.actual_class] += tex.weight 
    9294            totweight += tex.weight 
    9395        check_non_zero(totweight) 
    94     return [prob/totweight for prob in probs] 
     96    return [prob / totweight for prob in probs] 
    9597 
    9698 
     
    103105    if iteration_is_outer: 
    104106        if not stats: 
    105             raise ValueError, "Cannot compute the score: no examples or sum of weights is 0.0." 
     107            raise ValueError, "Cannot compute the score: no examples or sum of weights is 0." 
    106108        number_of_learners = len(stats[0]) 
    107         stats = filter(lambda (x, fN): fN>0.0, zip(stats,fold_n)) 
    108         stats = [ [x[lrn]/fN for x, fN in stats] for lrn in range(number_of_learners)] 
     109        stats = filter(lambda (x, fN): fN > 0, zip(stats, fold_n)) 
     110        stats = [[x[lrn] / fN for x, fN in stats] 
     111                 for lrn in range(number_of_learners)] 
    109112    else: 
    110         stats = [ [x/Fn for x, Fn in filter(lambda (x, Fn): Fn > 0.0, zip(lrnD, fold_n))] for lrnD in stats] 
     113        stats = [[x / Fn for x, Fn in filter(lambda (x, Fn): Fn > 0, 
     114                 zip(lrnD, fold_n))] for lrnD in stats] 
    111115 
    112116    if not stats: 
    113117        raise ValueError, "Cannot compute the score: no classifiers" 
    114118    if not stats[0]: 
    115         raise ValueError, "Cannot compute the score: no examples or sum of weights is 0.0." 
     119        raise ValueError, "Cannot compute the score: no examples or sum of weights is 0." 
    116120     
    117121    if report_se: 
     
    121125     
    122126def ME(res, **argkw): 
    123     MEs = [0.0]*res.number_of_learners 
     127    MEs = [0.] * res.number_of_learners 
    124128 
    125129    if argkw.get("unweighted", 0) or not res.weights: 
     
    130134    else: 
    131135        for tex in res.results: 
    132             MEs = map(lambda res, cls, ac = float(tex.actual_class), tw = tex.weight: 
    133                        res + tw*abs(float(cls) - ac), MEs, tex.classes) 
     136            MEs = map(lambda res, cls, ac=float(tex.actual_class), tw=tex.weight: 
     137                       res + tw * abs(float(cls) - ac), MEs, tex.classes) 
    134138        totweight = gettotweight(res) 
    135139 
    136     return [x/totweight for x in MEs] 
     140    return [x / totweight for x in MEs] 
    137141 
    138142MAE = ME 
     
    141145class ConfusionMatrix: 
    142146    """ 
    143     Classification result summary 
     147    Classification result summary. 
    144148    """ 
    145149    #: True Positive predictions 
     
    154158    @deprecated_keywords({"predictedPositive": "predicted_positive", 
    155159                          "isPositive": "is_positive"}) 
    156     def addTFPosNeg(self, predicted_positive, is_positive, weight = 1.0): 
     160    def addTFPosNeg(self, predicted_positive, is_positive, weight=1.0): 
    157161        """ 
    158         Update confusion matrix with result of a single classification 
     162        Update confusion matrix with result of a single classification. 
    159163 
    160164        :param predicted_positive: positive class value was predicted 
     
    180184 
    181185def check_argkw(dct, lst): 
    182     """check_argkw(dct, lst) -> returns true if any items have non-zero value in dct""" 
    183     return reduce(lambda x,y: x or y, [dct.get(k, 0) for k in lst]) 
     186    """Return True if any item from lst has a non-zero value in dct.""" 
     187    return reduce(lambda x, y: x or y, [dct.get(k, 0) for k in lst]) 
    184188 
    185189def regression_error(res, **argkw): 
    186     """regression_error(res) -> regression error (default: MSE)""" 
     190    """Return the regression error (default: MSE).""" 
    187191    if argkw.get("SE", 0) and res.number_of_iterations > 1: 
    188192        # computes the scores for each iteration, then averages 
    189         scores = [[0.0] * res.number_of_iterations for _ in range(res.number_of_learners)] 
    190         norm=None 
     193        scores = [[0.] * res.number_of_iterations 
     194                  for _ in range(res.number_of_learners)] 
     195        norm = None 
    191196        if argkw.get("norm-abs", 0) or argkw.get("norm-sqr", 0): 
    192             norm = [0.0] * res.number_of_iterations 
    193  
    194         nIter = [0]*res.number_of_iterations       # counts examples in each iteration 
    195         a = [0]*res.number_of_iterations           # average class in each iteration 
     197            norm = [0.] * res.number_of_iterations 
     198 
     199        # counts examples in each iteration 
     200        nIter = [0] * res.number_of_iterations 
     201        # average class in each iteration 
     202        a = [0] * res.number_of_iterations 
    196203        for tex in res.results: 
    197204            nIter[tex.iteration_number] += 1 
    198205            a[tex.iteration_number] += float(tex.actual_class) 
    199         a = [a[i]/nIter[i] for i in range(res.number_of_iterations)] 
     206        a = [a[i] / nIter[i] for i in range(res.number_of_iterations)] 
    200207 
    201208        if argkw.get("unweighted", 0) or not res.weights: 
     
    217224                    else: 
    218225                        scores[i][tex.iteration_number] += (float(cls) - ai)**2 
    219         else: # unweighted<>0 
     226        else: # unweighted != 0 
    220227            raise NotImplementedError, "weighted error scores with SE not implemented yet" 
    221228 
    222229        if argkw.get("norm-abs") or argkw.get("norm-sqr"): 
    223             scores = [[x/n for x, n in zip(y, norm)] for y in scores] 
    224         else: 
    225             scores = [[x/ni for x, ni in zip(y, nIter)] for y in scores] 
     230            scores = [[x / n for x, n in zip(y, norm)] for y in scores] 
     231        else: 
     232            scores = [[x / ni for x, ni in zip(y, nIter)] for y in scores] 
    226233 
    227234        if argkw.get("R2"): 
     
    234241         
    235242    else: # single iteration (testing on a single test set) 
    236         scores = [0.0] * res.number_of_learners 
    237         norm = 0.0 
     243        scores = [0.] * res.number_of_learners 
     244        norm = 0. 
    238245 
    239246        if argkw.get("unweighted", 0) or not res.weights: 
     
    242249            for tex in res.results: 
    243250                if argkw.get("abs", 0): 
    244                     scores = map(lambda res, cls, ac = float(tex.actual_class): 
     251                    scores = map(lambda res, cls, ac=float(tex.actual_class): 
    245252                                 res + abs(float(cls) - ac), scores, tex.classes) 
    246253                else: 
    247                     scores = map(lambda res, cls, ac = float(tex.actual_class): 
     254                    scores = map(lambda res, cls, ac=float(tex.actual_class): 
    248255                                 res + (float(cls) - ac)**2, scores, tex.classes) 
    249256 
     
    255262        else: 
    256263            # UNFINISHED 
    257             MSEs = [0.]*res.number_of_learners 
     264            MSEs = [0.] * res.number_of_learners 
    258265            for tex in res.results: 
    259                 MSEs = map(lambda res, cls, ac = float(tex.actual_class), 
    260                            tw = tex.weight: 
     266                MSEs = map(lambda res, cls, ac=float(tex.actual_class), 
     267                           tw=tex.weight: 
    261268                           res + tw * (float(cls) - ac)**2, MSEs, tex.classes) 
    262269            totweight = gettotweight(res) 
    263270 
    264271        if argkw.get("norm-abs", 0) or argkw.get("norm-sqr", 0): 
    265             scores = [s/norm for s in scores] 
     272            scores = [s / norm for s in scores] 
    266273        else: # normalize by number of instances (or sum of weights) 
    267             scores = [s/totweight for s in scores] 
     274            scores = [s / totweight for s in scores] 
    268275 
    269276        if argkw.get("R2"): 
    270             scores = [1.0 - s for s in scores] 
     277            scores = [1. - s for s in scores] 
    271278 
    272279        if argkw.get("sqrt", 0): 
     
    276283 
    277284def MSE(res, **argkw): 
    278     """ Computes mean-squared error. """ 
     285    """Compute mean-squared error.""" 
    279286    return regression_error(res, **argkw) 
    280287     
    281288def RMSE(res, **argkw): 
    282     """ Computes root mean-squared error. """ 
     289    """Compute root mean-squared error.""" 
    283290    argkw.setdefault("sqrt", True) 
    284291    return regression_error(res, **argkw) 
    285292 
    286293def MAE(res, **argkw): 
    287     """ Computes mean absolute error. """ 
     294    """Compute mean absolute error.""" 
    288295    argkw.setdefault("abs", True) 
    289296    return regression_error(res, **argkw) 
    290297 
    291298def RSE(res, **argkw): 
    292     """ Computes relative squared error. """ 
     299    """Compute relative squared error.""" 
    293300    argkw.setdefault("norm-sqr", True) 
    294301    return regression_error(res, **argkw) 
    295302 
    296303def RRSE(res, **argkw): 
    297     """ Computes relative squared error. """ 
     304    """Compute relative squared error.""" 
    298305    argkw.setdefault("norm-sqr", True) 
    299306    argkw.setdefault("sqrt", True) 
     
    301308 
    302309def RAE(res, **argkw): 
    303     """ Computes relative absolute error. """ 
     310    """Compute relative absolute error.""" 
    304311    argkw.setdefault("abs", True) 
    305312    argkw.setdefault("norm-abs", True) 
     
    307314 
    308315def R2(res, **argkw): 
    309     """ Computes the coefficient of determination, R-squared. """ 
     316    """Compute the coefficient of determination, R-squared.""" 
    310317    argkw.setdefault("norm-sqr", True) 
    311318    argkw.setdefault("R2", True) 
     
    313320 
    314321def MSE_old(res, **argkw): 
    315     """MSE(res) -> mean-squared error""" 
     322    """Compute mean-squared error.""" 
    316323    if argkw.get("SE", 0) and res.number_of_iterations > 1: 
    317         MSEs = [[0.0] * res.number_of_iterations for _ in range(res.number_of_learners)] 
    318         nIter = [0]*res.number_of_iterations 
     324        MSEs = [[0.] * res.number_of_iterations 
     325                for _ in range(res.number_of_learners)] 
     326        nIter = [0] * res.number_of_iterations 
    319327        if argkw.get("unweighted", 0) or not res.weights: 
    320328            for tex in res.results: 
     
    325333        else: 
    326334            raise ValueError, "weighted RMSE with SE not implemented yet" 
    327         MSEs = [[x/ni for x, ni in zip(y, nIter)] for y in MSEs] 
     335        MSEs = [[x / ni for x, ni in zip(y, nIter)] for y in MSEs] 
    328336        if argkw.get("sqrt", 0): 
    329337            MSEs = [[math.sqrt(x) for x in y] for y in MSEs] 
     
    331339         
    332340    else: 
    333         MSEs = [0.0]*res.number_of_learners 
     341        MSEs = [0.] * res.number_of_learners 
    334342        if argkw.get("unweighted", 0) or not res.weights: 
    335343            for tex in res.results: 
    336                 MSEs = map(lambda res, cls, ac = float(tex.actual_class): 
     344                MSEs = map(lambda res, cls, ac=float(tex.actual_class): 
    337345                           res + (float(cls) - ac)**2, MSEs, tex.classes) 
    338346            totweight = gettotsize(res) 
    339347        else: 
    340348            for tex in res.results: 
    341                 MSEs = map(lambda res, cls, ac = float(tex.actual_class), tw = tex.weight: 
    342                            res + tw * (float(cls) - ac)**2, MSEs, tex.classes) 
     349                MSEs = map(lambda res, cls, ac=float(tex.actual_class), 
     350                           tw=tex.weight: res + tw * (float(cls) - ac)**2, 
     351                           MSEs, tex.classes) 
    343352            totweight = gettotweight(res) 
    344353 
    345354        if argkw.get("sqrt", 0): 
    346355            MSEs = [math.sqrt(x) for x in MSEs] 
    347         return [x/totweight for x in MSEs] 
     356        return [x / totweight for x in MSEs] 
    348357 
    349358def RMSE_old(res, **argkw): 
    350     """RMSE(res) -> root mean-squared error""" 
     359    """Compute root mean-squared error.""" 
    351360    argkw.setdefault("sqrt", 1) 
    352361    return MSE_old(res, **argkw) 
     
    384393        input_type = self.get_input_type(test_results) 
    385394        if input_type == self.CONFUSION_MATRIX: 
    386             self[:] =  [self.from_confusion_matrix(test_results)] 
     395            self[:] = [self.from_confusion_matrix(test_results)] 
    387396        elif input_type == self.CONFUSION_MATRIX_LIST: 
    388397            self[:] = self.from_confusion_matrix_list(test_results) 
     
    390399            self[:] = self.from_classification_results(test_results) 
    391400        elif input_type == self.CROSS_VALIDATION: 
    392             self[:] =  self.from_crossvalidation_results(test_results) 
     401            self[:] = self.from_crossvalidation_results(test_results) 
    393402 
    394403    def from_confusion_matrix(self, cm): 
     
    396405        correct_predictions = 0. 
    397406        if isinstance(cm, ConfusionMatrix): 
    398             all_predictions += cm.TP+cm.FN+cm.FP+cm.TN 
    399             correct_predictions += cm.TP+cm.TN 
     407            all_predictions += cm.TP + cm.FN + cm.FP + cm.TN 
     408            correct_predictions += cm.TP + cm.TN 
    400409        else: 
    401410            for r, row in enumerate(cm): 
     
    406415 
    407416        check_non_zero(all_predictions) 
    408         ca = correct_predictions/all_predictions 
     417        ca = correct_predictions / all_predictions 
    409418 
    410419        if self.report_se: 
    411             return ca, ca*(1-ca)/math.sqrt(all_predictions) 
     420            return ca, ca * (1 - ca) / math.sqrt(all_predictions) 
    412421        else: 
    413422            return ca 
     
    417426 
    418427    def from_classification_results(self, test_results): 
    419         CAs = [0.0]*test_results.number_of_learners 
     428        CAs = [0.] * test_results.number_of_learners 
    420429        totweight = 0. 
    421430        for tex in test_results.results: 
    422431            w = 1. if self.ignore_weights else tex.weight 
    423             CAs = map(lambda res, cls: res+(cls==tex.actual_class and w), CAs, tex.classes) 
     432            CAs = map(lambda res, cls: res + (cls == tex.actual_class and w), 
     433                      CAs, tex.classes) 
    424434            totweight += w 
    425435        check_non_zero(totweight) 
    426         ca = [x/totweight for x in CAs] 
     436        ca = [x / totweight for x in CAs] 
    427437 
    428438        if self.report_se: 
    429             return [(x, x*(1-x)/math.sqrt(totweight)) for x in ca] 
     439            return [(x, x * (1 - x) / math.sqrt(totweight)) for x in ca] 
    430440        else: 
    431441            return ca 
    432442 
    433443    def from_crossvalidation_results(self, test_results): 
    434         CAsByFold = [[0.0]*test_results.number_of_iterations for _ in range(test_results.number_of_learners)] 
    435         foldN = [0.0]*test_results.number_of_iterations 
     444        CAsByFold = [[0.] * test_results.number_of_iterations 
     445                     for _ in range(test_results.number_of_learners)] 
     446        foldN = [0.] * test_results.number_of_iterations 
    436447 
    437448        for tex in test_results.results: 
    438449            w = 1. if self.ignore_weights else tex.weight 
    439450            for lrn in range(test_results.number_of_learners): 
    440                 CAsByFold[lrn][tex.iteration_number] += (tex.classes[lrn]==tex.actual_class) and w 
     451                CAsByFold[lrn][tex.iteration_number] += (tex.classes[lrn] ==  
     452                    tex.actual_class) and w 
    441453            foldN[tex.iteration_number] += w 
    442454 
     
    457469@deprecated_keywords({"reportSE": "report_se", 
    458470                      "unweighted": "ignore_weights"}) 
    459 def AP(res, report_se = False, ignore_weights=False, **argkw): 
    460     """ Computes the average probability assigned to the correct class. """ 
     471def AP(res, report_se=False, ignore_weights=False, **argkw): 
     472    """Compute the average probability assigned to the correct class.""" 
    461473    if res.number_of_iterations == 1: 
    462         APs=[0.0]*res.number_of_learners 
     474        APs=[0.] * res.number_of_learners 
    463475        if ignore_weights or not res.weights: 
    464476            for tex in res.results: 
    465                 APs = map(lambda res, probs: res + probs[tex.actual_class], APs, tex.probabilities) 
     477                APs = map(lambda res, probs: res + probs[tex.actual_class], 
     478                          APs, tex.probabilities) 
    466479            totweight = gettotsize(res) 
    467480        else: 
    468481            totweight = 0. 
    469482            for tex in res.results: 
    470                 APs = map(lambda res, probs: res + probs[tex.actual_class]*tex.weight, APs, tex.probabilities) 
     483                APs = map(lambda res, probs: res + probs[tex.actual_class] * 
     484                          tex.weight, APs, tex.probabilities) 
    471485                totweight += tex.weight 
    472486        check_non_zero(totweight) 
    473         return [AP/totweight for AP in APs] 
    474  
    475     APsByFold = [[0.0]*res.number_of_learners for _ in range(res.number_of_iterations)] 
    476     foldN = [0.0] * res.number_of_iterations 
     487        return [AP / totweight for AP in APs] 
     488 
     489    APsByFold = [[0.] * res.number_of_learners 
     490                 for _ in range(res.number_of_iterations)] 
     491    foldN = [0.] * res.number_of_iterations 
    477492    if ignore_weights or not res.weights: 
    478493        for tex in res.results: 
    479             APsByFold[tex.iteration_number] = map(lambda res, probs: res + probs[tex.actual_class], APsByFold[tex.iteration_number], tex.probabilities) 
     494            APsByFold[tex.iteration_number] = map(lambda res, probs: 
     495                res + probs[tex.actual_class], 
     496                APsByFold[tex.iteration_number], tex.probabilities) 
    480497            foldN[tex.iteration_number] += 1 
    481498    else: 
    482499        for tex in res.results: 
    483             APsByFold[tex.iteration_number] = map(lambda res, probs: res + probs[tex.actual_class] * tex.weight, APsByFold[tex.iteration_number], tex.probabilities) 
     500            APsByFold[tex.iteration_number] = map(lambda res, probs: 
     501                res + probs[tex.actual_class] * tex.weight, 
     502                APsByFold[tex.iteration_number], tex.probabilities) 
    484503            foldN[tex.iteration_number] += tex.weight 
    485504 
     
    489508@deprecated_keywords({"reportSE": "report_se", 
    490509                      "unweighted": "ignore_weights"}) 
    491 def Brier_score(res, report_se = False, ignore_weights=False, **argkw): 
    492     """ Computes the Brier's score, defined as the average (over test examples) 
    493     of sumx(t(x)-p(x))2, where x is a class, t(x) is 1 for the correct class 
    494     and 0 for the others, and p(x) is the probability that the classifier 
    495     assigned to the class x 
     510def Brier_score(res, report_se=False, ignore_weights=False, **argkw): 
     511    """Compute the Brier score, defined as the average (over test instances) 
     512    of :math:`\sum_x(t(x) - p(x))^2`, where x is a class value, t(x) is 1 for 
     513    the actual class value and 0 otherwise, and p(x) is the predicted 
     514    probability of x. 
    496515    """ 
    497516    # Computes an average (over examples) of sum_x(t(x) - p(x))^2, where 
     
    506525 
    507526    if res.number_of_iterations == 1: 
    508         MSEs=[0.0]*res.number_of_learners 
     527        MSEs=[0.] * res.number_of_learners 
    509528        if ignore_weights or not res.weights: 
    510             totweight = 0.0 
     529            totweight = 0. 
    511530            for tex in res.results: 
    512                 MSEs = map(lambda res, probs: 
    513                            res + reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actual_class], MSEs, tex.probabilities) 
     531                MSEs = map(lambda res, probs: res + reduce( 
     532                    lambda s, pi: s + pi**2, probs, 0) -  
     533                    2 * probs[tex.actual_class], MSEs, tex.probabilities) 
    514534                totweight += tex.weight 
    515535        else: 
    516536            for tex in res.results: 
    517                 MSEs = map(lambda res, probs: 
    518                            res + tex.weight*reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actual_class], MSEs, tex.probabilities) 
     537                MSEs = map(lambda res, probs: res + tex.weight * reduce( 
     538                    lambda s, pi: s + pi**2, probs, 0) -  
     539                    2 * probs[tex.actual_class], MSEs, tex.probabilities) 
    519540            totweight = gettotweight(res) 
    520541        check_non_zero(totweight) 
    521542        if report_se: 
    522             return [(max(x/totweight+1.0, 0), 0) for x in MSEs]  ## change this, not zero!!! 
    523         else: 
    524             return [max(x/totweight+1.0, 0) for x in MSEs] 
    525  
    526     BSs = [[0.0]*res.number_of_learners for _ in range(res.number_of_iterations)] 
     543            ## change this, not zero!!! 
     544            return [(max(x / totweight + 1., 0), 0) for x in MSEs] 
     545        else: 
     546            return [max(x / totweight + 1., 0) for x in MSEs] 
     547 
     548    BSs = [[0.] * res.number_of_learners 
     549           for _ in range(res.number_of_iterations)] 
    527550    foldN = [0.] * res.number_of_iterations 
    528551 
    529552    if ignore_weights or not res.weights: 
    530553        for tex in res.results: 
    531             BSs[tex.iteration_number] = map(lambda rr, probs: 
    532                        rr + reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actual_class], BSs[tex.iteration_number], tex.probabilities) 
     554            BSs[tex.iteration_number] = map(lambda rr, probs: rr + reduce( 
     555                lambda s, pi: s + pi**2, probs, 0) - 
     556                2 * probs[tex.actual_class], BSs[tex.iteration_number], 
     557                tex.probabilities) 
    533558            foldN[tex.iteration_number] += 1 
    534559    else: 
    535560        for tex in res.results: 
    536561            BSs[tex.iteration_number] = map(lambda res, probs: 
    537                        res + tex.weight*reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actual_class], BSs[tex.iteration_number], tex.probabilities) 
     562                res + tex.weight * reduce(lambda s, pi: s + pi**2, probs, 0) - 
     563                2 * probs[tex. actual_class], BSs[tex.iteration_number], 
     564                tex.probabilities) 
    538565            foldN[tex.iteration_number] += tex.weight 
    539566 
    540567    stats = statistics_by_folds(BSs, foldN, report_se, True) 
    541568    if report_se: 
    542         return [(x+1.0, y) for x, y in stats] 
     569        return [(x + 1., y) for x, y in stats] 
    543570    else: 
    544         return [x+1.0 for x in stats] 
     571        return [x + 1. for x in stats] 
    545572 
    546573def BSS(res, **argkw): 
    547     return [1-x/2 for x in apply(Brier_score, (res, ), argkw)] 
     574    return [1 - x / 2 for x in apply(Brier_score, (res, ), argkw)] 
    548575 
    549576def IS_ex(Pc, P): 
    550577    """Pc aposterior probability, P aprior""" 
    551     if Pc>=P: 
    552         return -log2(P)+log2(Pc) 
     578    if Pc >= P: 
     579        return -log2(P) + log2(Pc) 
    553580    else: 
    554         return -(-log2(1-P)+log2(1-Pc)) 
     581        return -(-log2(1 - P) + log2(1 - Pc)) 
    555582 
    556583 
    557584@deprecated_keywords({"reportSE": "report_se"}) 
    558 def IS(res, apriori=None, report_se = False, **argkw): 
    559     """ Computes the information score as defined by  
     585def IS(res, apriori=None, report_se=False, **argkw): 
     586    """Compute the information score as defined by  
    560587    `Kononenko and Bratko (1991) \ 
    561588    <http://www.springerlink.com/content/g5p7473160476612/>`_. 
     
    567594        apriori = class_probabilities_from_res(res) 
    568595 
    569     if res.number_of_iterations==1: 
    570         ISs = [0.0]*res.number_of_learners 
     596    if res.number_of_iterations == 1: 
     597        ISs = [0.] * res.number_of_learners 
    571598        if argkw.get("unweighted", 0) or not res.weights: 
    572599            for tex in res.results: 
     
    579606              for i in range(len(tex.probabilities)): 
    580607                    cls = tex.actual_class 
    581                     ISs[i] += IS_ex(tex.probabilities[i][cls], apriori[cls]) * tex.weight 
     608                    ISs[i] += (IS_ex(tex.probabilities[i][cls], apriori[cls]) * 
     609                               tex.weight) 
    582610            totweight = gettotweight(res) 
    583611        if report_se: 
    584             return [(IS/totweight,0) for IS in ISs] 
    585         else: 
    586             return [IS/totweight for IS in ISs] 
     612            return [(IS / totweight, 0) for IS in ISs] 
     613        else: 
     614            return [IS / totweight for IS in ISs] 
    587615 
    588616         
    589     ISs = [[0.0]*res.number_of_iterations for _ in range(res.number_of_learners)] 
     617    ISs = [[0.] * res.number_of_iterations 
     618           for _ in range(res.number_of_learners)] 
    590619    foldN = [0.] * res.number_of_iterations 
    591620 
     
    595624            for i in range(len(tex.probabilities)): 
    596625                cls = tex.actual_class 
    597                 ISs[i][tex.iteration_number] += IS_ex(tex.probabilities[i][cls], apriori[cls]) 
     626                ISs[i][tex.iteration_number] += IS_ex(tex.probabilities[i][cls], 
     627                                                apriori[cls]) 
    598628            foldN[tex.iteration_number] += 1 
    599629    else: 
     
    601631            for i in range(len(tex.probabilities)): 
    602632                cls = tex.actual_class 
    603                 ISs[i][tex.iteration_number] += IS_ex(tex.probabilities[i][cls], apriori[cls]) * tex.weight 
     633                ISs[i][tex.iteration_number] += IS_ex(tex.probabilities[i][cls], 
     634                                                apriori[cls]) * tex.weight 
    604635            foldN[tex.iteration_number] += tex.weight 
    605636 
     
    622653 
    623654    k = len(res.results[0].classes) 
    624     if k<2: 
     655    if k < 2: 
    625656        raise TypeError, "nothing to compare (less than two classifiers given)" 
    626     if k==2: 
     657    if k == 2: 
    627658        return apply(Wilcoxon, (res, statistics), argkw) 
    628659    else: 
     
    648679    tfpns = [ConfusionMatrix() for _ in range(test_results.number_of_learners)] 
    649680     
    650     if class_index<0: 
     681    if class_index < 0: 
    651682        numberOfClasses = len(test_results.class_values) 
    652683        if class_index < -1 or numberOfClasses > 2: 
    653             cm = [[[0.0] * numberOfClasses for _ in range(numberOfClasses)] for _ in range(test_results.number_of_learners)] 
     684            cm = [[[0.] * numberOfClasses for _ in range(numberOfClasses)] 
     685                  for _ in range(test_results.number_of_learners)] 
    654686            if ignore_weights or not test_results.weights: 
    655687                for tex in test_results.results: 
     
    668700            return cm 
    669701             
    670         elif test_results.baseClass>=0: 
     702        elif test_results.baseClass >= 0: 
    671703            class_index = test_results.baseClass 
    672704        else: 
     
    678710                isPositive=(lr.actual_class==class_index) 
    679711                for i in range(test_results.number_of_learners): 
    680                     tfpns[i].addTFPosNeg(lr.probabilities[i][class_index]>cutoff, isPositive) 
     712                    tfpns[i].addTFPosNeg(lr.probabilities[i][class_index] > 
     713                                         cutoff, isPositive) 
    681714        else: 
    682715            for lr in test_results.results: 
    683                 isPositive=(lr.actual_class==class_index) 
     716                isPositive=(lr.actual_class == class_index) 
    684717                for i in range(test_results.number_of_learners): 
    685                     tfpns[i].addTFPosNeg(lr.probabilities[i][class_index]>cutoff, isPositive, lr.weight) 
     718                    tfpns[i].addTFPosNeg(lr.probabilities[i][class_index] >  
     719                                         cutoff, isPositive, lr.weight) 
    686720    else: 
    687721        if ignore_weights or not test_results.weights: 
    688722            for lr in test_results.results: 
    689                 isPositive=(lr.actual_class==class_index) 
     723                isPositive = (lr.actual_class == class_index) 
    690724                for i in range(test_results.number_of_learners): 
    691                     tfpns[i].addTFPosNeg(lr.classes[i]==class_index, isPositive) 
     725                    tfpns[i].addTFPosNeg(lr.classes[i] == class_index, 
     726                                         isPositive) 
    692727        else: 
    693728            for lr in test_results.results: 
    694                 isPositive=(lr.actual_class==class_index) 
     729                isPositive = (lr.actual_class == class_index) 
    695730                for i in range(test_results.number_of_learners): 
    696                     tfpns[i].addTFPosNeg(lr.classes[i]==class_index, isPositive, lr.weight) 
     731                    tfpns[i].addTFPosNeg(lr.classes[i] == class_index, 
     732                                         isPositive, lr.weight) 
    697733    return tfpns 
    698734 
     
    722758    colPriors = [sum(r[i] for r in confusion_matrix) for i in range(dim)] 
    723759    total = sum(rowPriors) 
    724     rowPriors = [r/total for r in rowPriors] 
    725     colPriors = [r/total for r in colPriors] 
     760    rowPriors = [r / total for r in rowPriors] 
     761    colPriors = [r / total for r in colPriors] 
    726762    ss = 0 
    727763    for ri, row in enumerate(confusion_matrix): 
     
    730766            if not e: 
    731767                return -1, -1, -1 
    732             ss += (o-e)**2 / e 
    733     df = (dim-1)**2 
     768            ss += (o - e)**2 / e 
     769    df = (dim - 1)**2 
    734770    return ss, df, statc.chisqprob(ss, df) 
    735771 
     
    737773def sens(confusion_matrix): 
    738774    """ 
    739     Return `sensitivity <http://en.wikipedia.org/wiki/Sensitivity_and_specificity>`_ 
    740     (proportion of actual positives which are correctly identified as such). 
     775    Return `sensitivity 
     776    <http://en.wikipedia.org/wiki/Sensitivity_and_specificity>`_ (proportion 
     777    of actual positives which are correctly identified as such). 
    741778    """ 
    742779    if type(confusion_matrix) == list: 
     
    749786            return -1 
    750787 
    751         return confusion_matrix.TP/tot 
     788        return confusion_matrix.TP / tot 
    752789 
    753790 
     
    764801def spec(confusion_matrix): 
    765802    """ 
    766     Return `specificity <http://en.wikipedia.org/wiki/Sensitivity_and_specificity>`_ 
     803    Return `specificity 
     804    <http://en.wikipedia.org/wiki/Sensitivity_and_specificity>`_ 
    767805    (proportion of negatives which are correctly identified). 
    768806    """ 
     
    775813            warnings.warn("Can't compute specificity: one or both classes have no instances") 
    776814            return -1 
    777         return confusion_matrix.TN/tot 
     815        return confusion_matrix.TN / tot 
    778816 
    779817 
     
    781819def PPV(confusion_matrix): 
    782820    """ 
    783     Return `positive predictive value <http://en.wikipedia.org/wiki/Positive_predictive_value>`_ 
    784     (proportion of subjects with positive test results who are correctly diagnosed).""" 
     821    Return `positive predictive value 
     822    <http://en.wikipedia.org/wiki/Positive_predictive_value>`_ (proportion of 
     823    subjects with positive test results who are correctly diagnosed).""" 
    785824    if type(confusion_matrix) == list: 
    786825        return [PPV(cm) for cm in confusion_matrix] 
    787826    else: 
    788         tot = confusion_matrix.TP+confusion_matrix.FP 
     827        tot = confusion_matrix.TP + confusion_matrix.FP 
    789828        if tot < 1e-6: 
    790829            import warnings 
     
    804843@deprecated_keywords({"confm": "confusion_matrix"}) 
    805844def NPV(confusion_matrix): 
    806     """Return `negative predictive value <http://en.wikipedia.org/wiki/Negative_predictive_value>`_ 
    807      (proportion of subjects with a negative test result who are correctly 
    808      diagnosed). 
     845    """ 
     846    Return `negative predictive value 
     847    <http://en.wikipedia.org/wiki/Negative_predictive_value>`_ (proportion of 
     848    subjects with a negative test result who are correctly diagnosed). 
    809849     """ 
    810850    if type(confusion_matrix) == list: 
    811851        return [NPV(cm) for cm in confusion_matrix] 
    812852    else: 
    813         tot = confusion_matrix.FN+confusion_matrix.TN 
     853        tot = confusion_matrix.FN + confusion_matrix.TN 
    814854        if tot < 1e-6: 
    815855            import warnings 
    816856            warnings.warn("Can't compute NPV: one or both classes have no instances") 
    817857            return -1 
    818         return confusion_matrix.TN/tot 
     858        return confusion_matrix.TN / tot 
    819859 
    820860@deprecated_keywords({"confm": "confusion_matrix"}) 
    821861def F1(confusion_matrix): 
    822     """Return `F1 score <http://en.wikipedia.org/wiki/F1_score>`_ 
    823     (harmonic mean of precision and recall).""" 
     862    """ 
     863    Return `F1 score <http://en.wikipedia.org/wiki/F1_score>`_ 
     864    (harmonic mean of precision and recall). 
     865    """ 
    824866    if type(confusion_matrix) == list: 
    825867        return [F1(cm) for cm in confusion_matrix] 
     
    837879@deprecated_keywords({"confm": "confusion_matrix"}) 
    838880def Falpha(confusion_matrix, alpha=1.0): 
    839     """Return the alpha-mean of precision and recall over the given confusion matrix.""" 
     881    """ 
     882    Return the alpha-mean of precision and recall over the given confusion 
     883    matrix. 
     884    """ 
    840885    if type(confusion_matrix) == list: 
    841886        return [Falpha(cm, alpha=alpha) for cm in confusion_matrix] 
     
    849894def MCC(confusion_matrix): 
    850895    """ 
    851     Return `Matthew correlation coefficient <http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_ 
    852     (correlation coefficient between the observed and predicted binary classifications) 
     896    Return `Matthew correlation coefficient 
     897    <http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_ 
     898    (correlation coefficient between the observed and predicted binary 
     899    classifications). 
    853900    """ 
    854901    # code by Boris Gorelik 
     
    862909           
    863910        try:    
    864             r = (((truePositive * trueNegative) - (falsePositive * falseNegative))/  
    865                 math.sqrt(  (truePositive + falsePositive)  *  
    866                 ( truePositive + falseNegative ) *  
    867                 ( trueNegative + falsePositive ) *  
    868                 ( trueNegative + falseNegative ) ) 
    869                 ) 
     911            r = (((truePositive * trueNegative) - 
     912                  (falsePositive * falseNegative)) / 
     913                 math.sqrt((truePositive + falsePositive) * 
     914                           (truePositive + falseNegative) *  
     915                           (trueNegative + falsePositive) *  
     916                           (trueNegative + falseNegative))) 
    870917        except ZeroDivisionError: 
    871918            # Zero division occurs when there is either no true positives 
     
    887934   raters. 
    888935 
    889    @param confusion_matrix: confusion matrix, or list of confusion matrices. To obtain 
    890                            non-binary confusion matrix, call 
    891                            Orange.evaluation.scoring.compute_confusion_matrices and set the 
    892                            classIndex parameter to -2. 
     936   @param confusion_matrix: confusion matrix, or list of confusion matrices. 
     937                            To obtain non-binary confusion matrix, call 
     938                            Orange.evaluation.scoring.confusion_matrices 
     939                            and set the class_index parameter to -2. 
    893940   @param b_is_list_of_matrices: specifies whether confm is list of matrices. 
    894                            This function needs to operate on non-binary 
    895                            confusion matrices, which are represented by python 
    896                            lists, therefore one needs a way to distinguish 
    897                            between a single matrix and list of matrices 
     941                            This function needs to operate on non-binary 
     942                            confusion matrices, which are represented by python 
     943                            lists, therefore one needs a way to distinguish 
     944                            between a single matrix and list of matrices 
    898945   """ 
    899946 
    900947   if b_is_list_of_matrices: 
    901948       try: 
    902            return [scotts_pi(cm, b_is_list_of_matrices=False) for cm in confusion_matrix] 
     949           return [scotts_pi(cm, b_is_list_of_matrices=False) 
     950                   for cm in confusion_matrix] 
    903951       except TypeError: 
    904952           # Nevermind the parameter, maybe this is a "conventional" binary 
     
    907955   else: 
    908956       if isinstance(confusion_matrix, ConfusionMatrix): 
    909            confusion_matrix = numpy.array( [[confusion_matrix.TP, confusion_matrix.FN], 
    910                    [confusion_matrix.FP, confusion_matrix.TN]], dtype=float) 
     957           confusion_matrix = numpy.array([[confusion_matrix.TP, 
     958               confusion_matrix.FN], [confusion_matrix.FP, 
     959               confusion_matrix.TN]], dtype=float) 
    911960       else: 
    912961           confusion_matrix = numpy.array(confusion_matrix, dtype=float) 
     
    914963       marginalSumOfRows = numpy.sum(confusion_matrix, axis=0) 
    915964       marginalSumOfColumns = numpy.sum(confusion_matrix, axis=1) 
    916        jointProportion = (marginalSumOfColumns + marginalSumOfRows)/ \ 
    917                            (2.0 * numpy.sum(confusion_matrix)) 
    918        # In the eq. above, 2.0 is what the Wikipedia page calls 
     965       jointProportion = (marginalSumOfColumns + marginalSumOfRows) / \ 
     966                           (2. * numpy.sum(confusion_matrix)) 
     967       # In the eq. above, 2. is what the Wikipedia page calls 
    919968       # the number of annotators. Here we have two annotators: 
    920969       # the observed (true) labels (annotations) and the predicted by 
     
    922971 
    923972       prExpected = numpy.sum(jointProportion ** 2) 
    924        prActual = numpy.sum(numpy.diag(confusion_matrix)) /numpy.sum(confusion_matrix) 
     973       prActual = numpy.sum(numpy.diag(confusion_matrix)) / \ 
     974                  numpy.sum(confusion_matrix) 
    925975 
    926976       ret = (prActual - prExpected) / (1.0 - prExpected) 
     
    930980                      "unweighted": "ignore_weights"}) 
    931981def AUCWilcoxon(res, class_index=-1, ignore_weights=False, **argkw): 
    932     """ Computes the area under ROC (AUC) and its standard error using 
     982    """Compute the area under ROC (AUC) and its standard error using 
    933983    Wilcoxon's approach proposed by Hanley and McNeal (1982). If  
    934     :obj:`classIndex` is not specified, the first class is used as 
     984    :obj:`class_index` is not specified, the first class is used as 
    935985    "the positive" and others are negative. The result is a list of 
    936986    tuples (aROC, standard error). 
     
    942992    problists, tots = corn.computeROCCumulative(res, class_index, useweights) 
    943993 
    944     results=[] 
     994    results = [] 
    945995 
    946996    totPos, totNeg = tots[1], tots[0] 
    947997    N = totPos + totNeg 
    948998    for plist in problists: 
    949         highPos, lowNeg = totPos, 0.0 
    950         W, Q1, Q2 = 0.0, 0.0, 0.0 
     999        highPos, lowNeg = totPos, 0. 
     1000        W, Q1, Q2 = 0., 0., 0. 
    9511001        for prob in plist: 
    9521002            thisPos, thisNeg = prob[1][1], prob[1][0] 
    9531003            highPos -= thisPos 
    954             W += thisNeg * (highPos + thisPos/2.) 
    955             Q2 += thisPos * (lowNeg**2  + lowNeg*thisNeg  + thisNeg**2 /3.) 
    956             Q1 += thisNeg * (highPos**2 + highPos*thisPos + thisPos**2 /3.) 
     1004            W += thisNeg * (highPos + thisPos / 2.) 
     1005            Q2 += thisPos * (lowNeg**2  + lowNeg*thisNeg  + thisNeg**2 / 3.) 
     1006            Q1 += thisNeg * (highPos**2 + highPos*thisPos + thisPos**2 / 3.) 
    9571007 
    9581008            lowNeg += thisNeg 
     
    9621012        Q2 /= (totPos*totNeg**2) 
    9631013 
    964         SE = math.sqrt( (W*(1-W) + (totPos-1)*(Q1-W**2) + (totNeg-1)*(Q2-W**2)) / (totPos*totNeg) ) 
     1014        SE = math.sqrt((W * (1 - W) + (totPos - 1) * (Q1 - W**2) + 
     1015                       (totNeg - 1) * (Q2 - W**2)) / (totPos * totNeg)) 
    9651016        results.append((W, SE)) 
    9661017    return results 
    9671018 
    968 AROC = AUCWilcoxon # for backward compatibility, AROC is obsolote 
     1019AROC = AUCWilcoxon # for backward compatibility, AROC is obsolete 
    9691020 
    9701021 
     
    9731024def compare_2_AUCs(res, lrn1, lrn2, class_index=-1, 
    9741025                   ignore_weights=False, **argkw): 
    975     return corn.compare2ROCs(res, lrn1, lrn2, class_index, res.weights and not ignore_weights) 
    976  
    977 compare_2_AROCs = compare_2_AUCs # for backward compatibility, compare_2_AROCs is obsolote 
     1026    return corn.compare2ROCs(res, lrn1, lrn2, class_index, 
     1027                             res.weights and not ignore_weights) 
     1028 
     1029# for backward compatibility, compare_2_AROCs is obsolete 
     1030compare_2_AROCs = compare_2_AUCs  
    9781031 
    9791032 
    9801033@deprecated_keywords({"classIndex": "class_index"}) 
    9811034def compute_ROC(res, class_index=-1): 
    982     """ Computes a ROC curve as a list of (x, y) tuples, where x is  
     1035    """Compute a ROC curve as a list of (x, y) tuples, where x is  
    9831036    1-specificity and y is sensitivity. 
    9841037    """ 
     
    9901043    for plist in problists: 
    9911044        curve=[(1., 1.)] 
    992         TP, TN = totPos, 0.0 
     1045        TP, TN = totPos, 0. 
    9931046        FN, FP = 0., totNeg 
    9941047        for prob in plist: 
     
    10011054            FP -= thisNeg 
    10021055 
    1003             sens = TP/(TP+FN) 
    1004             spec = TN/(FP+TN) 
    1005             curve.append((1-spec, sens)) 
     1056            sens = TP / (TP + FN) 
     1057            spec = TN / (FP + TN) 
     1058            curve.append((1 - spec, sens)) 
    10061059        results.append(curve) 
    10071060 
     
    10091062 
    10101063## TC's implementation of algorithms, taken from: 
    1011 ## T Fawcett: ROC Graphs: Notes and Practical Considerations for Data Mining Researchers, submitted to KDD Journal.  
     1064## T Fawcett: ROC Graphs: Notes and Practical Considerations for 
     1065## Data Mining Researchers, submitted to KDD Journal. 
    10121066def ROC_slope((P1x, P1y, P1fscore), (P2x, P2y, P2fscore)): 
    10131067    if P1x == P2x: 
     
    10441098 
    10451099    for plist in problists: 
    1046         ## corn gives an increasing by scores list, we need a decreasing by scores 
     1100        ## corn gives an increasing by scores list, we need a decreasing 
    10471101        plist.reverse() 
    1048         TP = 0.0 
    1049         FP = 0.0 
     1102        TP = 0. 
     1103        FP = 0. 
    10501104        curve=[] 
    1051         fPrev = 10e300 # "infinity" score at 0.0, 0.0 
     1105        fPrev = 10e300 # "infinity" score at 0., 0. 
    10521106        for prob in plist: 
    10531107            f = prob[0] 
    1054             if f <> fPrev: 
     1108            if f != fPrev: 
    10551109                if P: 
    1056                     tpr = TP/P 
     1110                    tpr = TP / P 
    10571111                else: 
    1058                     tpr = 0.0 
     1112                    tpr = 0. 
    10591113                if N: 
    1060                     fpr = FP/N 
     1114                    fpr = FP / N 
    10611115                else: 
    1062                     fpr = 0.0 
    1063                 curve = ROC_add_point((fpr, tpr, fPrev), curve, keep_concavities) 
     1116                    fpr = 0. 
     1117                curve = ROC_add_point((fpr, tpr, fPrev), curve, 
     1118                                      keep_concavities) 
    10641119                fPrev = f 
    10651120            thisPos, thisNeg = prob[1][1], prob[1][0] 
     
    10671122            FP += thisNeg 
    10681123        if P: 
    1069             tpr = TP/P 
    1070         else: 
    1071             tpr = 0.0 
     1124            tpr = TP / P 
     1125        else: 
     1126            tpr = 0. 
    10721127        if N: 
    1073             fpr = FP/N 
    1074         else: 
    1075             fpr = 0.0 
     1128            fpr = FP / N 
     1129        else: 
     1130            fpr = 0. 
    10761131        curve = ROC_add_point((fpr, tpr, f), curve, keep_concavities) ## ugly 
    10771132        results.append(curve) 
     
    10791134    return results 
    10801135 
    1081 ## returns a list of points at the intersection of the tangential iso-performance line and the given ROC curve 
     1136## returns a list of points at the intersection of the tangential 
     1137## iso-performance line and the given ROC curve 
    10821138## for given values of FPcost, FNcost and pval 
    10831139def TC_best_thresholds_on_ROC_curve(FPcost, FNcost, pval, curve): 
    1084     m = (FPcost*(1.0 - pval)) / (FNcost*pval) 
    1085  
    1086     ## put the iso-performance line in point (0.0, 1.0) 
    1087     x0, y0 = (0.0, 1.0) 
    1088     x1, y1 = (1.0, 1.0 + m) 
    1089     d01 = math.sqrt((x1 - x0)*(x1 - x0) + (y1 - y0)*(y1 - y0)) 
     1140    m = (FPcost * (1. - pval)) / (FNcost * pval) 
     1141 
     1142    ## put the iso-performance line in point (0., 1.) 
     1143    x0, y0 = (0., 1.) 
     1144    x1, y1 = (1., 1. + m) 
     1145    d01 = math.sqrt((x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0)) 
    10901146 
    10911147    ## calculate and find the closest point to the line 
    10921148    firstp = 1 
    1093     mind = 0.0 
    1094     a = (x0*y1 - x1*y0) 
     1149    mind = 0. 
     1150    a = x0 * y1 - x1 * y0 
    10951151    closestPoints = [] 
    10961152    for (x, y, fscore) in curve: 
    1097         d = ((y0 - y1)*x + (x1 - x0)*y + a) / d01 
     1153        d = ((y0 - y1) * x + (x1 - x0) * y + a) / d01 
    10981154        d = abs(d) 
    10991155        if firstp or d < mind: 
     
    11091165 
    11101166    if end is None: 
    1111         end = start + 0.0 
    1112         start = 0.0 
     1167        end = start + 0. 
     1168        start = 0. 
    11131169 
    11141170    if inc is None or inc == 0: 
    1115         inc = 1.0 
     1171        inc = 1. 
    11161172 
    11171173    L = [start] 
     
    11411197        dx = float(P2x) - float(P1x) 
    11421198        dy = float(P2y) - float(P1y) 
    1143         m = dy/dx 
    1144         return P1y + m*(X - P1x) 
     1199        m = dy / dx 
     1200        return P1y + m * (X - P1x) 
    11451201 
    11461202    def TP_FOR_FP(FPsample, ROC, npts): 
     
    11561212            return tp 
    11571213        elif fp < FPsample and i + 1 < len(ROC): 
    1158             return INTERPOLATE(ROC[i], ROC[i+1], FPsample) 
     1214            return INTERPOLATE(ROC[i], ROC[i + 1], FPsample) 
    11591215        elif fp < FPsample and i + 1 == len(ROC): # return the last 
    11601216            return ROC[i][1] 
    11611217        raise ValueError, "cannot compute: TP_FOR_FP in TC_vertical_average_ROC" 
    1162         #return 0.0 
     1218        #return 0. 
    11631219 
    11641220    average = [] 
     
    11721228        TPavg = [] 
    11731229        TPstd = [] 
    1174         for FPsample in frange(0.0, 1.0, 1.0/samples): 
     1230        for FPsample in frange(0., 1., 1. / samples): 
    11751231            TPsum = [] 
    11761232            for i in range(nrocs): 
    1177                 TPsum.append( TP_FOR_FP(FPsample, ROCS[i], npts[i]) ) ##TPsum = TPsum + TP_FOR_FP(FPsample, ROCS[i], npts[i]) 
    1178             TPavg.append( (FPsample, statc.mean(TPsum)) ) 
     1233                ##TPsum = TPsum + TP_FOR_FP(FPsample, ROCS[i], npts[i]) 
     1234                TPsum.append(TP_FOR_FP(FPsample, ROCS[i], npts[i])) 
     1235            TPavg.append((FPsample, statc.mean(TPsum))) 
    11791236            if len(TPsum) > 1: 
    11801237                stdv = statc.std(TPsum) 
    11811238            else: 
    1182                 stdv = 0.0 
    1183             TPstd.append( stdv ) 
     1239                stdv = 0. 
     1240            TPstd.append(stdv) 
    11841241 
    11851242        average.append(TPavg) 
     
    11951252## returns the average ROC curve, an array of vertical standard deviations and an array of horizontal standard deviations 
    11961253@deprecated_keywords({"ROCcurves": "roc_curves"}) 
    1197 def TC_threshold_average_ROC(roc_curves, samples = 10): 
     1254def TC_threshold_average_ROC(roc_curves, samples=10): 
    11981255    def POINT_AT_THRESH(ROC, npts, thresh): 
    11991256        i = 0 
     
    12281285        TPstdV = [] 
    12291286        TPstdH = [] 
    1230         for tidx in frange(0, (len(T) - 1.0), float(len(T))/samples): 
     1287        for tidx in frange(0, (len(T) - 1.), float(len(T)) / samples): 
    12311288            FPsum = [] 
    12321289            TPsum = [] 
     
    12351292                FPsum.append(fp) 
    12361293                TPsum.append(tp) 
    1237             TPavg.append( (statc.mean(FPsum), statc.mean(TPsum)) ) 
     1294            TPavg.append((statc.mean(FPsum), statc.mean(TPsum))) 
    12381295            ## vertical standard deviation 
    12391296            if len(TPsum) > 1: 
    12401297                stdv = statc.std(TPsum) 
    12411298            else: 
    1242                 stdv = 0.0 
     1299                stdv = 0. 
    12431300            TPstdV.append( stdv ) 
    12441301            ## horizontal standard deviation 
     
    12461303                stdh = statc.std(FPsum) 
    12471304            else: 
    1248                 stdh = 0.0 
     1305                stdh = 0. 
    12491306            TPstdH.append( stdh ) 
    12501307 
     
    12561313 
    12571314## Calibration Curve 
    1258 ## returns an array of (curve, yesClassPredictions, noClassPredictions) elements, where: 
     1315## returns an array of (curve, yesClassPredictions, noClassPredictions) 
     1316## elements, where: 
    12591317##  - curve is an array of points (x, y) on the calibration curve 
    12601318##  - yesClassRugPoints is an array of (x, 1) points 
     
    12631321def compute_calibration_curve(res, class_index=-1): 
    12641322    ## merge multiple iterations into one 
    1265     mres = Orange.evaluation.testing.ExperimentResults(1, res.classifier_names, res.class_values, res.weights, classifiers=res.classifiers, loaded=res.loaded, test_type=res.test_type, labels=res.labels) 
     1323    mres = Orange.evaluation.testing.ExperimentResults(1, res.classifier_names, 
     1324        res.class_values, res.weights, classifiers=res.classifiers, 
     1325        loaded=res.loaded, test_type=res.test_type, labels=res.labels) 
    12661326    for te in res.results: 
    1267         mres.results.append( te ) 
     1327        mres.results.append(te) 
    12681328 
    12691329    problists, tots = corn.computeROCCumulative(mres, class_index) 
     
    12711331    results = [] 
    12721332 
    1273     bins = 10 ## divide interval between 0.0 and 1.0 into N bins 
     1333    bins = 10 ## divide interval between 0. and 1. into N bins 
    12741334 
    12751335    for plist in problists: 
     
    12801340        noBinsVals = [0] * bins 
    12811341        for (f, (thisNeg, thisPos)) in plist: 
    1282             yesClassRugPoints.append( (f, thisPos) ) #1.0 
    1283             noClassRugPoints.append( (f, thisNeg) ) #1.0 
     1342            yesClassRugPoints.append((f, thisPos)) # 1. 
     1343            noClassRugPoints.append((f, thisNeg)) # 1. 
    12841344 
    12851345            index = int(f * bins ) 
    1286             index = min(index, bins - 1) ## just in case for value 1.0 
     1346            index = min(index, bins - 1) ## just in case for value 1. 
    12871347            yesBinsVals[index] += thisPos 
    12881348            noBinsVals[index] += thisNeg 
     
    12901350        curve = [] 
    12911351        for cn in range(bins): 
    1292             f = float(cn * 1.0 / bins) + (1.0 / 2.0 / bins) 
     1352            f = float(cn * 1. / bins) + (1. / 2. / bins) 
    12931353            yesVal = yesBinsVals[cn] 
    12941354            noVal = noBinsVals[cn] 
    12951355            allVal = yesVal + noVal 
    1296             if allVal == 0.0: continue 
    1297             y = float(yesVal)/float(allVal) 
    1298             curve.append( (f,  y) ) 
     1356            if allVal == 0.: continue 
     1357            y = float(yesVal) / float(allVal) 
     1358            curve.append((f,  y)) 
    12991359 
    13001360        ## smooth the curve 
     
    13121372        else: 
    13131373            curve = loessCurve 
    1314         curve = [c[:2] for c in curve] ## remove the third value (variance of epsilon?) that suddenly appeared in the output of the statc.loess function 
     1374        ## remove the third value (variance of epsilon?) that suddenly 
     1375        ## appeared in the output of the statc.loess function 
     1376        curve = [c[:2] for c in curve] 
    13151377        results.append((curve, yesClassRugPoints, noClassRugPoints)) 
    13161378 
     
    13201382## Lift Curve 
    13211383## returns an array of curve elements, where: 
    1322 ##  - curve is an array of points ((TP+FP)/(P + N), TP/P, (th, FP/N)) on the Lift Curve 
     1384##  - curve is an array of points ((TP + FP) / (P + N), TP / P, (th, FP / N)) 
     1385##    on the Lift Curve 
    13231386@deprecated_keywords({"classIndex": "class_index"}) 
    13241387def compute_lift_curve(res, class_index=-1): 
    13251388    ## merge multiple iterations into one 
    1326     mres = Orange.evaluation.testing.ExperimentResults(1, res.classifier_names, res.class_values, res.weights, classifiers=res.classifiers, loaded=res.loaded, test_type=res.test_type, labels=res.labels) 
     1389    mres = Orange.evaluation.testing.ExperimentResults(1, res.classifier_names, 
     1390        res.class_values, res.weights, classifiers=res.classifiers, 
     1391        loaded=res.loaded, test_type=res.test_type, labels=res.labels) 
    13271392    for te in res.results: 
    1328         mres.results.append( te ) 
     1393        mres.results.append(te) 
    13291394 
    13301395    problists, tots = corn.computeROCCumulative(mres, class_index) 
     
    13331398    P, N = tots[1], tots[0] 
    13341399    for plist in problists: 
    1335         ## corn gives an increasing by scores list, we need a decreasing by scores 
     1400        ## corn gives an increasing by scores list, we need a decreasing 
    13361401        plist.reverse() 
    1337         TP = 0.0 
    1338         FP = 0.0 
    1339         curve = [(0.0, 0.0, (10e300, 0.0))] 
     1402        TP = 0. 
     1403        FP = 0. 
     1404        curve = [(0., 0., (10e300, 0.))] 
    13401405        for (f, (thisNeg, thisPos)) in plist: 
    13411406            TP += thisPos 
    13421407            FP += thisNeg 
    1343             curve.append( ((TP+FP)/(P + N), TP, (f, FP/(N or 1))) ) 
     1408            curve.append(((TP + FP) / (P + N), TP, (f, FP / (N or 1)))) 
    13441409        results.append(curve) 
    13451410 
    13461411    return P, N, results 
    1347 ### 
     1412 
    13481413 
    13491414class CDT: 
    1350   """ Stores number of concordant (C), discordant (D) and tied (T) pairs (used for AUC) """ 
    1351   def __init__(self, C=0.0, D=0.0, T=0.0): 
     1415  """Store the number of concordant (C), discordant (D) and tied (T) pairs.""" 
     1416  def __init__(self, C=0., D=0., T=0.): 
    13521417    self.C, self.D, self.T = C, D, T 
    13531418    
     
    13591424                      "unweighted": "ignore_weights"}) 
    13601425def compute_CDT(res, class_index=-1, ignore_weights=False, **argkw): 
    1361     """Obsolete, don't use""" 
    1362     if class_index<0: 
    1363         if res.baseClass>=0: 
     1426    """Obsolete, don't use.""" 
     1427    if class_index < 0: 
     1428        if res.baseClass >= 0: 
    13641429            class_index = res.baseClass 
    13651430        else: 
     
    14031468    """ 
    14041469    Compute the area under ROC curve given a set of experimental results. 
    1405     For multivalued class problems, return the result of :obj:`by_weighted_pairs`. 
    1406     If testing consisted of multiple folds, each fold is scored and 
    1407     average score is returned. If a fold contains only instances with 
     1470    For multivalued class problems, return the result of 
     1471    :obj:`by_weighted_pairs`. 
     1472    If testing consisted of multiple folds, each fold is scored and the 
     1473    average score is returned. If a fold contains only instances with the 
    14081474    same class value, folds will be merged. 
    14091475 
     
    14361502        """ 
    14371503        Compute AUC for each pair of classes (ignoring instances of all other 
    1438         classes) and averages the results, weighting them by the number of 
     1504        classes) and average the results, weighting them by the number of 
    14391505        pairs of instances from these two classes (e.g. by the product of 
    14401506        probabilities of the two classes). AUC computed in this way still 
    1441         behaves as concordance index, e.g., gives the probability that two 
     1507        behaves as the concordance index, e.g., gives the probability that two 
    14421508        randomly chosen instances from different classes will be correctly 
    14431509        recognized (if the classifier knows from which two classes the 
     
    14511517    def by_pairs(cls, res, ignore_weights=False): 
    14521518        """ 
    1453         Similar as above, except that the average over class pairs is not 
    1454         weighted. This AUC is, like the binary, independent of class 
    1455         distributions, but it is not related to concordance index any more. 
     1519        Similar to by_weighted_pairs, except that the average over class pairs 
     1520        is not weighted. This AUC is, like the binary version, independent of 
     1521        class distributions, but it is not related to the concordance index 
     1522        any more. 
    14561523        """ 
    14571524        auc = AUC(ignore_weights=ignore_weights) 
     
    14641531        For each class, it computes AUC for this class against all others (that 
    14651532        is, treating other classes as one class). The AUCs are then averaged by 
    1466         the class probabilities. This is related to concordance index in which 
    1467         we test the classifier's (average) capability for distinguishing the 
    1468         instances from a specified class from those that come from other classes. 
     1533        the class probabilities. This is related to the concordance index in 
     1534        which we test the classifier's (average) capability of distinguishing 
     1535        the instances from a specified class from those that come from other 
     1536        classes. 
    14691537        Unlike the binary AUC, the measure is not independent of class 
    14701538        distributions. 
     
    14771545    @classmethod 
    14781546    def one_against_all(cls, res, ignore_weights=False): 
    1479         """As above, except that the average is not weighted.""" 
     1547        """ 
     1548        Similar to weighted_one_against_all, except that the average 
     1549        is not weighted. 
     1550        """ 
    14801551        auc = AUC(ignore_weights=ignore_weights) 
    14811552        auc._compute_for_multi_value_class(res, method=cls.OneAgainstAll) 
     
    14881559        out and all other classes are treated as a single class. 
    14891560        """ 
    1490         if class_index<0: 
    1491             if res.base_class>=0: 
     1561        if class_index < 0: 
     1562            if res.base_class >= 0: 
    14921563                class_index = res.base_class 
    14931564            else: 
     
    15541625                for classIndex2 in range(classIndex1): 
    15551626                    subsum_aucs = self._compute_for_multiple_folds( 
    1556                                              self._compute_one_class_against_another, 
    1557                                              iterations, 
    1558                                              (classIndex1, classIndex2, 
    1559                                               all_ite, 
    1560                                               res.number_of_iterations)) 
     1627                        self._compute_one_class_against_another, iterations, 
     1628                        (classIndex1, classIndex2, all_ite, 
     1629                        res.number_of_iterations)) 
    15611630                    if subsum_aucs: 
    15621631                        if method == self.ByWeightedPairs: 
     
    15881657        return self 
    15891658 
    1590     # computes the average AUC over folds using a "AUCcomputer" (AUC_i or AUC_ij) 
    1591     # it returns the sum of what is returned by the computer, unless at a certain 
    1592     # fold the computer has to resort to computing over all folds or even this failed; 
     1659    # computes the average AUC over folds using "AUCcomputer" (AUC_i or AUC_ij) 
     1660    # it returns the sum of what is returned by the computer, 
     1661    # unless at a certain fold the computer has to resort to computing 
     1662    # over all folds or even this failed; 
    15931663    # in these cases the result is returned immediately 
    15941664    @deprecated_keywords({"AUCcomputer": "auc_computer", 
     
    16081678        return self 
    16091679 
    1610     # Computes AUC; in multivalued class problem, AUC is computed as one against all 
    1611     # Results over folds are averages; if some folds examples from one class only, the folds are merged 
     1680    # Computes AUC 
     1681    # in multivalued class problem, AUC is computed as one against all 
     1682    # results over folds are averages 
     1683    # if some folds examples from one class only, the folds are merged 
    16121684    def _compute_for_single_class(self, res, class_index): 
    16131685        if res.number_of_iterations > 1: 
     
    16191691 
    16201692    # Computes AUC for a pair of classes (as if there were no other classes) 
    1621     # Results over folds are averages; if some folds have examples from one class only, the folds are merged 
     1693    # results over folds are averages 
     1694    # if some folds have examples from one class only, the folds are merged 
    16221695    def _compute_for_pair_of_classes(self, res, class_index1, class_index2): 
    16231696        if res.number_of_iterations > 1: 
    1624             self._compute_for_multiple_folds(self._compute_one_class_against_another, 
     1697            self._compute_for_multiple_folds( 
     1698                self._compute_one_class_against_another, 
    16251699                split_by_iterations(res), 
    16261700                (class_index1, class_index2, res, res.number_of_iterations)) 
    16271701        else: 
    1628             self._compute_one_class_against_another(res, class_index1, class_index2) 
    1629  
    1630     # computes AUC between class i and the other classes (treating them as the same class) 
     1702            self._compute_one_class_against_another(res, class_index1, 
     1703                                                    class_index2) 
     1704 
     1705    # computes AUC between class i and the other classes 
     1706    # (treating them as the same class) 
    16311707    @deprecated_keywords({"classIndex": "class_index", 
    16321708                          "divideByIfIte": "divide_by_if_ite"}) 
     
    16341710                                      divide_by_if_ite=1.0): 
    16351711    """Compute AUC between class i and all the other classes.""" 
    1636         return self._compute_auc(corn.computeCDT, ite, all_ite, divide_by_if_ite, 
    1637             (class_index, not self.ignore_weights)) 
     1712        return self._compute_auc(corn.computeCDT, ite, all_ite, 
     1713            divide_by_if_ite, (class_index, not self.ignore_weights)) 
    16381714 
    16391715 
    16401716    # computes AUC between classes i and j as if there are no other classes 
    1641     def _compute_one_class_against_another(self, ite, class_index1, 
    1642             class_index2, all_ite=None, 
    1643             divide_by_if_ite=1.0): 
     1717    def _compute_one_class_against_another( 
     1718        self, ite, class_index1, class_index2, 
     1719        all_ite=None, divide_by_if_ite=1.): 
    16441720        """ 
    16451721        Compute AUC between classes i and j as if there are no other classes. 
    16461722        """ 
    1647         return self._compute_auc(corn.computeCDTPair, ite, all_ite, divide_by_if_ite, 
     1723        return self._compute_auc(corn.computeCDTPair, ite, 
     1724            all_ite, divide_by_if_ite, 
    16481725            (class_index1, class_index2, not self.ignore_weights)) 
    16491726 
    16501727    # computes AUC using a specified 'cdtComputer' function 
    1651     # It tries to compute AUCs from 'ite' (examples from a single iteration) and, 
    1652     # if C+D+T=0, from 'all_ite' (entire test set). In the former case, the AUCs 
    1653     # are divided by 'divideByIfIte'. Additional flag is returned which is True in 
    1654     # the former case, or False in the latter. 
     1728    # It tries to compute AUCs from 'ite' (examples from a single iteration) 
     1729    # and, if C+D+T=0, from 'all_ite' (entire test set). In the former case, 
     1730    # the AUCs are divided by 'divideByIfIte'. 
     1731    # Additional flag is returned which is True in the former case, 
     1732    # or False in the latter. 
    16551733    @deprecated_keywords({"cdt_computer": "cdtComputer", 
    16561734                          "divideByIfIte": "divide_by_if_ite", 
    16571735                          "computerArgs": "computer_args"}) 
    16581736    def _compute_auc(self, cdt_computer, ite, all_ite, divide_by_if_ite, 
    1659               computer_args): 
     1737                     computer_args): 
    16601738        """ 
    16611739        Compute AUC using a :obj:`cdt_computer`. 
     
    16631741        cdts = cdt_computer(*(ite, ) + computer_args) 
    16641742        if not is_CDT_empty(cdts[0]): 
    1665             return [(cdt.C+cdt.T/2)/(cdt.C+cdt.D+cdt.T)/divide_by_if_ite for cdt in cdts], True 
     1743            return [(cdt.C + cdt.T / 2) / (cdt.C + cdt.D + cdt.T) / 
     1744                    divide_by_if_ite for cdt in cdts], True 
    16661745 
    16671746        if all_ite: 
    16681747            cdts = cdt_computer(*(all_ite, ) + computer_args) 
    16691748            if not is_CDT_empty(cdts[0]): 
    1670                 return [(cdt.C+cdt.T/2)/(cdt.C+cdt.D+cdt.T) for cdt in cdts], False 
     1749                return [(cdt.C + cdt.T / 2) / (cdt.C + cdt.D + cdt.T) 
     1750                        for cdt in cdts], False 
    16711751 
    16721752        return False, False 
     
    16791759        else: 
    16801760            iterations, all_ite = [res], None 
    1681         aucs = [[[] for _ in range(numberOfClasses)] for _ in 
    1682                                                     range(number_of_learners)] 
     1761        aucs = [[[] for _ in range(numberOfClasses)] 
     1762                for _ in range(number_of_learners)] 
    16831763        for classIndex1 in range(numberOfClasses): 
    16841764            for classIndex2 in range(classIndex1): 
    16851765                pair_aucs = self._compute_for_multiple_folds( 
    1686                     self._compute_one_class_against_another, 
    1687                     iterations, (classIndex1, 
    1688                                                   classIndex2, 
    1689                                                   all_ite, 
    1690                                                   res.number_of_iterations)) 
     1766                    self._compute_one_class_against_another, iterations, 
     1767                    (classIndex1, classIndex2, all_ite, 
     1768                     res.number_of_iterations)) 
    16911769                if pair_aucs: 
    16921770                    for lrn in range(number_of_learners): 
     
    17111789 
    17121790@replace_use_weights 
    1713 def AUC_multi(res, ignore_weights=False, method = 0): 
     1791def AUC_multi(res, ignore_weights=False, method=0): 
    17141792    auc = deprecated_function_name(AUC)(ignore_weights=ignore_weights, 
    17151793        method=method) 
     
    17251803    auc = deprecated_function_name(AUC)() 
    17261804    result = auc._compute_auc(cdtComputer, ite, all_ite, divide_by_if_ite, 
    1727         computer_args) 
     1805                              computer_args) 
    17281806    return result 
    17291807 
    17301808@replace_use_weights 
    17311809def AUC_i(ite, class_index, ignore_weights=False, all_ite=None, 
    1732           divide_by_if_ite=1.0): 
     1810          divide_by_if_ite=1.): 
    17331811    auc = deprecated_function_name(AUC)() 
    17341812    result = auc._compute_one_class_against_another(ite, class_index, 
    1735         all_ite=None, divide_by_if_ite=1.0) 
     1813        all_ite=None, divide_by_if_ite=1.) 
    17361814    return result 
    17371815 
     
    17391817@replace_use_weights 
    17401818def AUC_ij(ite, class_index1, class_index2, ignore_weights=False, 
    1741            all_ite = None, divide_by_if_ite = 1.0): 
     1819           all_ite=None, divide_by_if_ite=1.): 
    17421820    auc = deprecated_function_name(AUC)(ignore_weights=ignore_weights) 
    17431821    result = auc._compute_one_class_against_another( 
    1744         ite, class_index1, class_index2, all_ite = None, divide_by_if_ite = 1.0) 
     1822        ite, class_index1, class_index2, all_ite=None, divide_by_if_ite=1.) 
    17451823    return result 
    17461824 
     
    17671845@deprecated_keywords({"unweighted": "ignore_weights"}) 
    17681846def McNemar(res, ignore_weights=False, **argkw): 
    1769     """ Computes a triangular matrix with McNemar statistics for each pair of 
     1847    """ 
     1848    Compute a triangular matrix with McNemar statistics for each pair of 
    17701849    classifiers. The statistic follows the chi-square distribution with 
    17711850    one degree of freedom; critical value for 5% significance is around 3.84. 
     
    17741853    mcm = [] 
    17751854    for i in range(nLearners): 
    1776        mcm.append([0.0]*res.number_of_learners) 
     1855       mcm.append([0.] * res.number_of_learners) 
    17771856 
    17781857    if not res.weights or ignore_weights: 
     
    17821861            for l1 in range(nLearners): 
    17831862                for l2 in range(l1, nLearners): 
    1784                     if classes[l1]==actual: 
    1785                         if classes[l2]!=actual: 
     1863                    if classes[l1] == actual: 
     1864                        if classes[l2] != actual: 
    17861865                            mcm[l1][l2] += 1 
    1787                     elif classes[l2]==actual: 
     1866                    elif classes[l2] == actual: 
    17881867                        mcm[l2][l1] += 1 
    17891868    else: 
     
    17931872            for l1 in range(nLearners): 
    17941873                for l2 in range(l1, nLearners): 
    1795                     if classes[l1]==actual: 
    1796                         if classes[l2]!=actual: 
     1874                    if classes[l1] == actual: 
     1875                        if classes[l2] != actual: 
    17971876                            mcm[l1][l2] += i.weight 
    1798                     elif classes[l2]==actual: 
     1877                    elif classes[l2] == actual: 
    17991878                        mcm[l2][l1] += i.weight 
    18001879 
    18011880    for l1 in range(nLearners): 
    18021881        for l2 in range(l1, nLearners): 
    1803             su=mcm[l1][l2] + mcm[l2][l1] 
     1882            su = mcm[l1][l2] + mcm[l2][l1] 
    18041883            if su: 
    1805                 mcm[l2][l1] = (abs(mcm[l1][l2]-mcm[l2][l1])-1)**2 / su 
     1884                mcm[l2][l1] = (abs(mcm[l1][l2] - mcm[l2][l1]) - 1)**2 / su 
    18061885            else: 
    18071886                mcm[l2][l1] = 0 
     
    18141893 
    18151894def McNemar_of_two(res, lrn1, lrn2, ignore_weights=False): 
    1816     """ McNemar_of_two computes a McNemar statistics for a pair of classifier, 
     1895    """ 
     1896    McNemar_of_two computes the McNemar statistic for a pair of classifiers, 
    18171897    specified by indices learner1 and learner2. 
    18181898    """ 
    1819     tf = ft = 0.0 
     1899    tf = ft = 0. 
    18201900    if not res.weights or ignore_weights: 
    18211901        for i in res.results: 
    1822             actual=i.actual_class 
    1823             if i.classes[lrn1]==actual: 
    1824                 if i.classes[lrn2]!=actual: 
     1902            actual = i.actual_class 
     1903            if i.classes[lrn1] == actual: 
     1904                if i.classes[lrn2] != actual: 
    18251905                    tf += i.weight 
    1826             elif i.classes[lrn2]==actual: 
     1906            elif i.classes[lrn2] == actual: 
    18271907                    ft += i.weight 
    18281908    else: 
    18291909        for i in res.results: 
    1830             actual=i.actual_class 
    1831             if i.classes[lrn1]==actual: 
    1832                 if i.classes[lrn2]!=actual: 
    1833                     tf += 1.0 
    1834             elif i.classes[lrn2]==actual: 
    1835                     ft += 1.0 
     1910            actual = i.actual_class 
     1911            if i.classes[lrn1] == actual: 
     1912                if i.classes[lrn2] != actual: 
     1913                    tf += 1. 
     1914            elif i.classes[lrn2] == actual: 
     1915                    ft += 1. 
    18361916 
    18371917    su = tf + ft 
    18381918    if su: 
    1839         return (abs(tf-ft)-1)**2 / su 
     1919        return (abs(tf - ft) - 1)**2 / su 
    18401920    else: 
    18411921        return 0 
     
    18431923 
    18441924def Friedman(res, stat=CA): 
    1845     """ Compares classifiers by Friedman test, treating folds as different examles. 
    1846         Returns F, p and average ranks 
     1925    """ 
     1926    Compare classifiers with the Friedman test, treating folds as different examples. 
     1927    Returns F, p and average ranks. 
    18471928    """ 
    18481929    res_split = split_by_iterations(res) 
     
    18511932    N = len(res) 
    18521933    k = len(res[0]) 
    1853     sums = [0.0]*k 
     1934    sums = [0.] * k 
    18541935    for r in res: 
    1855         ranks = [k-x+1 for x in statc.rankdata(r)] 
    1856         if stat==Brier_score: # reverse ranks for Brier_score (lower better) 
    1857             ranks = [k+1-x for x in ranks] 
     1936        ranks = [k - x + 1 for x in statc.rankdata(r)] 
     1937        if stat == Brier_score: # reverse ranks for Brier_score (lower better) 
     1938            ranks = [k + 1 - x for x in ranks] 
    18581939        sums = map(add, ranks, sums) 
    18591940 
    1860     T = sum(x*x for x in sums) 
    1861     sums = [x/N for x in sums] 
    1862  
    1863     F = 12.0 / (N*k*(k+1)) * T  - 3 * N * (k+1) 
    1864  
    1865     return F, statc.chisqprob(F, k-1), sums 
     1941    T = sum(x * x for x in sums) 
     1942    sums = [x / N for x in sums] 
     1943 
     1944    F = 12. / (N * k * (k + 1)) * T  - 3 * N * (k + 1) 
     1945 
     1946    return F, statc.chisqprob(F, k - 1), sums 
    18661947 
    18671948 
    18681949def Wilcoxon_pairs(res, avgranks, stat=CA): 
    1869     """ Returns a triangular matrix, where element[i][j] stores significance of difference 
    1870         between i-th and j-th classifier, as computed by Wilcoxon test. The element is positive 
    1871         if i-th is better than j-th, negative if it is worse, and 1 if they are equal. 
    1872         Arguments to function are ExperimentResults, average ranks (as returned by Friedman) 
    1873         and, optionally, a statistics; greater values should mean better results.append 
     1950    """ 
     1951    Return a triangular matrix, where element[i][j] stores significance of 
     1952    difference between the i-th and the j-th classifier, as computed by the 
     1953    Wilcoxon test. The element is positive if the i-th is better than the j-th, 
     1954    negative if it is worse, and 1 if they are equal. 
     1955    Arguments are ExperimentResults, average ranks (as returned by Friedman) 
     1956    and, optionally, a statistic; greater values should mean better results. 
    18741957    """ 
    18751958    res_split = split_by_iterations(res) 
     
    18801963    for m1 in range(k): 
    18811964        nl = [] 
    1882         for m2 in range(m1+1, k): 
     1965        for m2 in range(m1 + 1, k): 
    18831966            t, p = statc.wilcoxont([r[m1] for r in res], [r[m2] for r in res]) 
    18841967            if avgranks[m1]<avgranks[m2]: 
     
    18941977@deprecated_keywords({"allResults": "all_results", 
    18951978                      "noConfidence": "no_confidence"}) 
    1896 def plot_learning_curve_learners(file, all_results, proportions, learners, no_confidence=0): 
    1897     plot_learning_curve(file, all_results, proportions, [Orange.misc.getobjectname(learners[i], "Learner %i" % i) for i in range(len(learners))], no_confidence) 
     1979def plot_learning_curve_learners(file, all_results, proportions, learners, 
     1980                                 no_confidence=0): 
     1981    plot_learning_curve(file, all_results, proportions, 
     1982        [Orange.misc.getobjectname(learners[i], "Learner %i" % i) 
     1983        for i in range(len(learners))], no_confidence) 
    18981984 
    18991985 
    19001986@deprecated_keywords({"allResults": "all_results", 
    19011987                      "noConfidence": "no_confidence"}) 
    1902 def plot_learning_curve(file, all_results, proportions, legend, no_confidence=0): 
     1988def plot_learning_curve(file, all_results, proportions, legend, 
     1989                        no_confidence=0): 
    19031990    import types 
    1904     fopened=0 
    1905     if type(file)==types.StringType: 
    1906         file=open(file, "wt") 
    1907         fopened=1 
     1991    fopened = 0 
     1992    if type(file) == types.StringType: 
     1993        file = open(file, "wt") 
     1994        fopened = 1 
    19081995         
    19091996    file.write("set yrange [0:1]\n") 
     
    19132000 
    19142001    file.write("plot \\\n") 
    1915     for i in range(len(legend)-1): 
     2002    for i in range(len(legend) - 1): 
    19162003        if not no_confidence: 
    19172004            file.write("'-' title '' with yerrorbars pointtype %i,\\\n" % (i+1)) 
     
    26062693 
    26072694 
    2608 ######################################################################################### 
     2695################################################################################ 
    26092696if __name__ == "__main__": 
    26102697    avranks =  [3.143, 2.000, 2.893, 1.964] 
  • Orange/regression/linear.py

    r10294 r10348  
    132132    from scipy import stats 
    133133except ImportError: 
    134     import statc as stats 
     134    from Orange import statc as stats 
    135135 
    136136from numpy import dot, sqrt 
  • Orange/regression/pls.py

    r10330 r10367  
    4141 
    4242To predict values for the first two data instances 
    43 use the followin code  
     43use the following code:  
    4444 
    4545.. literalinclude:: code/pls-example.py 
    4646    :lines: 16-20 
    4747 
    48 Output 
    49  
    50 :: 
     48Output:: 
    5149 
    5250    Actual     [<orange.Value 'Y1'='0.490'>, <orange.Value 'Y2'='1.237'>, <orange.Value 'Y3'='1.808'>, <orange.Value 'Y4'='0.422'>] 
     
    5755 
    5856To see the coefficient of the model (in this case they are stored in a matrix) 
    59 print the model 
     57print the model: 
    6058 
    6159.. literalinclude:: code/pls-example.py 
    6260    :lines: 22 
    6361 
    64 The ouptut looks like 
    65  
    66 :: 
     62The ouptut looks like this:: 
    6763 
    6864    Regression coefficients: 
  • docs/reference/rst/Orange.classification.classfromvar.rst

    r10373 r10376  
    2424.. class:: ClassifierFromVar(which_var[, transformer]) 
    2525     
    26     Return the value of variable :obj:`which_var`; transform it by the 
    27     :obj:`transformer`, if it is given. 
     26    Return the value of variable :obj:`~ClassifierFromVar.which_var`; 
     27    transform it by the :obj:`~ClassifierFromVar.transformer`, if it 
     28    is given. 
    2829  
    2930    .. attribute:: which_var 
     
    3435 
    3536        The transformer for the value. It should be a class derived 
    36         from :obj:`Orange.data.utils.TransformValue` or a function 
     37        from :obj:`~Orange.data.utils.TransformValue` or a function 
    3738        written in Python. 
    3839 
     
    4344    .. attribute:: distribution_for_unknown 
    4445 
    45         The distribution that is returned when the `which_var`'s value 
    46         is undefined and :obj:`transform_unknowns` is ``False``. 
     46        The distribution that is returned when the 
     47        :obj:`~ClassifierFromVar.which_var`'s value is undefined and 
     48        :obj:`~ClassifierFromVar.transform_unknowns` is ``False``. 
    4749 
    4850    .. method:: __call__(inst[, result_type]) 
    4951 
    5052        Return ``transformer(instance[which_var])``. The value of 
    51         :obj:`which_var` can be either an ordinary variable, a meta 
    52         variable or a variable which is not defined for the instance 
    53         but its descriptor has a 
     53        :obj:`~ClassifierFromVar.which_var` can be either an ordinary 
     54        variable, a meta variable or a variable which is not defined 
     55        for the instance but its descriptor has a 
    5456        :obj:`~Orange.feature.Descriptor.get_value_from` that can be 
    5557        used to compute the value. 
     
    5759        If the feature is not found or its value is missing, the 
    5860        missing value is passed to the transformer if 
    59         :obj:`transform_unknowns` is ``True``. Otherwise, 
    60         :obj:`distribution_for_unknown` is returned. 
     61        :obj:`~ClassifierFromVar.transform_unknowns` is 
     62        ``True``. Otherwise, 
     63        :obj:`~ClassifierFromVar.distribution_for_unknown` is 
     64        returned. 
    6165 
    6266The following example demonstrates the use of the class on the Monk 1 
     
    7175.. class:: ClassifierFromVarFD 
    7276 
     77 
    7378    A class similar to 
    7479    :obj:`~Orange.classification.ClassifierFromVar` except that the 
     
    8186    :obj:`Orange.feature.Descriptor.get_value_from`. 
    8287 
    83     .. attribute:: domain (inherited from ClassifierFromVarFD) 
     88    .. attribute:: domain (inherited from :obj:`ClassifierFromVarFD`) 
    8489     
    8590        The domain to which the :obj:`position` applies. 
  • docs/reference/rst/Orange.classification.lookup.rst

    r10347 r10363  
    5050     
    5151We can check the correctness of the script by printing out several 
    52 random examples from ``data2``. 
     52random examples from table ``monks2``. 
    5353 
    5454    >>> for i in range(5): 
  • docs/reference/rst/code/classifier-from-var-example.py

    r10123 r10361  
    1111        return 1 
    1212 
    13 e1.getValueFrom = Orange.classification.ClassifierFromVar() 
    14 e1.getValueFrom.whichVar = e 
    15 e1.getValueFrom.transformer = eTransformer 
     13e1.get_value_from = Orange.classification.ClassifierFromVar() 
     14e1.get_value_from.whichVar = e 
     15e1.get_value_from.transformer = eTransformer 
    1616 
    1717monks2 = monks.select(["a", "b", "e", e1, "y"]) 
     
    1919    print i 
    2020 
    21 e1.getValueFrom = Orange.classification.ClassifierFromVarFD() 
    22 e1.getValueFrom.domain = monks.domain 
    23 e1.getValueFrom.position = monks.domain.attributes.index(e) 
    24 e1.getValueFrom.transformer = eTransformer 
     21e1.get_value_from = Orange.classification.ClassifierFromVarFD() 
     22e1.get_value_from.domain = monks.domain 
     23e1.get_value_from.position = monks.domain.attributes.index(e) 
     24e1.get_value_from.transformer = eTransformer 
    2525 
  • setup.py

    r10344 r10366  
    33import os, sys         
    44import distutils.core 
    5 from distutils.core import setup 
     5try: 
     6    from setuptools import setup 
     7    from setuptools.command.install import install 
     8    have_setuptools = True 
     9except ImportError: 
     10    from distutils.core import setup 
     11    from distutils.command.install import install 
     12    have_setuptools = False 
     13 
    614from distutils.core import Extension 
    715from distutils.command.build_ext import build_ext 
    816from distutils.command.install_lib import install_lib 
    9 from distutils.command.install import install 
    1017from distutils.util import convert_path 
    1118from distutils.msvccompiler import MSVCCompiler 
    1219from distutils.unixccompiler import UnixCCompiler 
    13  
    14 # This is set in setupegg.py 
    15 have_setuptools = getattr(distutils.core, "have_setuptools", False)  
     20  
     21if have_setuptools: 
     22    setuptools_args = {"zip_safe": False, 
     23                       "install_requires": ["numpy"], 
     24                       "extras_require": {"GUI": ["PyQt4", "PyQwt"], 
     25                                          "NETWORK": ["networkx"]} 
     26                      } 
     27else: 
     28    setuptools_args = {} 
     29 
    1630 
    1731import re 
     
    2842from distutils.sysconfig import get_python_inc, get_config_var 
    2943 
    30 import numpy 
    31 numpy_include_dir = numpy.get_include() 
     44try: 
     45    import numpy 
     46    numpy_include_dir = numpy.get_include() 
     47except ImportError: 
     48    # When setup.py is first run to install orange, numpy can still be missing 
     49    pass 
     50    numpy_include_dir = None 
     51     
    3252python_include_dir = get_python_inc(plat_specific=1) 
    3353 
     
    401421        # Create a .pth file with a path inside the Orange/orng directory 
    402422        # so the old modules are importable 
    403         self.path_file, self.extra_dirs = ("orange-orng-modules", "Orange/orng") 
     423        self.path_file, self.extra_dirs = ("Orange-orng-modules", "Orange/orng") 
    404424        self.extra_dirs = convert_path(self.extra_dirs) 
    405425        log.info("creating portal path for orange compatibility.") 
     
    531551      matches.append(os.path.join(root, filename)) 
    532552packages = [os.path.dirname(pkg).replace(os.path.sep, '.') for pkg in matches] 
    533  
    534 if have_setuptools: 
    535     setuptools_args = {"zip_safe": False, 
    536                        "install_requires": ["numpy"], 
    537                        "extra_requires": ["networkx", "PyQt4", "PyQwt"] 
    538                        } 
    539 else: 
    540     setuptools_args = {} 
    541553 
    542554setup(cmdclass={"build_ext": pyxtract_build_ext, 
     
    563575                             "Orange.OrangeWidgets.plot", 
    564576                             "Orange.OrangeWidgets.plot.primitives", 
    565                              "Orange.doc", 
    566577                             ], 
    567578       
    568       # Python 2.6 does not include files from package_data into 
    569       # the manifest so also add all these files in MANIFEST.in 
    570       # manually  
    571       package_data = {"Orange": [ 
    572           "OrangeCanvas/icons/*.png", 
    573           "OrangeCanvas/orngCanvas.pyw", 
    574           "OrangeCanvas/WidgetTabs.txt", 
    575           "OrangeWidgets/icons/*.png", 
    576           "OrangeWidgets/icons/backgrounds/*.png", 
    577           "OrangeWidgets/report/index.html", 
    578           "OrangeWidgets/Associate/icons/*.png", 
    579           "OrangeWidgets/Classify/icons/*.png", 
    580           "OrangeWidgets/Data/icons/*.png", 
    581           "OrangeWidgets/Evaluate/icons/*.png", 
    582           "OrangeWidgets/Prototypes/icons/*.png", 
    583           "OrangeWidgets/Regression/icons/*.png", 
    584           "OrangeWidgets/Unsupervised/icons/*.png", 
    585           "OrangeWidgets/Visualize/icons/*.png", 
    586           "OrangeWidgets/Visualize Qt/icons/*.png", 
    587           "OrangeWidgets/plot/*.gs", 
    588           "OrangeWidgets/plot/*.vs", 
    589           "OrangeWidgets/plot/primitives/*.obj", 
    590           # TODO: Doc datasets and files should be installed using data_files. 
    591           "doc/datasets/*.tab", 
    592           "doc/networks/*.net", 
    593           "doc/networks/*.tab", 
    594           "doc/style.css", 
    595           "doc/widgets/*/*.*", 
    596           "orng/orangerc.cfg" 
    597           ] 
    598                       }, 
    599        
     579      package_data = { 
     580          "Orange" : ["orangerc.cfg", "doc/datasets/*.tab", "doc/datasets/*.csv", "doc/datasets/*.basket", 
     581                      "doc/networks/*.net", "doc/networks/*.tab", 
     582                      "doc/style.css", "doc/widgets/*/*.*", 
     583                      "testing/regression/tests_20/*.tab", 
     584                      "testing/regression/tests_20/*.net", 
     585                      "testing/regression/tests_20/*.basket", 
     586                      "testing/regression/tests_20/*.csv"], 
     587          "Orange.OrangeCanvas": ["icons/*.png", "orngCanvas.pyw", "WidgetTabs.txt"], 
     588          "Orange.OrangeWidgets":["icons/*.png", "icons/backgrounds/*.png", "report/index.html"], 
     589          "Orange.OrangeWidgets.Associate": ["icons/*.png"], 
     590          "Orange.OrangeWidgets.Classify": ["icons/*.png"], 
     591          "Orange.OrangeWidgets.Data": ["icons/*.png"], 
     592          "Orange.OrangeWidgets.Evaluate": ["icons/*.png"], 
     593          "Orange.OrangeWidgets.Prototypes": ["icons/*.png"], 
     594          "Orange.OrangeWidgets.Regression": ["icons/*.png"], 
     595          "Orange.OrangeWidgets.Unsupervised": ["icons/*.png"], 
     596          "Orange.OrangeWidgets.Visualize": ["icons/*.png"], 
     597          "Orange.OrangeWidgets.Visualize Qt": ["icons/*.png"], 
     598          "Orange.OrangeWidgets.plot": ["*.gs", "*.vs"], 
     599          "Orange.OrangeWidgets.plot.primitives": ["*.obj"], 
     600          }, 
    600601      ext_modules = [include_ext, orange_ext, orangeom_ext, 
    601602                     orangene_ext, corn_ext, statc_ext], 
  • source/orange/lib_kernel.cpp

    r10220 r10357  
    21572157    for(pe = pi; *pe && *pe != sep; pe++); 
    21582158    const int plen = pe-pi; 
    2159      char *npath = strncpy(new char[plen+flen+2], pi, pe-pi); 
    2160     if (!plen || (pe[plen] != pathsep)) { 
     2159    char *npath = strncpy(new char[plen+flen+2], pi, pe-pi); 
     2160    if (!plen || (pi[plen] != pathsep)) { 
    21612161      npath[plen] = pathsep; 
    21622162      strcpy(npath+plen+1, filename); 
Note: See TracChangeset for help on using the changeset viewer.