Changeset 9505:4b798678cd3d in orange


Ignore:
Timestamp:
12/31/11 03:38:05 (2 years ago)
Author:
matija <matija.polajnar@…>
Branch:
default
Parents:
9431:7fc716597428 (diff), 9504:c386e6edfaaf (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Convert:
0cda4b68ffbb906ca5189adc65d5fa46c9fbaa4a
Message:

Merge in the (heavily modified) MLC code from GSOC 2011 (modules, documentation, evaluation code, regression test). Widgets will be merged in a little bit later, which will finally close ticket #992.

Files:
11 added
289 deleted
25 edited

Legend:

Unmodified
Added
Removed
  • docs/reference/rst/index.rst

    r9385 r9505  
    1212   Orange.classification 
    1313 
     14   Orange.multilabel 
     15 
    1416   Orange.clustering 
    15     
     17 
    1618   Orange.distance 
    1719 
  • orange/Orange/__init__.py

    r9308 r9505  
    6363_import("multitarget") 
    6464 
     65_import("multilabel") 
     66_import("multilabel.multibase") 
     67_import("multilabel.br") 
     68_import("multilabel.lp") 
     69_import("multilabel.mlknn") 
     70_import("multilabel.brknn") 
     71_import("multilabel.mulan") 
     72 
    6573_import("associate") 
    6674 
  • orange/Orange/data/io.py

    r9111 r9505  
    102102 
    103103def loadARFF(filename, create_on_new = MakeStatus.Incompatible, **kwargs): 
     104    """Return class:`Orange.data.Table` containing data from file in Weka ARFF format 
     105       if there exists no .xml file with the same name. If it does, a multi-label 
     106       dataset is read and returned. 
     107    """ 
     108    if filename[-5:] == ".arff": 
     109        filename = filename[:-5] 
     110    if os.path.exists(filename + ".xml") and os.path.exists(filename + ".arff"): 
     111        xml_name = filename + ".xml"  
     112        arff_name = filename + ".arff"  
     113        return Orange.multilabel.mulan.trans_mulan_data(xml_name,arff_name,create_on_new) 
     114    else: 
     115        return loadARFF_Weka(filename, create_on_new) 
     116         
     117def loadARFF_Weka(filename, create_on_new = Orange.data.variable.Variable.MakeStatus.Incompatible, **kwargs): 
    104118    """Return class:`Orange.data.Table` containing data from file in Weka ARFF format""" 
    105119    if not os.path.exists(filename) and os.path.exists(filename + ".arff"): 
     
    122136            print "ARFF import ignoring:",x 
    123137        if state == 1: 
    124             dd = x.split(',') 
    125             r = [] 
    126             for xs in dd: 
    127                 y = xs.strip(" ") 
    128                 if len(y) > 0: 
    129                     if y[0]=="'" or y[0]=='"': 
    130                         r.append(xs.strip("'\"")) 
     138            if x[0] == '{':#sparse data format, begin with '{', ends with '}' 
     139                r = [None]*len(attributes) 
     140                dd = x[1:-1] 
     141                dd = dd.split(',') 
     142                for xs in dd: 
     143                    y = xs.split(" ") 
     144                    if len(y) <> 2: 
     145                        raise ValueError("the format of the data is error") 
     146                    r[int(y[0])] = y[1] 
     147                data.append(r) 
     148            else:#normal data format, split by ',' 
     149                dd = x.split(',') 
     150                r = [] 
     151                for xs in dd: 
     152                    y = xs.strip(" ") 
     153                    if len(y) > 0: 
     154                        if y[0]=="'" or y[0]=='"': 
     155                            r.append(xs.strip("'\"")) 
     156                        else: 
     157                            ns = xs.split() 
     158                            for ls in ns: 
     159                                if len(ls) > 0: 
     160                                    r.append(ls) 
    131161                    else: 
    132                         ns = xs.split() 
    133                         for ls in ns: 
    134                             if len(ls) > 0: 
    135                                 r.append(ls) 
    136                 else: 
    137                     r.append('?') 
    138             data.append(r[:len(attributes)]) 
     162                        r.append('?') 
     163                data.append(r[:len(attributes)]) 
    139164        else: 
    140165            y = [] 
     
    180205    t = Orange.data.Table(d,lex) 
    181206    t.name = name 
    182     t.attribute_load_status = attributeLoadStatus 
     207     
     208    if hasattr(t, "attribute_load_status"): 
     209        t.attribute_load_status = attributeLoadStatus 
    183210    return t 
    184211loadARFF = Orange.misc.deprecated_keywords( 
     
    243270        f.write('%s\n'%x[-1]) 
    244271 
     272def loadMULAN(filename, create_on_new = Orange.data.variable.Variable.MakeStatus.Incompatible, **kwargs): 
     273    """Return class:`Orange.data.Table` containing data from file in Mulan ARFF and XML format""" 
     274    if filename[-4:] == ".xml": 
     275        filename = filename[:-4] 
     276    if os.path.exists(filename + ".xml") and os.path.exists(filename + ".arff"): 
     277        xml_name = filename + ".xml"  
     278        arff_name = filename + ".arff"  
     279        return Orange.multilabel.mulan.trans_mulan_data(xml_name,arff_name) 
     280    else: 
     281        return None 
     282loadARFF = Orange.misc.deprecated_keywords( 
     283{"createOnNew": "create_on_new"} 
     284)(loadARFF) 
     285 
    245286def toC50(filename, table, try_numericize=0): 
    246287    """Save class:`Orange.data.Table` to file in C50 format""" 
     
    737778register_file_type("R", None, toR, ".R") 
    738779register_file_type("Weka", loadARFF, toARFF, ".arff") 
     780register_file_type("Mulan", loadMULAN, None, ".xml") 
    739781#registerFileType("C50", None, toC50, [".names", ".data", ".test"]) 
    740782register_file_type("libSVM", loadLibSVM, toLibSVM, ".svm") 
  • orange/Orange/evaluation/scoring.py

    r9354 r9505  
    136136    
    137137   The function then returns a three-dimensional matrix, where the element 
    138    A[:obj:`learner`][:obj:`actualClass`][:obj:`predictedClass`] 
    139    gives the number of instances belonging to 'actualClass' for which the 
     138   A[:obj:`learner`][:obj:`actual_class`][:obj:`predictedClass`] 
     139   gives the number of instances belonging to 'actual_class' for which the 
    140140   'learner' predicted 'predictedClass'. We shall compute and print out 
    141141   the matrix for naive Bayesian classifier. 
     
    317317results by folds, call the function for each fold separately and then sum 
    318318the results up however you see fit, or you can set the ExperimentResults' 
    319 attribute numberOfIterations to 1, to cheat the function - at your own 
     319attribute number_of_iterations to 1, to cheat the function - at your own 
    320320responsibility for the statistical correctness. Regarding the multi-class 
    321321problems, if you don't chose a specific class, Orange.evaluation.scoring will use the class 
     
    404404 
    405405.. autofunction:: split_by_iterations 
     406 
     407===================================== 
     408Scoring for multilabel classification 
     409===================================== 
     410 
      411Multi-label classification requires different metrics than those used in traditional single-label 
      412classification. This module presents the various metrics that have been proposed in the literature. 
      413Let :math:`D` be a multi-label evaluation data set, consisting of :math:`|D|` multi-label examples 
     414:math:`(x_i,Y_i)`, :math:`i=1..|D|`, :math:`Y_i \\subseteq L`. Let :math:`H` be a multi-label classifier  
     415and :math:`Z_i=H(x_i)` be the set of labels predicted by :math:`H` for example :math:`x_i`. 
     416 
     417.. autofunction:: mlc_hamming_loss  
     418.. autofunction:: mlc_accuracy 
     419.. autofunction:: mlc_precision 
     420.. autofunction:: mlc_recall 
     421 
     422So, let's compute all this in part of  
     423(`ml-evaluator.py`_, uses `multidata.tab`_) and print it out: 
     424 
     425.. literalinclude:: code/mlc-evaluator.py 
     426   :lines: 1-15 
     427 
     428.. _multidata.tab: code/multidata.tab 
     429.. _ml-evaluator.py: code/mlc-evaluator.py 
     430 
     431The output should look like this:: 
     432 
     433    loss= [0.9375] 
     434    accuracy= [0.875] 
     435    precision= [1.0] 
     436    recall= [0.875] 
     437 
     438References 
     439========== 
     440 
     441Boutell, M.R., Luo, J., Shen, X. & Brown, C.M. (2004), 'Learning multi-label scene classification', 
      442Pattern Recognition, vol.37, no.9, pp:1757-71 
     443 
     444Godbole, S. & Sarawagi, S. (2004), 'Discriminative Methods for Multi-labeled Classification', paper  
     445presented to Proceedings of the 8th Pacific-Asia Conference on Knowledge Discovery and Data Mining  
     446(PAKDD 2004) 
     447  
      448Schapire, R.E. & Singer, Y. (2000), 'Boostexter: a boosting-based system for text categorization',  
     449Machine Learning, vol.39, no.2/3, pp:135-68. 
    406450 
    407451""" 
     
    444488    of ExperimentResults, one for each iteration. 
    445489    """ 
    446     if res.numberOfIterations < 2: 
     490    if res.number_of_iterations < 2: 
    447491        return [res] 
    448492         
    449     ress = [Orange.evaluation.testing.ExperimentResults(1, res.classifierNames, res.classValues, res.weights, classifiers=res.classifiers, loaded=res.loaded) 
    450             for i in range(res.numberOfIterations)] 
     493    ress = [Orange.evaluation.testing.ExperimentResults(1, res.classifier_names, res.class_values, res.weights, classifiers=res.classifiers, loaded=res.loaded) 
     494            for i in range(res.number_of_iterations)] 
    451495    for te in res.results: 
    452         ress[te.iterationNumber].results.append(te) 
     496        ress[te.iteration_number].results.append(te) 
    453497    return ress 
    454498 
     
    475519def class_probabilities_from_res(res, **argkw): 
    476520    """Calculate class probabilities""" 
    477     probs = [0.0] * len(res.classValues) 
     521    probs = [0.0] * len(res.class_values) 
    478522    if argkw.get("unweighted", 0) or not res.weights: 
    479523        for tex in res.results: 
    480             probs[int(tex.actualClass)] += 1.0 
     524            probs[int(tex.actual_class)] += 1.0 
    481525        totweight = gettotsize(res) 
    482526    else: 
    483527        totweight = 0.0 
    484528        for tex in res.results: 
    485             probs[tex.actualClass] += tex.weight 
     529            probs[tex.actual_class] += tex.weight 
    486530            totweight += tex.weight 
    487531        check_non_zero(totweight) 
     
    494538        if not stats: 
    495539            raise ValueError, "Cannot compute the score: no examples or sum of weights is 0.0." 
    496         numberOfLearners = len(stats[0]) 
     540        number_of_learners = len(stats[0]) 
    497541        stats = filter(lambda (x, fN): fN>0.0, zip(stats,foldN)) 
    498         stats = [ [x[lrn]/fN for x, fN in stats] for lrn in range(numberOfLearners)] 
     542        stats = [ [x[lrn]/fN for x, fN in stats] for lrn in range(number_of_learners)] 
    499543    else: 
    500544        stats = [ [x/Fn for x, Fn in filter(lambda (x, Fn): Fn > 0.0, zip(lrnD, foldN))] for lrnD in stats] 
     
    511555     
    512556def ME(res, **argkw): 
    513     MEs = [0.0]*res.numberOfLearners 
     557    MEs = [0.0]*res.number_of_learners 
    514558 
    515559    if argkw.get("unweighted", 0) or not res.weights: 
    516560        for tex in res.results: 
    517             MEs = map(lambda res, cls, ac = float(tex.actualClass): 
     561            MEs = map(lambda res, cls, ac = float(tex.actual_class): 
    518562                      res + abs(float(cls) - ac), MEs, tex.classes) 
    519563        totweight = gettotsize(res) 
    520564    else: 
    521565        for tex in res.results: 
    522             MEs = map(lambda res, cls, ac = float(tex.actualClass), tw = tex.weight: 
     566            MEs = map(lambda res, cls, ac = float(tex.actual_class), tw = tex.weight: 
    523567                       res + tw*abs(float(cls) - ac), MEs, tex.classes) 
    524568        totweight = gettotweight(res) 
     
    538582def regression_error(res, **argkw): 
    539583    """regression_error(res) -> regression error (default: MSE)""" 
    540     if argkw.get("SE", 0) and res.numberOfIterations > 1: 
     584    if argkw.get("SE", 0) and res.number_of_iterations > 1: 
    541585        # computes the scores for each iteration, then averages 
    542         scores = [[0.0] * res.numberOfIterations for i in range(res.numberOfLearners)] 
     586        scores = [[0.0] * res.number_of_iterations for i in range(res.number_of_learners)] 
    543587        if argkw.get("norm-abs", 0) or argkw.get("norm-sqr", 0): 
    544             norm = [0.0] * res.numberOfIterations 
    545  
    546         nIter = [0]*res.numberOfIterations       # counts examples in each iteration 
    547         a = [0]*res.numberOfIterations           # average class in each iteration 
     588            norm = [0.0] * res.number_of_iterations 
     589 
     590        nIter = [0]*res.number_of_iterations       # counts examples in each iteration 
     591        a = [0]*res.number_of_iterations           # average class in each iteration 
    548592        for tex in res.results: 
    549             nIter[tex.iterationNumber] += 1 
    550             a[tex.iterationNumber] += float(tex.actualClass) 
    551         a = [a[i]/nIter[i] for i in range(res.numberOfIterations)] 
     593            nIter[tex.iteration_number] += 1 
     594            a[tex.iteration_number] += float(tex.actual_class) 
     595        a = [a[i]/nIter[i] for i in range(res.number_of_iterations)] 
    552596 
    553597        if argkw.get("unweighted", 0) or not res.weights: 
    554598            # iterate accross test cases 
    555599            for tex in res.results: 
    556                 ai = float(tex.actualClass) 
    557                 nIter[tex.iterationNumber] += 1 
     600                ai = float(tex.actual_class) 
     601                nIter[tex.iteration_number] += 1 
    558602 
    559603                # compute normalization, if required 
    560604                if argkw.get("norm-abs", 0): 
    561                     norm[tex.iterationNumber] += abs(ai - a[tex.iterationNumber]) 
     605                    norm[tex.iteration_number] += abs(ai - a[tex.iteration_number]) 
    562606                elif argkw.get("norm-sqr", 0): 
    563                     norm[tex.iterationNumber] += (ai - a[tex.iterationNumber])**2 
     607                    norm[tex.iteration_number] += (ai - a[tex.iteration_number])**2 
    564608 
    565609                # iterate accross results of different regressors 
    566610                for i, cls in enumerate(tex.classes): 
    567611                    if argkw.get("abs", 0): 
    568                         scores[i][tex.iterationNumber] += abs(float(cls) - ai) 
     612                        scores[i][tex.iteration_number] += abs(float(cls) - ai) 
    569613                    else: 
    570                         scores[i][tex.iterationNumber] += (float(cls) - ai)**2 
     614                        scores[i][tex.iteration_number] += (float(cls) - ai)**2 
    571615        else: # unweighted<>0 
    572616            raise NotImplementedError, "weighted error scores with SE not implemented yet" 
     
    586630         
    587631    else: # single iteration (testing on a single test set) 
    588         scores = [0.0] * res.numberOfLearners 
     632        scores = [0.0] * res.number_of_learners 
    589633        norm = 0.0 
    590634 
    591635        if argkw.get("unweighted", 0) or not res.weights: 
    592             a = sum([tex.actualClass for tex in res.results]) \ 
     636            a = sum([tex.actual_class for tex in res.results]) \ 
    593637                / len(res.results) 
    594638            for tex in res.results: 
    595639                if argkw.get("abs", 0): 
    596                     scores = map(lambda res, cls, ac = float(tex.actualClass): 
     640                    scores = map(lambda res, cls, ac = float(tex.actual_class): 
    597641                                 res + abs(float(cls) - ac), scores, tex.classes) 
    598642                else: 
    599                     scores = map(lambda res, cls, ac = float(tex.actualClass): 
     643                    scores = map(lambda res, cls, ac = float(tex.actual_class): 
    600644                                 res + (float(cls) - ac)**2, scores, tex.classes) 
    601645 
    602646                if argkw.get("norm-abs", 0): 
    603                     norm += abs(tex.actualClass - a) 
     647                    norm += abs(tex.actual_class - a) 
    604648                elif argkw.get("norm-sqr", 0): 
    605                     norm += (tex.actualClass - a)**2 
     649                    norm += (tex.actual_class - a)**2 
    606650            totweight = gettotsize(res) 
    607651        else: 
    608652            # UNFINISHED 
    609653            for tex in res.results: 
    610                 MSEs = map(lambda res, cls, ac = float(tex.actualClass), 
     654                MSEs = map(lambda res, cls, ac = float(tex.actual_class), 
    611655                           tw = tex.weight: 
    612656                           res + tw * (float(cls) - ac)**2, MSEs, tex.classes) 
     
    665709def MSE_old(res, **argkw): 
    666710    """MSE(res) -> mean-squared error""" 
    667     if argkw.get("SE", 0) and res.numberOfIterations > 1: 
    668         MSEs = [[0.0] * res.numberOfIterations for i in range(res.numberOfLearners)] 
    669         nIter = [0]*res.numberOfIterations 
     711    if argkw.get("SE", 0) and res.number_of_iterations > 1: 
     712        MSEs = [[0.0] * res.number_of_iterations for i in range(res.number_of_learners)] 
     713        nIter = [0]*res.number_of_iterations 
    670714        if argkw.get("unweighted", 0) or not res.weights: 
    671715            for tex in res.results: 
    672                 ac = float(tex.actualClass) 
    673                 nIter[tex.iterationNumber] += 1 
     716                ac = float(tex.actual_class) 
     717                nIter[tex.iteration_number] += 1 
    674718                for i, cls in enumerate(tex.classes): 
    675                     MSEs[i][tex.iterationNumber] += (float(cls) - ac)**2 
     719                    MSEs[i][tex.iteration_number] += (float(cls) - ac)**2 
    676720        else: 
    677721            raise ValueError, "weighted RMSE with SE not implemented yet" 
     
    682726         
    683727    else: 
    684         MSEs = [0.0]*res.numberOfLearners 
     728        MSEs = [0.0]*res.number_of_learners 
    685729        if argkw.get("unweighted", 0) or not res.weights: 
    686730            for tex in res.results: 
    687                 MSEs = map(lambda res, cls, ac = float(tex.actualClass): 
     731                MSEs = map(lambda res, cls, ac = float(tex.actual_class): 
    688732                           res + (float(cls) - ac)**2, MSEs, tex.classes) 
    689733            totweight = gettotsize(res) 
    690734        else: 
    691735            for tex in res.results: 
    692                 MSEs = map(lambda res, cls, ac = float(tex.actualClass), tw = tex.weight: 
     736                MSEs = map(lambda res, cls, ac = float(tex.actual_class), tw = tex.weight: 
    693737                           res + tw * (float(cls) - ac)**2, MSEs, tex.classes) 
    694738            totweight = gettotweight(res) 
     
    730774    estimated using the latter method. 
    731775    """ 
    732     if res.numberOfIterations==1: 
     776    if res.number_of_iterations==1: 
    733777        if type(res)==ConfusionMatrix: 
    734778            div = nm.TP+nm.FN+nm.FP+nm.TN 
     
    736780            ca = [(nm.TP+nm.TN)/div] 
    737781        else: 
    738             CAs = [0.0]*res.numberOfLearners 
     782            CAs = [0.0]*res.number_of_learners 
    739783            if argkw.get("unweighted", 0) or not res.weights: 
    740784                totweight = gettotsize(res) 
    741785                for tex in res.results: 
    742                     CAs = map(lambda res, cls: res+(cls==tex.actualClass), CAs, tex.classes) 
     786                    CAs = map(lambda res, cls: res+(cls==tex.actual_class), CAs, tex.classes) 
    743787            else: 
    744788                totweight = 0. 
    745789                for tex in res.results: 
    746                     CAs = map(lambda res, cls: res+(cls==tex.actualClass and tex.weight), CAs, tex.classes) 
     790                    CAs = map(lambda res, cls: res+(cls==tex.actual_class and tex.weight), CAs, tex.classes) 
    747791                    totweight += tex.weight 
    748792            check_non_zero(totweight) 
     
    755799         
    756800    else: 
    757         CAsByFold = [[0.0]*res.numberOfIterations for i in range(res.numberOfLearners)] 
    758         foldN = [0.0]*res.numberOfIterations 
     801        CAsByFold = [[0.0]*res.number_of_iterations for i in range(res.number_of_learners)] 
     802        foldN = [0.0]*res.number_of_iterations 
    759803 
    760804        if argkw.get("unweighted", 0) or not res.weights: 
    761805            for tex in res.results: 
    762                 for lrn in range(res.numberOfLearners): 
    763                     CAsByFold[lrn][tex.iterationNumber] += (tex.classes[lrn]==tex.actualClass) 
    764                 foldN[tex.iterationNumber] += 1 
     806                for lrn in range(res.number_of_learners): 
     807                    CAsByFold[lrn][tex.iteration_number] += (tex.classes[lrn]==tex.actual_class) 
     808                foldN[tex.iteration_number] += 1 
    765809        else: 
    766810            for tex in res.results: 
    767                 for lrn in range(res.numberOfLearners): 
    768                     CAsByFold[lrn][tex.iterationNumber] += (tex.classes[lrn]==tex.actualClass) and tex.weight 
    769                 foldN[tex.iterationNumber] += tex.weight 
     811                for lrn in range(res.number_of_learners): 
     812                    CAsByFold[lrn][tex.iteration_number] += (tex.classes[lrn]==tex.actual_class) and tex.weight 
     813                foldN[tex.iteration_number] += tex.weight 
    770814 
    771815        return statistics_by_folds(CAsByFold, foldN, reportSE, False) 
     
    779823def AP(res, reportSE = False, **argkw): 
    780824    """ Computes the average probability assigned to the correct class. """ 
    781     if res.numberOfIterations == 1: 
    782         APs=[0.0]*res.numberOfLearners 
     825    if res.number_of_iterations == 1: 
     826        APs=[0.0]*res.number_of_learners 
    783827        if argkw.get("unweighted", 0) or not res.weights: 
    784828            for tex in res.results: 
    785                 APs = map(lambda res, probs: res + probs[tex.actualClass], APs, tex.probabilities) 
     829                APs = map(lambda res, probs: res + probs[tex.actual_class], APs, tex.probabilities) 
    786830            totweight = gettotsize(res) 
    787831        else: 
    788832            totweight = 0. 
    789833            for tex in res.results: 
    790                 APs = map(lambda res, probs: res + probs[tex.actualClass]*tex.weight, APs, tex.probabilities) 
     834                APs = map(lambda res, probs: res + probs[tex.actual_class]*tex.weight, APs, tex.probabilities) 
    791835                totweight += tex.weight 
    792836        check_non_zero(totweight) 
    793837        return [AP/totweight for AP in APs] 
    794838 
    795     APsByFold = [[0.0]*res.numberOfLearners for i in range(res.numberOfIterations)] 
    796     foldN = [0.0] * res.numberOfIterations 
     839    APsByFold = [[0.0]*res.number_of_learners for i in range(res.number_of_iterations)] 
     840    foldN = [0.0] * res.number_of_iterations 
    797841    if argkw.get("unweighted", 0) or not res.weights: 
    798842        for tex in res.results: 
    799             APsByFold[tex.iterationNumber] = map(lambda res, probs: res + probs[tex.actualClass], APsByFold[tex.iterationNumber], tex.probabilities) 
    800             foldN[tex.iterationNumber] += 1 
     843            APsByFold[tex.iteration_number] = map(lambda res, probs: res + probs[tex.actual_class], APsByFold[tex.iteration_number], tex.probabilities) 
     844            foldN[tex.iteration_number] += 1 
    801845    else: 
    802846        for tex in res.results: 
    803             APsByFold[tex.iterationNumber] = map(lambda res, probs: res + probs[tex.actualClass] * tex.weight, APsByFold[tex.iterationNumber], tex.probabilities) 
    804             foldN[tex.iterationNumber] += tex.weight 
     847            APsByFold[tex.iteration_number] = map(lambda res, probs: res + probs[tex.actual_class] * tex.weight, APsByFold[tex.iteration_number], tex.probabilities) 
     848            foldN[tex.iteration_number] += tex.weight 
    805849 
    806850    return statistics_by_folds(APsByFold, foldN, reportSE, True) 
     
    823867    # We take max(result, 0) to avoid -0.0000x due to rounding errors 
    824868 
    825     if res.numberOfIterations == 1: 
    826         MSEs=[0.0]*res.numberOfLearners 
     869    if res.number_of_iterations == 1: 
     870        MSEs=[0.0]*res.number_of_learners 
    827871        if argkw.get("unweighted", 0) or not res.weights: 
    828872            totweight = 0.0 
    829873            for tex in res.results: 
    830874                MSEs = map(lambda res, probs: 
    831                            res + reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actualClass], MSEs, tex.probabilities) 
     875                           res + reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actual_class], MSEs, tex.probabilities) 
    832876                totweight += tex.weight 
    833877        else: 
    834878            for tex in res.results: 
    835879                MSEs = map(lambda res, probs: 
    836                            res + tex.weight*reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actualClass], MSEs, tex.probabilities) 
     880                           res + tex.weight*reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actual_class], MSEs, tex.probabilities) 
    837881            totweight = gettotweight(res) 
    838882        check_non_zero(totweight) 
     
    842886            return [max(x/totweight+1.0, 0) for x in MSEs] 
    843887 
    844     BSs = [[0.0]*res.numberOfLearners for i in range(res.numberOfIterations)] 
    845     foldN = [0.] * res.numberOfIterations 
     888    BSs = [[0.0]*res.number_of_learners for i in range(res.number_of_iterations)] 
     889    foldN = [0.] * res.number_of_iterations 
    846890 
    847891    if argkw.get("unweighted", 0) or not res.weights: 
    848892        for tex in res.results: 
    849             BSs[tex.iterationNumber] = map(lambda rr, probs: 
    850                        rr + reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actualClass], BSs[tex.iterationNumber], tex.probabilities) 
    851             foldN[tex.iterationNumber] += 1 
     893            BSs[tex.iteration_number] = map(lambda rr, probs: 
     894                       rr + reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actual_class], BSs[tex.iteration_number], tex.probabilities) 
     895            foldN[tex.iteration_number] += 1 
    852896    else: 
    853897        for tex in res.results: 
    854             BSs[tex.iterationNumber] = map(lambda res, probs: 
    855                        res + tex.weight*reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actualClass], BSs[tex.iterationNumber], tex.probabilities) 
    856             foldN[tex.iterationNumber] += tex.weight 
     898            BSs[tex.iteration_number] = map(lambda res, probs: 
     899                       res + tex.weight*reduce(lambda s, pi: s+pi**2, probs, 0) - 2*probs[tex.actual_class], BSs[tex.iteration_number], tex.probabilities) 
     900            foldN[tex.iteration_number] += tex.weight 
    857901 
    858902    stats = statistics_by_folds(BSs, foldN, reportSE, True) 
     
    883927        apriori = class_probabilities_from_res(res) 
    884928 
    885     if res.numberOfIterations==1: 
    886         ISs = [0.0]*res.numberOfLearners 
     929    if res.number_of_iterations==1: 
     930        ISs = [0.0]*res.number_of_learners 
    887931        if argkw.get("unweighted", 0) or not res.weights: 
    888932            for tex in res.results: 
    889933              for i in range(len(tex.probabilities)): 
    890                     cls = tex.actualClass 
     934                    cls = tex.actual_class 
    891935                    ISs[i] += IS_ex(tex.probabilities[i][cls], apriori[cls]) 
    892936            totweight = gettotsize(res) 
     
    894938            for tex in res.results: 
    895939              for i in range(len(tex.probabilities)): 
    896                     cls = tex.actualClass 
     940                    cls = tex.actual_class 
    897941                    ISs[i] += IS_ex(tex.probabilities[i][cls], apriori[cls]) * tex.weight 
    898942            totweight = gettotweight(res) 
     
    903947 
    904948         
    905     ISs = [[0.0]*res.numberOfIterations for i in range(res.numberOfLearners)] 
    906     foldN = [0.] * res.numberOfIterations 
     949    ISs = [[0.0]*res.number_of_iterations for i in range(res.number_of_learners)] 
     950    foldN = [0.] * res.number_of_iterations 
    907951 
    908952    # compute info scores for each fold     
     
    910954        for tex in res.results: 
    911955            for i in range(len(tex.probabilities)): 
    912                 cls = tex.actualClass 
    913                 ISs[i][tex.iterationNumber] += IS_ex(tex.probabilities[i][cls], apriori[cls]) 
    914             foldN[tex.iterationNumber] += 1 
     956                cls = tex.actual_class 
     957                ISs[i][tex.iteration_number] += IS_ex(tex.probabilities[i][cls], apriori[cls]) 
     958            foldN[tex.iteration_number] += 1 
    915959    else: 
    916960        for tex in res.results: 
    917961            for i in range(len(tex.probabilities)): 
    918                 cls = tex.actualClass 
    919                 ISs[i][tex.iterationNumber] += IS_ex(tex.probabilities[i][cls], apriori[cls]) * tex.weight 
    920             foldN[tex.iterationNumber] += tex.weight 
     962                cls = tex.actual_class 
     963                ISs[i][tex.iteration_number] += IS_ex(tex.probabilities[i][cls], apriori[cls]) * tex.weight 
     964            foldN[tex.iteration_number] += tex.weight 
    921965 
    922966    return statistics_by_folds(ISs, foldN, reportSE, False) 
     
    932976            sums = ranks 
    933977            k = len(sums) 
    934     N = res.numberOfIterations 
     978    N = res.number_of_iterations 
    935979    k = len(sums) 
    936980    T = sum([x*x for x in sums]) 
     
    9891033    compatibility issues. 
    9901034    """ 
    991     tfpns = [ConfusionMatrix() for i in range(res.numberOfLearners)] 
     1035    tfpns = [ConfusionMatrix() for i in range(res.number_of_learners)] 
    9921036     
    9931037    if classIndex<0: 
    994         numberOfClasses = len(res.classValues) 
     1038        numberOfClasses = len(res.class_values) 
    9951039        if classIndex < -1 or numberOfClasses > 2: 
    996             cm = [[[0.0] * numberOfClasses for i in range(numberOfClasses)] for l in range(res.numberOfLearners)] 
     1040            cm = [[[0.0] * numberOfClasses for i in range(numberOfClasses)] for l in range(res.number_of_learners)] 
    9971041            if argkw.get("unweighted", 0) or not res.weights: 
    9981042                for tex in res.results: 
    999                     trueClass = int(tex.actualClass) 
     1043                    trueClass = int(tex.actual_class) 
    10001044                    for li, pred in enumerate(tex.classes): 
    10011045                        predClass = int(pred) 
     
    10041048            else: 
    10051049                for tex in enumerate(res.results): 
    1006                     trueClass = int(tex.actualClass) 
     1050                    trueClass = int(tex.actual_class) 
    10071051                    for li, pred in tex.classes: 
    10081052                        predClass = int(pred) 
     
    10201064        if argkw.get("unweighted", 0) or not res.weights: 
    10211065            for lr in res.results: 
    1022                 isPositive=(lr.actualClass==classIndex) 
    1023                 for i in range(res.numberOfLearners): 
     1066                isPositive=(lr.actual_class==classIndex) 
     1067                for i in range(res.number_of_learners): 
    10241068                    tfpns[i].addTFPosNeg(lr.probabilities[i][classIndex]>cutoff, isPositive) 
    10251069        else: 
    10261070            for lr in res.results: 
    1027                 isPositive=(lr.actualClass==classIndex) 
    1028                 for i in range(res.numberOfLearners): 
     1071                isPositive=(lr.actual_class==classIndex) 
     1072                for i in range(res.number_of_learners): 
    10291073                    tfpns[i].addTFPosNeg(lr.probabilities[i][classIndex]>cutoff, isPositive, lr.weight) 
    10301074    else: 
    10311075        if argkw.get("unweighted", 0) or not res.weights: 
    10321076            for lr in res.results: 
    1033                 isPositive=(lr.actualClass==classIndex) 
    1034                 for i in range(res.numberOfLearners): 
     1077                isPositive=(lr.actual_class==classIndex) 
     1078                for i in range(res.number_of_learners): 
    10351079                    tfpns[i].addTFPosNeg(lr.classes[i]==classIndex, isPositive) 
    10361080        else: 
    10371081            for lr in res.results: 
    1038                 isPositive=(lr.actualClass==classIndex) 
    1039                 for i in range(res.numberOfLearners): 
     1082                isPositive=(lr.actual_class==classIndex) 
     1083                for i in range(res.number_of_learners): 
    10401084                    tfpns[i].addTFPosNeg(lr.classes[i]==classIndex, isPositive, lr.weight) 
    10411085    return tfpns 
     
    15551599    import corn 
    15561600    ## merge multiple iterations into one 
    1557     mres = Orange.evaluation.testing.ExperimentResults(1, res.classifierNames, res.classValues, res.weights, classifiers=res.classifiers, loaded=res.loaded) 
     1601    mres = Orange.evaluation.testing.ExperimentResults(1, res.classifier_names, res.class_values, res.weights, classifiers=res.classifiers, loaded=res.loaded) 
    15581602    for te in res.results: 
    15591603        mres.results.append( te ) 
     
    16171661    import corn 
    16181662    ## merge multiple iterations into one 
    1619     mres = Orange.evaluation.testing.ExperimentResults(1, res.classifierNames, res.classValues, res.weights, classifiers=res.classifiers, loaded=res.loaded) 
     1663    mres = Orange.evaluation.testing.ExperimentResults(1, res.classifier_names, res.class_values, res.weights, classifiers=res.classifiers, loaded=res.loaded) 
    16201664    for te in res.results: 
    16211665        mres.results.append( te ) 
     
    16611705    weightByClasses = argkw.get("weightByClasses", True) 
    16621706 
    1663     if (res.numberOfIterations>1): 
    1664         CDTs = [CDT() for i in range(res.numberOfLearners)] 
     1707    if (res.number_of_iterations>1): 
     1708        CDTs = [CDT() for i in range(res.number_of_learners)] 
    16651709        iterationExperiments = split_by_iterations(res) 
    16661710        for exp in iterationExperiments: 
     
    16701714                CDTs[i].D += expCDTs[i].D 
    16711715                CDTs[i].T += expCDTs[i].T 
    1672         for i in range(res.numberOfLearners): 
     1716        for i in range(res.number_of_learners): 
    16731717            if is_CDT_empty(CDTs[0]): 
    16741718                return corn.computeCDT(res, classIndex, useweights) 
     
    17501794# in these cases the result is returned immediately 
    17511795def AUC_iterations(AUCcomputer, iterations, computerArgs): 
    1752     subsum_aucs = [0.] * iterations[0].numberOfLearners 
     1796    subsum_aucs = [0.] * iterations[0].number_of_learners 
    17531797    for ite in iterations: 
    17541798        aucs, foldsUsed = AUCcomputer(*(ite, ) + computerArgs) 
     
    17631807# AUC for binary classification problems 
    17641808def AUC_binary(res, useWeights = True): 
    1765     if res.numberOfIterations > 1: 
    1766         return AUC_iterations(AUC_i, split_by_iterations(res), (-1, useWeights, res, res.numberOfIterations)) 
     1809    if res.number_of_iterations > 1: 
     1810        return AUC_iterations(AUC_i, split_by_iterations(res), (-1, useWeights, res, res.number_of_iterations)) 
    17671811    else: 
    17681812        return AUC_i(res, -1, useWeights)[0] 
     
    17701814# AUC for multiclass problems 
    17711815def AUC_multi(res, useWeights = True, method = 0): 
    1772     numberOfClasses = len(res.classValues) 
    1773      
    1774     if res.numberOfIterations > 1: 
     1816    numberOfClasses = len(res.class_values) 
     1817     
     1818    if res.number_of_iterations > 1: 
    17751819        iterations = split_by_iterations(res) 
    17761820        all_ite = res 
     
    17801824     
    17811825    # by pairs 
    1782     sum_aucs = [0.] * res.numberOfLearners 
     1826    sum_aucs = [0.] * res.number_of_learners 
    17831827    usefulClassPairs = 0. 
    17841828 
     
    17891833        for classIndex1 in range(numberOfClasses): 
    17901834            for classIndex2 in range(classIndex1): 
    1791                 subsum_aucs = AUC_iterations(AUC_ij, iterations, (classIndex1, classIndex2, useWeights, all_ite, res.numberOfIterations)) 
     1835                subsum_aucs = AUC_iterations(AUC_ij, iterations, (classIndex1, classIndex2, useWeights, all_ite, res.number_of_iterations)) 
    17921836                if subsum_aucs: 
    17931837                    if method == 0: 
     
    18001844    else: 
    18011845        for classIndex in range(numberOfClasses): 
    1802             subsum_aucs = AUC_iterations(AUC_i, iterations, (classIndex, useWeights, all_ite, res.numberOfIterations)) 
     1846            subsum_aucs = AUC_iterations(AUC_i, iterations, (classIndex, useWeights, all_ite, res.number_of_iterations)) 
    18031847            if subsum_aucs: 
    18041848                if method == 0: 
     
    18271871    average, as specified by the argument method. 
    18281872    """ 
    1829     if len(res.classValues) < 2: 
     1873    if len(res.class_values) < 2: 
    18301874        raise ValueError("Cannot compute AUC on a single-class problem") 
    1831     elif len(res.classValues) == 2: 
     1875    elif len(res.class_values) == 2: 
    18321876        return AUC_binary(res, useWeights) 
    18331877    else: 
     
    18571901            classIndex = 1 
    18581902 
    1859     if res.numberOfIterations > 1: 
    1860         return AUC_iterations(AUC_i, split_by_iterations(res), (classIndex, useWeights, res, res.numberOfIterations)) 
     1903    if res.number_of_iterations > 1: 
     1904        return AUC_iterations(AUC_i, split_by_iterations(res), (classIndex, useWeights, res, res.number_of_iterations)) 
    18611905    else: 
    18621906        return AUC_i( res, classIndex, useWeights)[0] 
     
    18681912    other classes. 
    18691913    """ 
    1870     if res.numberOfIterations > 1: 
    1871         return AUC_iterations(AUC_ij, split_by_iterations(res), (classIndex1, classIndex2, useWeights, res, res.numberOfIterations)) 
     1914    if res.number_of_iterations > 1: 
     1915        return AUC_iterations(AUC_ij, split_by_iterations(res), (classIndex1, classIndex2, useWeights, res, res.number_of_iterations)) 
    18721916    else: 
    18731917        return AUC_ij(res, classIndex1, classIndex2, useWeights) 
     
    18871931            print ("%s" + ("\t%5.3f" * len(AUCrow))) % ((className, ) + tuple(AUCrow)) 
    18881932    """ 
    1889     numberOfClasses = len(res.classValues) 
    1890     numberOfLearners = res.numberOfLearners 
    1891      
    1892     if res.numberOfIterations > 1: 
     1933    numberOfClasses = len(res.class_values) 
     1934    number_of_learners = res.number_of_learners 
     1935     
     1936    if res.number_of_iterations > 1: 
    18931937        iterations, all_ite = split_by_iterations(res), res 
    18941938    else: 
    18951939        iterations, all_ite = [res], None 
    18961940     
    1897     aucs = [[[] for i in range(numberOfClasses)] for i in range(numberOfLearners)] 
     1941    aucs = [[[] for i in range(numberOfClasses)] for i in range(number_of_learners)] 
    18981942    prob = class_probabilities_from_res(res) 
    18991943         
    19001944    for classIndex1 in range(numberOfClasses): 
    19011945        for classIndex2 in range(classIndex1): 
    1902             pair_aucs = AUC_iterations(AUC_ij, iterations, (classIndex1, classIndex2, useWeights, all_ite, res.numberOfIterations)) 
     1946            pair_aucs = AUC_iterations(AUC_ij, iterations, (classIndex1, classIndex2, useWeights, all_ite, res.number_of_iterations)) 
    19031947            if pair_aucs: 
    1904                 for lrn in range(numberOfLearners): 
     1948                for lrn in range(number_of_learners): 
    19051949                    aucs[lrn][classIndex1].append(pair_aucs[lrn]) 
    19061950            else: 
    1907                 for lrn in range(numberOfLearners): 
     1951                for lrn in range(number_of_learners): 
    19081952                    aucs[lrn][classIndex1].append(-1) 
    19091953    return aucs 
     
    19151959    one degree of freedom; critical value for 5% significance is around 3.84. 
    19161960    """ 
    1917     nLearners = res.numberOfLearners 
     1961    nLearners = res.number_of_learners 
    19181962    mcm = [] 
    19191963    for i in range(nLearners): 
    1920        mcm.append([0.0]*res.numberOfLearners) 
     1964       mcm.append([0.0]*res.number_of_learners) 
    19211965 
    19221966    if not res.weights or argkw.get("unweighted"): 
    19231967        for i in res.results: 
    1924             actual = i.actualClass 
     1968            actual = i.actual_class 
    19251969            classes = i.classes 
    19261970            for l1 in range(nLearners): 
     
    19331977    else: 
    19341978        for i in res.results: 
    1935             actual = i.actualClass 
     1979            actual = i.actual_class 
    19361980            classes = i.classes 
    19371981            for l1 in range(nLearners): 
     
    19642008    if not res.weights or argkw.get("unweighted"): 
    19652009        for i in res.results: 
    1966             actual=i.actualClass 
     2010            actual=i.actual_class 
    19672011            if i.classes[lrn1]==actual: 
    19682012                if i.classes[lrn2]!=actual: 
     
    19722016    else: 
    19732017        for i in res.results: 
    1974             actual=i.actualClass 
     2018            actual=i.actual_class 
    19752019            if i.classes[lrn1]==actual: 
    19762020                if i.classes[lrn2]!=actual: 
     
    25522596    print_figure(fig, filename, **kwargs) 
    25532597 
     2598def mlc_hamming_loss(res): 
     2599    """ 
     2600    Schapire and Singer (2000) presented Hamming Loss, which id defined as:  
     2601     
     2602    :math:`HammingLoss(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{Y_i \\vartriangle Z_i}{|L|}` 
     2603    """ 
     2604    losses = [0]*res.number_of_learners 
     2605    label_num = len(res.labels) 
     2606    example_num = gettotsize(res) 
     2607     
     2608    for e in res.results: 
     2609        aclass = e.actual_class 
     2610        for i, labels in enumerate(e.classes): 
     2611            labels = map(int, labels) 
     2612            if len(labels) <> len(aclass): 
     2613                raise ValueError, "The dimensions of the classified output and the actual class array do not match." 
     2614            for j in range(label_num): 
     2615                if labels[j] != aclass[j]: 
     2616                    losses[i] += 1 
     2617             
     2618    return [float(x)/(label_num*example_num) for x in losses] 
     2619 
     2620def mlc_accuracy(res, forgiveness_rate = 1.0): 
     2621    """ 
     2622    Godbole & Sarawagi, 2004 uses the metrics accuracy, precision, recall as follows: 
     2623      
     2624    :math:`Accuracy(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Y_i \\cup Z_i|}` 
     2625     
     2626    Boutell et al. (2004) give a more generalized version using a parameter :math:`\\alpha \\ge 0`,  
     2627    called forgiveness rate: 
     2628     
     2629    :math:`Accuracy(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} (\\frac{|Y_i \\cap Z_i|}{|Y_i \\cup Z_i|})^{\\alpha}` 
     2630    """ 
     2631    accuracies = [0.0]*res.number_of_learners 
     2632    label_num = len(res.labels) 
     2633    example_num = gettotsize(res) 
     2634     
     2635    for e in res.results: 
     2636        aclass = e.actual_class 
     2637        for i, labels in enumerate(e.classes): 
     2638            labels = map(int, labels) 
     2639            if len(labels) <> len(aclass): 
     2640                raise ValueError, "The dimensions of the classified output and the actual class array do not match." 
     2641             
     2642            intersection = 0.0 
     2643            union = 0.0 
     2644            for real, pred in zip(labels, aclass): 
     2645                if real and pred: 
     2646                    intersection = intersection+1 
     2647                if real or pred: 
     2648                    union = union+1 
     2649 
     2650            if union != 0: 
     2651                accuracies[i] = accuracies[i] + intersection/union 
     2652             
     2653    return [math.pow(x/example_num,forgiveness_rate) for x in accuracies] 
     2654 
     2655def mlc_precision(res): 
     2656    """ 
     2657    :math:`Precision(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Z_i|}` 
     2658    """ 
     2659    precisions = [0.0]*res.number_of_learners 
     2660    label_num = len(res.labels) 
     2661    example_num = gettotsize(res) 
     2662     
     2663    for e in res.results: 
     2664        aclass = e.actual_class 
     2665        for i, labels in enumerate(e.classes): 
     2666            labels = map(int, labels) 
     2667            if len(labels) <> len(aclass): 
     2668                raise ValueError, "The dimensions of the classified output and the actual class array do not match." 
     2669             
     2670            intersection = 0.0 
     2671            predicted = 0.0 
     2672            for real, pred in zip(labels, aclass): 
     2673                if real and pred: 
     2674                    intersection = intersection+1 
     2675                if real: 
     2676                    predicted = predicted + 1 
     2677            if predicted <> 0: 
     2678                precisions[i] = precisions[i] + intersection/predicted 
     2679             
     2680    return [x/example_num for x in precisions] 
     2681 
     2682def mlc_recall(res): 
     2683    """ 
     2684    :math:`Recall(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Y_i|}` 
     2685    """ 
     2686    recalls = [0.0]*res.number_of_learners 
     2687    label_num = len(res.labels) 
     2688    example_num = gettotsize(res) 
     2689     
     2690    for e in res.results: 
     2691        aclass = e.actual_class 
     2692        for i, labels in enumerate(e.classes): 
     2693            labels = map(int, labels) 
     2694            if len(labels) <> len(aclass): 
     2695                raise ValueError, "The dimensions of the classified output and the actual class array do not match." 
     2696             
     2697            intersection = 0.0 
     2698            actual = 0.0 
     2699            for real, pred in zip(labels, aclass): 
     2700                if real and pred: 
     2701                    intersection = intersection+1 
     2702                if pred: 
     2703                    actual = actual + 1 
     2704            if actual <> 0: 
     2705                recalls[i] = recalls[i] + intersection/actual 
     2706             
     2707    return [x/example_num for x in recalls] 
     2708 
     2709#def mlc_ranking_loss(res): 
     2710#    pass 
     2711# 
     2712#def mlc_average_precision(res): 
     2713#    pass 
     2714# 
     2715#def mlc_hierarchical_loss(res): 
     2716#    pass 
     2717 
     2718######################################################################################### 
    25542719if __name__ == "__main__": 
    25552720    avranks =  [3.143, 2.000, 2.893, 1.964] 
  • orange/Orange/evaluation/scoring.py

    r9504 r9505  
    405405.. autofunction:: split_by_iterations 
    406406 
    407 ====================== 
    408 Scoring for multilabel 
    409 ====================== 
     407===================================== 
     408Scoring for multilabel classification 
     409===================================== 
    410410 
    411411Multi-label classification requries different metrics than those used in traditional single-label  
  • orange/Orange/evaluation/testing.py

    r9331 r9505  
    22 
    33import Orange 
    4 from Orange.misc import demangle_examples, getobjectname, printVerbose, deprecated_keywords 
     4from Orange.misc import demangle_examples, getobjectname, printVerbose, deprecated_keywords, deprecated_members 
    55 
    66#### Data structures 
     7 
     8TEST_TYPE_SINGLE = 0 
     9TEST_TYPE_MLC = 1 
    710 
    811class TestedExample: 
     
    1215    :var classes: A list of predictions of type Value, one for each classifier. 
    1316    :var probabilities: A list of probabilities of classes, one for each classifier. 
    14     :var iterationNumber: Iteration number (e.g. fold) in which the TestedExample was created/tested. 
    15     :var actualClass: The correct class of the example 
     17    :var iteration_number: Iteration number (e.g. fold) in which the TestedExample was created/tested. 
     18    :var actual_class: The correct class of the example 
    1619    :var weight: Example's weight. Even if the example set was not weighted, this attribute is present and equals 1.0. 
    1720    """ 
    1821 
    19     def __init__(self, iterationNumber=None, actualClass=None, n=0, weight=1.0): 
    20         """ 
    21         :param iterationNumber: 
    22         :param actualClass: 
     22    @deprecated_keywords({"iteration_number": "iteration_number", 
     23                          "actual_class": "actual_class"}) 
     24    def __init__(self, iteration_number=None, actual_class=None, n=0, weight=1.0): 
     25        """ 
     26        :param iteration_number: 
     27        :param actual_class: 
    2328        :param n: 
    2429        :param weight: 
     
    2631        self.classes = [None]*n 
    2732        self.probabilities = [None]*n 
    28         self.iterationNumber = iterationNumber 
    29         self.actualClass= actualClass 
     33        self.iteration_number = iteration_number 
     34        self.actual_class= actual_class 
    3035        self.weight = weight 
    3136 
     
    3338        """Appends a new result (class and probability prediction by a single classifier) to the classes and probabilities field.""" 
    3439     
    35         if type(aclass.value)==float: 
     40        if type(aclass)==list: 
     41            self.classes.append(aclass) 
     42            self.probabilities.append(aprob) 
     43        elif type(aclass.value)==float: 
    3644            self.classes.append(float(aclass)) 
    3745            self.probabilities.append(aprob) 
     
    4250    def set_result(self, i, aclass, aprob): 
    4351        """Sets the result of the i-th classifier to the given values.""" 
    44         if type(aclass.value)==float: 
     52        if type(aclass)==list: 
     53            self.classes[i] = aclass 
     54            self.probabilities[i] = aprob 
     55        elif type(aclass.value)==float: 
    4556            self.classes[i] = float(aclass) 
    4657            self.probabilities[i] = aprob 
     
    5263        return str(self.__dict__) 
    5364 
     65TestedExample = deprecated_members({"iterationNumber": "iteration_number", 
     66                                    "actualClass": "actual_class" 
     67                                    })(TestedExample) 
     68 
    5469class ExperimentResults(object): 
    5570    """ 
     
    6176    :var classifiers: A list of classifiers, one element for each repetition (eg. fold). Each element is a list 
    6277      of classifiers, one for each learner. This field is used only if storing is enabled by ``storeClassifiers=1``. 
    63     :var numberOfIterations: Number of iterations. This can be the number of folds (in cross validation) 
    64       or the number of repetitions of some test. ``TestedExample``'s attribute ``iterationNumber`` should 
    65       be in range ``[0, numberOfIterations-1]``. 
    66     :var numberOfLearners: Number of learners. Lengths of lists classes and probabilities in each :obj:`TestedExample` 
    67       should equal ``numberOfLearners``. 
     78    :var number_of_iterations: Number of iterations. This can be the number of folds (in cross validation) 
     79      or the number of repetitions of some test. ``TestedExample``'s attribute ``iteration_number`` should 
     80      be in range ``[0, number_of_iterations-1]``. 
     81    :var number_of_learners: Number of learners. Lengths of lists classes and probabilities in each :obj:`TestedExample` 
     82      should equal ``number_of_learners``. 
    6883    :var loaded: If the experimental method supports caching and there are no obstacles for caching (such as unknown 
    6984      random seeds), this is a list of boolean values. Each element corresponds to a classifier and tells whether the 
     
    7388      testing examples but you would like to ignore the weights in statistics. 
    7489    """ 
    75     def __init__(self, iterations, classifierNames, classValues=None, weights=None, baseClass=-1, domain=None, **argkw): 
    76         self.classValues = classValues 
    77         self.classifierNames = classifierNames 
    78         self.numberOfIterations = iterations 
    79         self.numberOfLearners = len(classifierNames) 
     90    @deprecated_keywords({"classifierNames": "classifier_names", 
     91                          "classValues": "class_values", 
     92                          "baseClass": "base_class", 
     93                          "numberOfIterations": "number_of_iterations", 
     94                          "numberOfLearners": "number_of_learners"}) 
     95    def __init__(self, iterations, classifier_names, class_values=None, weights=None, base_class=-1, domain=None, test_type=TEST_TYPE_SINGLE, **argkw): 
     96        self.class_values = class_values 
     97        self.classifier_names = classifier_names 
     98        self.number_of_iterations = iterations 
     99        self.number_of_learners = len(classifier_names) 
    80100        self.results = [] 
    81101        self.classifiers = [] 
    82102        self.loaded = None 
    83         self.baseClass = baseClass 
     103        self.base_class = base_class 
    84104        self.weights = weights 
     105        self.test_type = test_type 
    85106 
    86107        if domain is not None: 
    87             if domain.classVar.varType == Orange.data.Type.Discrete: 
    88                 self.classValues = list(domain.classVar.values) 
    89                 self.baseClass = domain.classVar.base_value 
    90                 self.converter = int 
    91             else: 
    92                 self.baseClass = self.classValues = None 
    93                 self.converter = float 
     108            self.base_class = self.class_values = None 
     109            if test_type==TEST_TYPE_SINGLE: 
     110                if domain.class_var.var_type == Orange.data.Type.Discrete: 
     111                    self.class_values = list(domain.class_var.values) 
     112                    self.base_class = domain.class_var.base_value 
     113                    self.converter = int 
     114                else: 
     115                    self.converter = float 
     116            elif test_type==TEST_TYPE_MLC: 
     117                self.labels = [var.name for var in domain.class_vars] 
     118                self.converter = lambda vals: [int(val) if val.variable.var_type == Orange.data.Type.Discrete 
     119                                               else float(val) for val in vals] 
    94120 
    95121        self.__dict__.update(argkw) 
     
    102128 
    103129    def create_tested_example(self, fold, example): 
     130        actual = [example.getclass, example.get_classes][self.test_type]() 
    104131        return TestedExample(fold, 
    105                              self.converter(example.getclass()), 
    106                              self.numberOfLearners, 
     132                             self.converter(actual), 
     133                             self.number_of_learners, 
    107134                             example.getweight(self.weights)) 
    108135 
     
    112139            del r.classes[index] 
    113140            del r.probabilities[index] 
    114         del self.classifierNames[index] 
    115         self.numberOfLearners -= 1 
     141        del self.classifier_names[index] 
     142        self.number_of_learners -= 1 
    116143 
    117144    def add(self, results, index, replace=-1): 
     
    119146        if len(self.results)!=len(results.results): 
    120147            raise SystemError("mismatch in number of test cases") 
    121         if self.numberOfIterations!=results.numberOfIterations: 
     148        if self.number_of_iterations!=results.number_of_iterations: 
    122149            raise SystemError("mismatch in number of iterations (%d<>%d)" % \ 
    123                   (self.numberOfIterations, results.numberOfIterations)) 
     150                  (self.number_of_iterations, results.number_of_iterations)) 
    124151        if len(self.classifiers) and len(results.classifiers)==0: 
    125152            raise SystemError("no classifiers in results") 
    126153 
    127         if replace < 0 or replace >= self.numberOfLearners: # results for new learner 
    128             self.classifierNames.append(results.classifierNames[index]) 
    129             self.numberOfLearners += 1 
     154        if replace < 0 or replace >= self.number_of_learners: # results for new learner 
     155            self.classifier_names.append(results.classifier_names[index]) 
     156            self.number_of_learners += 1 
    130157            for i,r in enumerate(self.results): 
    131158                r.classes.append(results.results[i].classes[index]) 
    132159                r.probabilities.append(results.results[i].probabilities[index]) 
    133160            if len(self.classifiers): 
    134                 for i in range(self.numberOfIterations): 
     161                for i in range(self.number_of_iterations): 
    135162                    self.classifiers[i].append(results.classifiers[i][index]) 
    136163        else: # replace results of existing learner 
    137             self.classifierNames[replace] = results.classifierNames[index] 
     164            self.classifier_names[replace] = results.classifier_names[index] 
    138165            for i,r in enumerate(self.results): 
    139166                r.classes[replace] = results.results[i].classes[index] 
    140167                r.probabilities[replace] = results.results[i].probabilities[index] 
    141168            if len(self.classifiers): 
    142                 for i in range(self.numberOfIterations): 
     169                for i in range(self.number_of_iterations): 
    143170                    self.classifiers[replace] = results.classifiers[i][index] 
    144171 
    145172    def __repr__(self): 
    146173        return str(self.__dict__) 
     174 
     175 
     176ExperimentResults = deprecated_members({"classValues": "class_values", 
     177                                        "classifierNames": "classifier_names", 
     178                                        "baseClass": "base_class", 
     179                                        "numberOfIterations": "number_of_iterations", 
     180                                        "numberOfLearners": "number_of_learners" 
     181                                        })(ExperimentResults) 
     182 
    147183#### Experimental procedures 
    148184class Evaluation(object): 
     
    192228        return self.test_with_indices(learners, examples, indices=range(len(examples)), preprocessors=preprocessors, 
    193229                                 callback=callback, store_classifiers=store_classifiers, store_examples=store_examples) 
     230     
     231    def check_test_type(self, instances, learners): 
     232        learner_is_mlc = [isinstance(l, Orange.multilabel.MultiLabelLearner) 
     233                          for l in learners] 
     234        multi_label = any(learner_is_mlc) 
     235        if multi_label and not all(learner_is_mlc): 
     236            raise ValueError("Test on mixed types of learners (MLC and non-MLC) not possible") 
     237         
     238        if multi_label and not instances.domain.class_vars: 
     239            raise ValueError("Test data with multiple labels (class vars) expected") 
     240        if not multi_label and not instances.domain.class_var: 
     241            raise ValueError("Test data set without class attributes") 
     242         
     243        return [TEST_TYPE_SINGLE, TEST_TYPE_MLC][multi_label] 
    194244 
    195245     
     
    216266        if not examples: 
    217267            raise ValueError("Test data set with no examples") 
    218         if not examples.domain.classVar: 
    219             raise ValueError("Test data set without class attribute") 
     268        test_type = self.check_test_type(examples, learners) 
    220269        if "cache" in kwargs: 
    221270            raise ValueError("This feature is no longer supported.") 
    222  
    223271 
    224272        niterations = max(indices)+1 
     
    226274                                        classifierNames = [getobjectname(l) for l in learners], 
    227275                                        domain=examples.domain, 
    228                                         weights=weight) 
     276                                        weights=weight, 
     277                                        test_type=test_type) 
    229278 
    230279        test_result.results = [test_result.create_tested_example(indices[i], example) 
     
    288337 
    289338        examples, weight = demangle_examples(examples) 
     339        test_type = self.check_test_type(examples, learners) 
    290340 
    291341        # If preprocessors are not used, we use the same dataset for learning and testing. Otherwise we need to 
     
    300350 
    301351        test_results = ExperimentResults(1, 
    302                                         classifierNames = [getobjectname(l) for l in learners], 
     352                                        classifier_names = [getobjectname(l) for l in learners], 
     353                                        test_type = test_type, 
    303354                                        domain=examples.domain, 
    304355                                        weights=weight) 
     
    336387        test_set, test_weight = demangle_examples(test_set) 
    337388 
     389        test_type = self.check_test_type(learn_set, learners) 
     390        self.check_test_type(test_set, learners) 
     391         
    338392        test_results = ExperimentResults(1, 
    339                                         classifierNames = [getobjectname(l) for l in learners], 
     393                                        classifier_names = [getobjectname(l) for l in learners], 
    340394                                        domain=test_set.domain, 
     395                                        test_type = test_type, 
    341396                                        weights=test_weight) 
    342397        test_results.results = [test_results.create_tested_example(0, example) 
     
    383438        examples, weight = demangle_examples(examples) 
    384439 
     440        test_type = self.check_test_type(examples, learners) 
     441         
    385442        test_results = ExperimentResults(times, 
    386443                                        classifierNames = [getobjectname(l) for l in learners], 
    387444                                        domain=examples.domain, 
     445                                        test_type = test_type, 
    388446                                        weights=weight) 
    389447        test_results.classifiers = [] 
     
    512570        learn_set, learn_weight = demangle_examples(learn_set) 
    513571        test_set, test_weight = demangle_examples(test_set) 
    514  
     572        test_type = self.check_test_type(learn_set, learners) 
     573        self.check_test_type(test_set, learners) 
     574         
    515575        indices = Orange.core.MakeRandomIndices2(stratified = stratification, randomGenerator = random_generator) 
    516576         
     
    520580                                        classifierNames = [getobjectname(l) for l in learners], 
    521581                                        domain=test_set.domain, 
     582                                        test_type = test_type, 
    522583                                        weights=test_weight) 
    523584            offset = 0 
     
    550611 
    551612        examples, weight = demangle_examples(examples) 
     613        test_type = self.check_test_type(examples, learners) 
    552614 
    553615        test_results = ExperimentResults(1, 
    554616                                        classifierNames = [getobjectname(l) for l in classifiers], 
    555617                                        domain=examples.domain, 
     618                                        test_type = test_type, 
    556619                                        weights=weight) 
    557620        test_results.results = [test_results.create_tested_example(0, example) 
     
    602665                # Hide actual class to prevent cheating 
    603666                ex2 = Orange.data.Instance(example) 
    604                 ex2.setclass("?") 
     667                if ex2.domain.class_var: ex2.setclass("?") 
     668                if ex2.domain.class_vars: ex2.set_classes(["?" for cv in ex2.domain.class_vars]) 
    605669                result = classifier(ex2, Orange.core.GetBoth) 
    606670                results.append((e, c, result)) 
  • orange/Orange/multilabel/br.py

    r9500 r9505  
    66*************************************** 
    77 
    8 The most common problem transformation method is Binary Relevance method.  
    9 It learns :math:`|L|` binary classifiers :math:`H_l:X \\rightarrow \{l,-l\}`,  
     8The most basic problem transformation method for multi-label classification 
     9is the Binary Relevance method.  
     10It learns :math:`|L|` binary classifiers :math:`H_l:X \\rightarrow \{l, \\neg l\}`,  
    1011one for each different label :math:`l` in :math:`L`. 
    1112It transforms the original data set into :math:`|L|` data sets :math:`D_l`  
     
    1819International Journal of Data Warehousing and Mining, 3(3):1-13, 2007. 
    1920 
    20 Note that a copy of the table is made for each label to enable construction of 
     21Note that a copy of the table is made in RAM for each label to enable construction of 
    2122a classifier. Due to technical limitations, that is currently unavoidable and 
    2223should be remedied in Orange 3. 
     
    2728   :show-inheritance: 
    2829  
    29    .. method:: __new__(instances, base_learner, **argkw)  
    30    BinaryRelevanceLearner Constructor 
    31     
    3230   :param instances: a table of instances. 
    3331   :type instances: :class:`Orange.data.Table` 
    3432       
    35    :param base_learner: the binary learner, the default learner is BayesLearner 
     33   :param base_learner: the binary learner, the default learner is  
     34                        :class:`Orange.classification.bayes.NaiveLearner`. 
    3635   :type base_learner: :class:`Orange.classification.Learner` 
    3736 
     
    4140   :show-inheritance: 
    4241 
    43    .. method:: __call__(self, example, result_type) 
    44    :rtype: a list of :class:`Orange.data.Value`,  
    45               a list of :class:`Orange.statistics.Distribution` or a tuple 
    46               with both  
    4742    
    4843Examples 
     
    5045 
    5146The following example demonstrates a straightforward invocation of 
    52 this algorithm (`mlc-classify.py`_, uses `multidata.tab`_): 
     47this algorithm (:download:`mlc-classify.py <code/mlc-classify.py>`, uses 
     48:download:`emotions.tab <code/emotions.tab>`): 
    5349 
    5450.. literalinclude:: code/mlc-classify.py 
    55    :lines: 1-13 
    56  
    57 .. _mlc-classify.py: code/mlc-br-example.py 
    58 .. _multidata.tab: code/multidata.tab 
     51   :lines: 6, 15-17 
    5952 
    6053""" 
     
    10598class BinaryRelevanceClassifier(_multibase.MultiLabelClassifier): 
    10699    def __init__(self, **kwds): 
    107         self.multi_flag = 1 
    108100        self.__dict__.update(kwds) 
    109101         
    110102    def __call__(self, instance, result_type=Orange.classification.Classifier.GetValue): 
     103        """ 
     104        :rtype: a list of :class:`Orange.data.Value`, a list of :class:`Orange.statistics.distribution.Distribution`, or a tuple with both 
     105        """ 
    111106        domain = self.instances.domain 
    112107        labels = [] 
  • orange/Orange/multilabel/brknn.py

    r9500 r9505  
    1818   :show-inheritance: 
    1919  
    20    .. method:: __new__(instances, **argkw)  
    21    BRkNNLearner Constructor 
    22     
    2320   :param instances: a table of instances. 
    2421   :type instances: :class:`Orange.data.Table` 
     
    2825   :members: 
    2926   :show-inheritance: 
    30  
    31    .. method:: __call__(self, example, result_type) 
    32    :rtype: a list of :class:`Orange.data.Value`,  
    33               :class:`Orange.statistics.Distribution` or a tuple with both  
    3427    
    3528Examples 
     
    3730 
    3831The following example demonstrates a straightforward invocation of 
    39 this algorithm (`mlc-classify.py`_, uses `multidata.tab`_): 
     32this algorithm (:download:`mlc-classify.py <code/mlc-classify.py>`, uses 
     33:download:`emotions.tab <code/emotions.tab>`): 
    4034 
    4135.. literalinclude:: code/mlc-classify.py 
    42    :lines: 1-3, 24-27 
    43  
    44 .. _mlc-classify.py: code/mlc-example.py 
    45 .. _multidata.tab: code/multidata.tab 
     36   :lines: 6-9 
    4637 
    4738""" 
     
    121112class BRkNNClassifier(_multiknn.MultikNNClassifier): 
    122113    def __call__(self, instance, result_type=Orange.classification.Classifier.GetValue): 
     114        """ 
     115        :rtype: a list of :class:`Orange.data.Value`, a list of :class:`Orange.statistics.distribution.Distribution`, or a tuple with both 
     116        """ 
    123117        domain = self.instances.domain 
    124118 
  • orange/Orange/multilabel/lp.py

    r9500 r9505  
    77 
    88LabelPowerset Classification is another transformation method for multi-label classification.  
    9 It considers each different set of labels that exist in the multi-label data as a  
    10 single label. It so learns one single-label classifier :math:`H:X \\rightarrow P(L)`, where 
    11 :math:`P(L)` is the power set of L. 
     9It considers each different set of labels that exists in the multi-label data as a  
     10single class. Thus it learns a classification problem :math:`H:X \\rightarrow \\mathbb{P}(L)`, where 
     11:math:`\\mathbb{P}(L)` is the power set of L. 
    1212For more information, see G. Tsoumakas and I. Katakis. `Multi-label classification: An overview  
    1313<http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.104.9401&rep=rep1&type=pdf>`_.  
     
    1919   :show-inheritance: 
    2020  
    21    .. method:: __new__(instances, base_learner, **argkw)  
    22    LabelPowersetLearner Constructor 
    23     
    2421   :param instances: a table of instances. 
    2522   :type instances: :class:`Orange.data.Table` 
     
    3229   :members: 
    3330   :show-inheritance: 
    34  
    35    .. method:: __call__(self, example, result_type) 
    36    :rtype: a list of :class:`Orange.data.Value`,  
    37               :class:`Orange.statistics.Distribution` or a tuple with both  
    3831    
    3932Examples 
     
    4134 
    4235The following example demonstrates a straightforward invocation of 
    43 this algorithm (`mlc-classify.py`_, uses `multidata.tab`_): 
     36this algorithm (:download:`mlc-classify.py <code/mlc-classify.py>`, uses 
     37:download:`emotions.tab <code/emotions.tab>`): 
    4438 
    4539.. literalinclude:: code/mlc-classify.py 
    46    :lines: 1-3, 15-22 
    47  
    48 .. _mlc-classify.py: code/mlc-example.py 
    49 .. _multidata.tab: code/multidata.tab 
     40   :lines: 6, 19-21 
    5041 
    5142""" 
     
    111102class LabelPowersetClassifier(_multibase.MultiLabelClassifier): 
    112103    def __call__(self, instance, result_type=Orange.classification.Classifier.GetValue): 
     104        """ 
     105        :rtype: a list of :class:`Orange.data.Value`, a list of :class:`Orange.statistics.distribution.Distribution`, or a tuple with both 
     106        """ 
    113107        labels = [] 
    114108        prob = [] 
  • orange/Orange/multilabel/mlknn.py

    r9500 r9505  
    1010In essence, ML-kNN uses the kNN algorithm independently for each label :math:`l`. 
    1111It finds the k nearest examples to the test instance and considers those that are 
    12 labelled at least with :math:`l` as positive and the rest as negative. 
     12labeled at least with :math:`l` as positive and the rest as negative. 
    1313Actually this method follows the paradigm of Binary Relevance (BR). What mainly 
    1414differentiates this method from BR is the use of prior probabilities. ML-kNN has also 
     
    2222   :members: 
    2323   :show-inheritance: 
    24   
    25    .. method:: __new__(instances, **argkw)  
    26    MLkNNLearner Constructor 
    27     
     24 
    2825   :param instances: a table of instances. 
    2926   :type instances: :class:`Orange.data.Table` 
    30  
    31 .. index:: MLkNN Classifier 
     27  
     28 
     29.. index:: ML-kNN Classifier 
    3230.. autoclass:: Orange.multilabel.MLkNNClassifier 
    3331   :members: 
    3432   :show-inheritance: 
    3533 
    36    .. method:: __call__(self, example, result_type) 
    37    :rtype: a list of :class:`Orange.data.Value`,  
    38               :class:`Orange.statistics.Distribution` or a tuple with both  
    39     
    4034Examples 
    4135======== 
    4236 
    4337The following example demonstrates a straightforward invocation of 
    44 this algorithm (`mlc-classify.py`_, uses `multidata.tab`_): 
     38this algorithm (:download:`mlc-classify.py <code/mlc-classify.py>`, uses 
     39:download:`emotions.tab <code/emotions.tab>`): 
    4540 
    4641.. literalinclude:: code/mlc-classify.py 
    47    :lines: 1-3, 24-27 
    48  
    49 .. _mlc-classify.py: code/mlc-example.py 
    50 .. _multidata.tab: code/multidata.tab 
     42   :lines: 6, 11-13 
    5143 
    5244""" 
     
    168160 
    169161    def compute_prior(self, instances): 
    170         """ Computing Prior Probabilities for each label of the training set """ 
     162        """ Compute prior probability for each label of the training set. """ 
    171163        prior_prob = [] 
    172164        for lvar in instances.domain.class_vars: 
     
    176168             
    177169    def compute_cond(self, instances): 
    178         """ Computing Posterior Probabilities for each label of the training set """ 
     170        """ Compute posterior probabilities for each label of the training set. """ 
    179171        k = self.k 
    180172         
     
    209201class MLkNNClassifier(_multiknn.MultikNNClassifier):       
    210202    def __call__(self, instance, result_type=Orange.classification.Classifier.GetValue): 
     203        """ 
     204        :rtype: a list of :class:`Orange.data.Value`, a list of :class:`Orange.statistics.distribution.Distribution`, or a tuple with both 
     205        """ 
    211206        neighbors = self.knn(instance, self.k) 
    212207         
  • orange/Orange/multilabel/mulan.py

    r9500 r9505  
    1919    doc = xml.dom.minidom.parse(xml_name) 
    2020     
    21     labels = [] 
    22     for node in doc.getElementsByTagName("label"): 
    23         labels.append( node.getAttribute("name").__str__() ) 
     21    labels = [str(node.getAttribute("name")) 
     22              for node in doc.getElementsByTagName("label")] 
    2423         
    2524    #load ARFF file 
     
    2827     
    2928    #remove class tag 
    30     features = [v for v in domain.attributes if v.name not in labels] 
    31     class_vars = [v for v in domain.attributes if v.name in labels] 
     29    features = [v for v in domain.variables if v.name not in labels] 
     30    class_vars = [v for v in domain.variables if v.name in labels] 
    3231    domain = Orange.data.Domain(features, None, class_vars = class_vars) 
    3332     
  • orange/Orange/multilabel/multibase.py

    r9460 r9505  
    22from Orange.core import BayesLearner as _BayesLearner 
    33 
    4 ''' 
    5  * Base class for multi-label learners, to handle multi-label data. 
    6  * 
    7  * @author Wencan Luo (wencanluo.cn@gmail.com) 
    8  * @version : 0.01 $  
    9 ''' 
    10   
    114class MultiLabelLearner(Orange.classification.Learner): 
    125    def __new__(cls, **argkw): 
     
    158     
    169    def __init__(self, **argkw): 
    17         self.multi_flag = 1 
    1810        self.__dict__.update(argkw) 
    1911         
    2012class MultiLabelClassifier(Orange.classification.Classifier): 
    2113    def __init__(self, **argkw): 
    22         self.multi_flag = 1 
    2314        self.__dict__.update(argkw) 
    2415      
  • orange/Orange/multilabel/multiknn.py

    r9500 r9505  
    1010 
    1111MultikNN Classification is the base class of kNN method based multi-label 
    12 classification.  
    13 It is an adaptation of the kNN lazy learning algorithm for multi-label data.  
    14 For more information, see Zhang, M. and Zhou, Z. 2007. `ML-KNN: A lazy learning 
    15 approach to multi-label learning <http://dx.doi.org/10.1016/j.patcog.2006.12.019>`_.  
    16 Pattern Recogn. 40, 7 (Jul. 2007), 2038-2048.   
     12classification. 
    1713 
    1814.. index:: MultikNN Learner 
     
    2117   :show-inheritance: 
    2218  
    23    .. method:: __new__(instances, **argkw)  
    24    MLkNNLearner Constructor 
    25     
    2619   :param instances: a table of instances. 
    2720   :type instances: :class:`Orange.data.Table` 
    2821 
    29 .. index:: MLkNN Classifier 
    30 .. autoclass:: Orange.multilabel.MLkNNClassifier 
     22.. index:: MultikNN Classifier 
     23.. autoclass:: Orange.multilabel.MultikNNClassifier 
    3124   :members: 
    3225   :show-inheritance: 
    33  
    34    .. method:: __call__(self, example, result_type) 
    35    :rtype: a list of :class:`Orange.data.Value`,  
    36               :class:`Orange.statistics.Distribution` or a tuple with both  
    3726    
    38 Examples 
    39 ======== 
    40  
    41 The following example demonstrates a straightforward invocation of 
    42 this algorithm (`mlc-classify.py`_, uses `multidata.tab`_): 
    43  
    44 .. literalinclude:: code/mlc-classify.py 
    45    :lines: 1-3, 24-27 
    46  
    47 .. _mlc-classify.py: code/mlc-example.py 
    48 .. _multidata.tab: code/multidata.tab 
    49  
    5027""" 
    5128import random 
     
    9774        self.weight_id = weight_id 
    9875 
    99 class MultikNNClassifier(_multibase.MultiLabelClassifier):    
     76class MultikNNClassifier(_multibase.MultiLabelClassifier): 
    10077    pass 
    10178         
  • orange/OrangeWidgets/Classify/OWRandomForest.py

    r9450 r9505  
    1414 
    1515class OWRandomForest(OWWidget): 
    16     settingsList = ["name", "trees", "attributes", "attributesP", "preNodeInst", "preNodeInstP", "limitDepth", "limitDepthP", "rseed", "outtree" ] 
     16    settingsList = ["name", "trees", "attributes", "attributesP", "preNodeInst", "preNodeInstP", "limitDepth", "limitDepthP", "rseed"] 
    1717 
    1818    def __init__(self, parent=None, signalManager = None, name='Random Forest'): 
    1919        OWWidget.__init__(self, parent, signalManager, name, wantMainArea=False, resizingEnabled=False) 
    2020 
    21         self.inputs = [("Examples", ExampleTable, self.setData), ("Preprocess", PreprocessedLearner, self.setPreprocessor)] 
    22         self.outputs = [("Learner", orange.Learner),("Random Forest Classifier", orange.Classifier),("Choosen Tree", orange.TreeClassifier) ] 
     21        self.inputs = [("Examples", ExampleTable, self.setData), 
     22                       ("Preprocess", PreprocessedLearner, self.setPreprocessor)] 
     23         
     24        self.outputs = [("Learner", orange.Learner), 
     25                        ("Random Forest Classifier", orange.Classifier)] 
    2326 
    2427        self.name = 'Random Forest' 
     
    3134        self.limitDepthP = 3 
    3235        self.rseed = 0 
    33         self.outtree = 0 
    3436 
    3537        self.maxTrees = 10000 
     
    5961        OWGUI.separator(self.controlArea) 
    6062 
    61         #self.sBox = QVGroupBox(self.controlArea) 
    62         #self.sBox.setTitle('Single Tree Output') 
    63  
    64         self.streesBox = OWGUI.spin(self.controlArea, self, "outtree", -1, self.maxTrees, orientation="horizontal", label="Index of tree on the output", callback=[self.period, self.extree]) 
    65         #self.streesBox.setDisabled(True) 
    66         self.streeEnabled(False) 
    67  
    6863        OWGUI.separator(self.controlArea) 
    6964 
     
    8378                           ]) 
    8479        self.reportData(self.data) 
    85          
    86     def period(self): 
    87         if self.outtree == -1: self.outtree = self.claTrees-1 
    88         elif self.outtree >= self.claTrees: self.outtree = 0 
    89  
    90     def extree(self): 
    91         self.send("Choosen Tree", self.classifier.classifiers[self.outtree]) 
    92  
    93     def streeEnabled(self, status): 
    94         if status: 
    95             self.claTrees = self.trees 
    96             self.streesBox.setDisabled(False) 
    97             self.period() 
    98             self.extree() 
    99         else: 
    100             #a = 1 
    101             self.streesBox.setDisabled(True) 
    10280 
    10381    def constructLearner(self): 
     
    10886            attrs = self.attributesP 
    10987 
    110         smallLearner = orngTree.TreeLearner() 
     88        from Orange.classification.tree import SimpleTreeLearner 
     89         
     90        smallLearner = SimpleTreeLearner() 
    11191 
    11292        if self.preNodeInst: 
    113             smallLearner.stop.minExamples = self.preNodeInstP  
     93            smallLearner.min_instances = self.preNodeInstP  
    11494        else: 
    115             smallLearner.stop.minExamples = 0 
    116  
    117         smallLearner.storeExamples = 1 
    118         smallLearner.storeNodeClassifier = 1 
    119         smallLearner.storeContingencies = 1 
    120         smallLearner.storeDistributions = 1 
     95            smallLearner.min_instances = 0 
    12196 
    12297        if self.limitDepth: 
    123             smallLearner.maxDepth = self.limitDepthP 
     98            smallLearner.max_depth = self.limitDepthP  
    12499         
    125100        learner = orngEnsemble.RandomForestLearner(base_learner=smallLearner,  
     
    155130                self.classifier = learner(self.data) 
    156131                self.classifier.name = self.name 
    157                 self.streeEnabled(True) 
    158132            except Exception, (errValue): 
    159133                self.error(str(errValue)) 
    160134                self.classifier = None 
    161                 self.streeEnabled(False) 
    162135            pb.finish() 
    163136        else: 
    164137            self.classifier = None 
    165             self.streeEnabled(False) 
    166138 
    167139        self.send("Random Forest Classifier", self.classifier) 
  • orange/OrangeWidgets/Data/OWFile.py

    r9492 r9505  
    117117 
    118118        if len(self.recentFiles) > 0 and exists(self.recentFiles[0]): 
    119             try: 
    120                 self.openFile(self.recentFiles[0], 0, self.symbolDK, self.symbolDC) 
    121             except: 
    122                 pass 
     119            self.openFile(self.recentFiles[0], 0, self.symbolDK, self.symbolDC) 
     120 
    123121        self.connect(self.filecombo, SIGNAL('activated(int)'), self.selectFile) 
    124122 
     
    286284        warnings = "" 
    287285        metas = data.domain.getmetas() 
    288         if hasattr(data, "attribute_load_status"):  # For some file formats, this is not populated 
    289             for status, messageUsed, messageNotUsed in [ 
    290                                     (orange.Variable.MakeStatus.Incompatible, 
    291                                      "", 
    292                                      "The following attributes already existed but had a different order of values, so new attributes needed to be created"), 
    293                                     (orange.Variable.MakeStatus.NoRecognizedValues, 
    294                                      "The following attributes were reused although they share no common values with the existing attribute of the same names", 
    295                                      "The following attributes were not reused since they share no common values with the existing attribute of the same names"), 
    296                                     (orange.Variable.MakeStatus.MissingValues, 
    297                                      "The following attribute(s) were reused although some values needed to be added", 
    298                                      "The following attribute(s) were not reused since they miss some values") 
    299                                     ]: 
    300                 if self.createNewOn > status: 
    301                     message = messageUsed 
     286        for status, messageUsed, messageNotUsed in [ 
     287                                (orange.Variable.MakeStatus.Incompatible, 
     288                                 "", 
     289                                 "The following attributes already existed but had a different order of values, so new attributes needed to be created"), 
     290                                (orange.Variable.MakeStatus.NoRecognizedValues, 
     291                                 "The following attributes were reused although they share no common values with the existing attribute of the same names", 
     292                                 "The following attributes were not reused since they share no common values with the existing attribute of the same names"), 
     293                                (orange.Variable.MakeStatus.MissingValues, 
     294                                 "The following attribute(s) were reused although some values needed to be added", 
     295                                 "The following attribute(s) were not reused since they miss some values") 
     296                                ]: 
     297            if self.createNewOn > status: 
     298                message = messageUsed 
     299            else: 
     300                message = messageNotUsed 
     301            if not message: 
     302                continue 
     303            attrs = [attr.name for attr, stat in zip(data.domain, data.attributeLoadStatus) if stat == status] \ 
     304                  + [attr.name for id, attr in metas.items() if data.metaAttributeLoadStatus.get(id, -99) == status] 
     305            if attrs: 
     306                jattrs = ", ".join(attrs) 
     307                if len(jattrs) > 80: 
     308                    jattrs = jattrs[:80] + "..." 
     309                if len(jattrs) > 30:  
     310                    warnings += "<li>%s:<br/> %s</li>" % (message, jattrs) 
    302311                else: 
    303                     message = messageNotUsed 
    304                 if not message: 
    305                     continue 
    306                 attrs = [] 
    307                 attrs = [attr.name for attr, stat in zip(data.domain, data.attributeLoadStatus) if stat == status] \ 
    308                       + [attr.name for id, attr in metas.items() if data.metaAttributeLoadStatus.get(id, -99) == status] 
    309                 if attrs: 
    310                     jattrs = ", ".join(attrs) 
    311                     if len(jattrs) > 80: 
    312                         jattrs = jattrs[:80] + "..." 
    313                     if len(jattrs) > 30:  
    314                         warnings += "<li>%s:<br/> %s</li>" % (message, jattrs) 
    315                     else: 
    316                         warnings += "<li>%s: %s</li>" % (message, jattrs) 
     312                    warnings += "<li>%s: %s</li>" % (message, jattrs) 
    317313 
    318314        self.warnings.setText(warnings) 
  • orange/OrangeWidgets/Evaluate/OWCalibrationPlot.py

    r9497 r9505  
    333333 
    334334        self.dres = dres 
    335          
    336         self.warning(0) 
    337         if self.dres and len(dres.results) > 0 and dres.results[0].multilabel_flag == 1: 
    338             text = "there is no consensus on how to apply it in multi-class problems" 
    339             self.warning(0, text) 
    340             return 
    341          
     335 
    342336        self.graphs = [] 
    343337        if self.dres <> None: 
  • orange/OrangeWidgets/Evaluate/OWConfusionMatrix.py

    r9497 r9505  
    110110            return 
    111111 
    112         self.warning(0) 
    113         if len(res.results) > 0 and res.results[0].multilabel_flag == 1: 
    114             text = "there is no consensus on how to apply it in multi-class problems" 
    115             self.warning(0, text) 
    116             return 
    117          
    118112        self.matrix = orngStat.confusionMatrices(res, -2) 
    119113 
  • orange/OrangeWidgets/Evaluate/OWLiftCurve.py

    r9497 r9505  
    346346            self.openContext("", dres) 
    347347            return 
    348          
    349         self.warning(0) 
    350         if len(dres.results) > 0 and dres.results[0].multilabel_flag == 1: 
    351             text = "there is no consensus on how to apply it in multi-class problems" 
    352             self.warning(0, text) 
    353             return 
    354          
     348 
    355349        self.defaultPerfLinePValues = [] 
    356350        if self.dres <> None: 
  • orange/OrangeWidgets/Evaluate/OWROC.py

    r9497 r9505  
    1212import orngStat, orngTest 
    1313import statc, math 
    14 import time 
    15 import warnings 
    1614 
    1715def TCconvexHull(curves): 
     
    975973            self.openContext("", dres) 
    976974            return 
    977          
    978         self.warning(0) 
    979         if len(dres.results) > 0 and dres.results[0].multilabel_flag == 1: 
    980             text = "there is no consensus on how to apply it in multi-class problems" 
    981             self.warning(0, text) 
    982             return 
    983              
    984975        self.dres = dres 
    985976 
  • orange/OrangeWidgets/Evaluate/OWTestLearners.py

    r9483 r9505  
    1717                        orange.AttributeWarning) 
    1818 
    19 import Orange.multilabel.label as label 
    20 import Orange 
    21  
    2219############################################################################## 
    2320 
     
    6360        ('Recall', 'Recall', 'recall(cm)', False, True), 
    6461        ('Brier score', 'Brier', 'BrierScore(res)', True), 
    65         ('Matthews correlation coefficient', 'MCC', 'MCC(cm)', False, True), 
    66         ]] 
    67      
     62        ('Matthews correlation coefficient', 'MCC', 'MCC(cm)', False, True)]] 
     63 
    6864    rStatistics = [Score(*s) for s in [\ 
    6965        ("Mean squared error", "MSE", "MSE(res)", False), 
     
    7470        ("Relative absolute error", "RAE", "RAE(res)", False), 
    7571        ("R-squared", "R2", "R2(res)")]] 
    76      
    77     #multi-label statistics 
    78     mStatistics = [Score(*s) for s in [\ 
    79         ('Hamming Loss', 'HammingLoss', 'mlc_hamming_loss(res)', False), 
    80         ('Accuracy', 'Accuracy', 'mlc_accuracy(res)', False), 
    81         ('Precision', 'Precision', 'mlc_precision(res)', False), 
    82         ('Recall', 'Recall', 'mlc_recall(res)', False),                                
    83         ]] 
    84      
     72 
    8573    resamplingMethods = ["Cross-validation", "Leave-one-out", "Random sampling", 
    8674                         "Test on train data", "Test on test data"] 
     
    10189        self.selectedCScores = [i for (i,s) in enumerate(self.cStatistics) if s.show] 
    10290        self.selectedRScores = [i for (i,s) in enumerate(self.rStatistics) if s.show] 
    103         self.selectedMScores = [i for (i,s) in enumerate(self.mStatistics) if s.show] 
    10491        self.targetClass = 0 
    10592        self.loadSettings() 
     
    184171                                     selectionMode = QListWidget.MultiSelection, 
    185172                                     callback=self.newscoreselection) 
    186  
    187          
    188         self.mStatLabels = [s.name for s in self.mStatistics] 
    189         self.mbox = OWGUI.widgetBox(self.controlArea, "Performance scores", addToLayout=False) 
    190         self.mstatLB = OWGUI.listBox(self.mbox, self, 'selectedMScores', 'mStatLabels', 
    191                                      selectionMode = QListWidget.MultiSelection, 
    192                                      callback=self.newscoreselection) 
    193173         
    194174        self.statLayout.addWidget(self.cbox) 
    195175        self.statLayout.addWidget(self.rbox) 
    196         self.statLayout.addWidget(self.mbox) 
    197176        self.controlArea.layout().addLayout(self.statLayout) 
    198177         
     
    211190            return True 
    212191        return self.data.domain.classVar.varType == orange.VarTypes.Discrete 
    213      
    214     def is_multilabel(self): 
    215         if not self.data: 
    216             return False  
    217         return label.is_multilabel(self.data) == 1 
    218      
    219     def set_usestat(self): 
    220         #usestat = [self.selectedRScores, self.selectedCScores][self.isclassification()] 
    221         if self.is_multilabel(): 
    222             usestat = self.selectedMScores 
    223         else: 
    224             usestat = [self.selectedRScores, self.selectedCScores][self.isclassification()] 
    225         return usestat 
    226      
    227     def set_newstate(self): 
    228         if self.is_multilabel(): 
    229             stat = self.mStatistics 
    230         else: 
    231             stat = [self.rStatistics, self.cStatistics][self.isclassification()] 
    232         return stat 
    233      
     192         
    234193    def paintscores(self): 
    235194        """paints the table with evaluation scores""" 
     
    262221        self.tab.resizeColumnsToContents() 
    263222        self.tab.resizeRowsToContents() 
    264         usestat = self.set_usestat() 
    265          
     223        usestat = [self.selectedRScores, self.selectedCScores][self.isclassification()] 
    266224        for i in range(len(self.stat)): 
    267225            if i not in usestat: 
     
    276234        else: 
    277235            exset = [] 
    278         if not self.is_multilabel(): 
    279             self.reportSettings("Validation method", 
     236        self.reportSettings("Validation method", 
    280237                            [("Method", self.resamplingMethods[self.resampling])] 
    281238                            + exset + 
    282239                            ([("Target class", self.data.domain.classVar.values[self.targetClass])] if self.data else [])) 
    283         else: 
    284              self.reportSettings("Validation method", 
    285                             [("Method", self.resamplingMethods[self.resampling])] 
    286                             + exset) 
    287240         
    288241        self.reportData(self.data) 
     
    293246            learners.sort() 
    294247            learners = [lt[1] for lt in learners] 
    295             usestat = self.set_usestat() 
     248            usestat = [self.selectedRScores, self.selectedCScores][self.isclassification()] 
     249             
    296250            res = "<table><tr><th></th>"+"".join("<th><b>%s</b></th>" % hr for hr in [s.label for i, s in enumerate(self.stat) if i in usestat])+"</tr>" 
    297251            for i, l in enumerate(learners): 
     
    318272        new = self.data.selectref(indices(self.data)) 
    319273         
    320         multilabel_flag = label.is_multilabel(self.data) #add for multi-label 
    321          
    322274        self.warning(0) 
    323275        learner_exceptions = [] 
     
    328280            try: 
    329281                predictor = learner(new) 
    330                 if multilabel_flag == 0: #single label 
    331                     if predictor(new[0]).varType == new.domain.classVar.varType: 
    332                         learners.append(learner) 
    333                     else: 
    334                         l.scores = [] 
    335                 else:                   #multi-label 
     282                if predictor(new[0]).varType == new.domain.classVar.varType: 
    336283                    learners.append(learner) 
     284                else: 
     285                    l.scores = [] 
    337286            except Exception, ex: 
    338287                learner_exceptions.append((l, ex)) 
     
    348297 
    349298        # computation of results (res, and cm if classification) 
    350         print self.resampling 
    351299        pb = None 
    352300        if self.resampling==0: 
    353301            pb = OWGUI.ProgressBar(self, iterations=self.nFolds) 
    354             #print self.nFolds 
    355302            res = orngTest.crossValidation(learners, self.data, folds=self.nFolds, 
    356303                                           strat=orange.MakeRandomIndices.StratifiedIfPossible, 
    357304                                           callback=pb.advance, storeExamples = True) 
    358             #res = orngTest.crossValidation(learners, self.data, folds=self.nFolds) 
    359              
    360305            pb.finish() 
    361306        elif self.resampling==1: 
     
    445390        """handle input train data set""" 
    446391        self.closeContext() 
    447          
    448         multilabel_flag = label.is_multilabel(data) #add for multi-label 
    449         if multilabel_flag == 0:  
    450             self.data = self.isDataWithClass(data, checkMissing=True) and data or None 
    451         else: 
    452             self.data = data 
    453          
     392        self.data = self.isDataWithClass(data, checkMissing=True) and data or None 
    454393        self.fillClassCombo() 
    455394        if not self.data: 
     
    461400        else: 
    462401            # new data has arrived 
    463             if multilabel_flag == 0: #single label 
    464                 self.data = orange.Filter_hasClassValue(self.data) 
    465             #self.statLayout.setCurrentWidget(self.cbox if self.isclassification() else self.rbox) 
    466              
    467             #print multilabel_flag 
    468             #self.stat = [self.rStatistics, self.cStatistics][self.isclassification()] 
    469             if multilabel_flag == 1: 
    470                 self.statLayout.setCurrentWidget(self.mbox) 
    471             else: 
    472                 self.statLayout.setCurrentWidget(self.cbox if self.isclassification() else self.rbox) 
     402            self.data = orange.Filter_hasClassValue(self.data) 
     403            self.statLayout.setCurrentWidget(self.cbox if self.isclassification() else self.rbox) 
     404             
     405            self.stat = [self.rStatistics, self.cStatistics][self.isclassification()] 
    473406             
    474407            if self.learners: 
    475408                self.score([l.id for l in self.learners.values()]) 
    476             self.stat = self.set_newstate() 
    477              
     409 
    478410        self.openContext("", data) 
    479411        self.paintscores() 
     
    509441         
    510442        if self.targetClass<len(domain.classVar.values): 
    511             self.targetCombo.setCurrentIndex(self.targetClass)              
     443            self.targetCombo.setCurrentIndex(self.targetClass) 
    512444        else: 
    513445            self.targetCombo.setCurrentIndex(0) 
     
    589521    def newscoreselection(self): 
    590522        """handle change in set of scores to be displayed""" 
    591         usestat = self.set_usestat() 
    592          
     523        usestat = [self.selectedRScores, self.selectedCScores][self.isclassification()] 
    593524        for i in range(len(self.stat)): 
    594525            if i in usestat: 
     
    597528            else: 
    598529                self.tab.hideColumn(i+1) 
    599      
     530 
    600531    def recompute(self, forced=False): 
    601532        """recompute the scores for all learners, 
     
    630561 
    631562    data1 = orange.ExampleTable(r'../../doc/datasets/voting') 
    632     data2 = orange.ExampleTable(r'../../doc/datasets/adult') 
    633     datar = orange.ExampleTable(r'../../doc/datasets/balance-scale') 
    634     data3 = orange.ExampleTable(r'../../doc/datasets/bridges') 
    635     data4 = orange.ExampleTable(r'../../doc/datasets/lenses') 
    636     data5 = orange.ExampleTable(r'../../doc/datasets/multidata') 
    637     data6 = orange.ExampleTable(r'../../doc/datasets/emotions') 
     563    data2 = orange.ExampleTable(r'../../golf') 
     564    datar = orange.ExampleTable(r'../../auto-mpg') 
     565    data3 = orange.ExampleTable(r'../../sailing-big') 
     566    data4 = orange.ExampleTable(r'../../sailing-test') 
    638567 
    639568    l1 = orange.MajorityLearner(); l1.name = '1 - Majority' 
     
    649578 
    650579    l4 = orange.MajorityLearner(); l4.name = "4 - Majority" 
    651     l5 = Orange.multilabel.BinaryRelevanceLearner(); l1.name = '5 - BR' 
    652      
    653     l6 = Orange.multilabel.LabelPowersetLearner(); 
    654      
     580 
    655581    import orngRegression as r 
    656582    r5 = r.LinearRegressionLearner(name="0 - lin reg") 
    657583 
    658     testcase = 5 
     584    testcase = 4 
    659585 
    660586    if testcase == 0: # 1(UPD), 3, 4 
     
    690616        ow.setLearner(l2, 5) 
    691617        ow.setTestData(None) 
    692     if testcase == 5: # binary relevance 
    693         ow.setData(data5) 
    694         ow.setLearner(l5, 1) 
    695     if testcase == 6: #label powerset 
    696         ow.setData(data6) 
    697         ow.setLearner(l6,1) 
     618 
    698619    ow.saveSettings() 
  • orange/OrangeWidgets/Unsupervised/OWNxExplorer.py

    r9502 r9505  
    174174            self.colorCombo = OWGUI.comboBox(colorBox, self, "color", callback=self.set_node_colors) 
    175175            self.colorCombo.addItem("(same color)") 
    176             OWGUI.button(colorBox, self, "Set node color palette", self._set_colors, tooltip = "Set node color palette", debuggingEnabled = 0) 
     176            OWGUI.button(colorBox, self, "palette", self._set_colors, tooltip = "Set node color palette", width=60, debuggingEnabled = 0) 
    177177             
    178178            ib = OWGUI.widgetBox(self.verticesTab, "Node size attribute", orientation="vertical", addSpace = False) 
     
    200200            self.edgeColorCombo = OWGUI.comboBox(colorBox, self, "edgeColor", callback=self.set_edge_colors) 
    201201            self.edgeColorCombo.addItem("(same color)") 
    202             OWGUI.button(colorBox, self, "Set edge color palette", self._set_edge_color_palette, tooltip = "Set edge color palette", debuggingEnabled = 0) 
     202            OWGUI.button(colorBox, self, "palette", self._set_edge_color_palette, tooltip = "Set edge color palette", width=60, debuggingEnabled = 0) 
    203203             
    204204            self.edgeLabelBox = OWGUI.widgetBox(self.edgesTab, "Edge labels", addSpace = False) 
     
    34103410             
    34113411         
    3412 if __name__=="__main__":     
     3412if __name__=="__main__": 
    34133413    a=QApplication(sys.argv) 
    34143414    ow=OWNxExplorer() 
    34153415    ow.show() 
    3416     root = 'c:\\Users\\miha\\Projects\\res\\Orange\\test\\' 
    3417     #net = Orange.network.readwrite.read(root + 'airlines_4.net') 
    3418     #net.set_items(Orange.data.Table(root + 'airlines_4.tab')) 
    3419     #net = Orange.network.readwrite.read(root + 'K4K2.net') 
    3420     net = Orange.network.readwrite.read(root + 'K4K4K5_.net') 
    3421     ow.set_graph(net) 
    3422     #ow.handleNewSignals() 
    3423     #import OWNetExplorer 
    3424     #ow1=OWNetExplorer.OWNetExplorer() 
    3425     #ow1.show() 
    3426     #net1 = Orange.network.Network.read(root + 'K4K4K5_.net') 
    3427     #ow1.setGraph(net1) 
     3416    def setNetwork(signal, data, id=None): 
     3417        if signal == 'Network': 
     3418            ow.set_graph(data) 
     3419        #if signal == 'Items': 
     3420        #    ow.set_items(data) 
     3421         
     3422    import OWNxFile 
     3423    owFile = OWNxFile.OWNxFile() 
     3424    owFile.send = setNetwork 
     3425    owFile.show() 
     3426    owFile.selectNetFile(0) 
     3427     
    34283428    a.exec_() 
    3429     #save settings 
    34303429    ow.saveSettings() 
    3431     #ow1.saveSettings() 
    3432      
     3430    owFile.saveSettings() 
     3431     
  • source/pyxtract/pyprops.py

    r8265 r9505  
    218218    off.write(notice) 
    219219 
    220     off.write('#include "../%s"\n\n' % hppfile) 
     220    off.write('#include "../%s"\n' % hppfile) 
     221    off.write('#include <cstddef>\n\n') 
    221222 
    222223# - add parent fields 
  • source/pyxtract/pyxtract.py

    r8265 r9505  
    532532  newfiles.append(targetname) 
    533533  outfile.write("/* This file was generated by pyxtract \n   Do not edit.*/\n\n") 
     534  outfile.write('#include <cstddef>\n\n') 
    534535 
    535536  usedbases=usedbases.keys() 
  • testing/regressionTests/results/orange25/bayes-run.py.txt

    r7422 r9505  
    1 Iris-setosa Iris-setosa 
    2 Iris-setosa Iris-setosa 
    3 Iris-setosa Iris-setosa 
    4 Iris-setosa Iris-setosa 
    5 Iris-setosa Iris-setosa 
     1yes no 
     2yes no 
     3yes no 
     4yes no 
     5yes no 
  • testing/regressionTests/xtest_one.py

    r8266 r9505  
    44import os as t__os 
    55 
    6 #ignore deprection warnings 
     6#ignore warnings 
    77import warnings as t__warnings 
    8 t__warnings.simplefilter("ignore", DeprecationWarning) 
     8t__warnings.simplefilter("ignore") 
    99 
    1010NO_RANDOMNESS = 1 # prevent random parts of scripts to run 
Note: See TracChangeset for help on using the changeset viewer.