Changeset 9454:137d562b7f33 in orange


Timestamp: 07/06/11 17:00:20 (3 years ago)
Author: wencanluo <wencanluo@…>
Branch: default
Convert: 2b90bf810db48a0a9f5656fd260014ed20c88b3e
Message: Finish the initial version of the TestLearners widget for multi-label classification

Location: orange
Files: 2 deleted, 3 edited

  • orange/Orange/evaluation/scoring.py

    r9452 r9454  
  and :math:`Z_i=H(x_i)` be the set of labels predicted by :math:`H` for example :math:`x_i`.

- .. autofunction:: hamming_loss
- .. autofunction:: accuracy
- .. autofunction:: precision
- .. autofunction:: recall
+ .. autofunction:: mlc_hamming_loss
+ .. autofunction:: mlc_accuracy
+ .. autofunction:: mlc_precision
+ .. autofunction:: mlc_recall

  So, let's compute all this in part of

…

      print_figure(fig, filename, **kwargs)

- def hamming_loss(res):
+ def mlc_hamming_loss(res):
      """
      Schapire and Singer (2000) presented Hamming Loss, which is defined as:
…
      return [x/label_num/example_num for x in losses]

- def accuracy(res, forgiveness_rate = 1.0):
+ def mlc_accuracy(res, forgiveness_rate = 1.0):
      """
      Godbole and Sarawagi (2004) use the metrics accuracy, precision and recall as follows:
…
      return [math.pow(x/example_num, forgiveness_rate) for x in accuracies]

- def precision(res):
+ def mlc_precision(res):
      """
      :math:`Precision(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Z_i|}`
…
      return [x/example_num for x in precisions]

- def recall(res):
+ def mlc_recall(res):
      """
      :math:`Recall(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Y_i|}`
…
      return [x/example_num for x in recalls]

- def ranking_loss(res):
+ def mlc_ranking_loss(res):
      pass

- def average_precision(res):
+ def mlc_average_precision(res):
      pass

- def hierarchical_loss(res):
+ def mlc_hierarchical_loss(res):
      pass
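The renamed scorers implement the formulas quoted in the docstrings above. As a minimal, self-contained sketch of what they compute (plain Python over explicit label sets, not Orange's implementation; the function name and the list-of-sets representation are assumptions for illustration):

    # Illustrative only: Y[i] and Z[i] are the sets of true and predicted
    # labels for example i, and `labels` is the full label set L.
    def sketch_mlc_scores(Y, Z, labels):
        n = float(len(Y))
        # Hamming loss (Schapire & Singer, 2000): size of the symmetric
        # difference Y_i ^ Z_i, averaged over examples, divided by |L|.
        hamming = sum(len(y ^ z) for y, z in zip(Y, Z)) / (n * len(labels))
        # Accuracy, precision, recall (Godbole & Sarawagi, 2004):
        # |Y_i & Z_i| over |Y_i | Z_i|, |Z_i| and |Y_i| respectively;
        # a real implementation would guard against empty Y_i or Z_i.
        accuracy = sum(len(y & z) / float(len(y | z)) for y, z in zip(Y, Z)) / n
        precision = sum(len(y & z) / float(len(z)) for y, z in zip(Y, Z)) / n
        recall = sum(len(y & z) / float(len(y)) for y, z in zip(Y, Z)) / n
        return hamming, accuracy, precision, recall

The forgiveness_rate parameter of mlc_accuracy, visible in the diff, raises the averaged accuracy to that power before returning it; at its default of 1.0 it reduces to the plain mean above.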
  • orange/OrangeWidgets/Multilabel/OWTestLearners.py

    r9453 r9454  
          ('Recall', 'Recall', 'recall(cm)', False, True),
          ('Brier score', 'Brier', 'BrierScore(res)', True),
-         ('Hamming Loss', 'HammingLoss', 'hamming_loss(res)', True),
+         ('Hamming Loss', 'HammingLoss', 'mlc_hamming_loss(res)', True),
+         ('Accuracy', 'Accuracy', 'mlc_accuracy(res)', True),
+         ('Precision', 'Precision', 'mlc_precision(res)', True),
+         ('Recall', 'Recall', 'mlc_recall(res)', True),
          ('Matthews correlation coefficient', 'MCC', 'MCC(cm)', False, True),
          ]]

…

                  self.learners[id].results = None
              return
-         print self.data.domain
          # test which learners can accept the given data set
          # e.g., regressions can't deal with classification data

…

          indices = orange.MakeRandomIndices2(p0=min(n, len(self.data)), stratified=orange.MakeRandomIndices2.StratifiedIfPossible)

-         new = self.data
- #        new = self.data.selectref(indices(self.data))
+         new = self.data.selectref(indices(self.data))
  #        new = self.data.selectref([1]*min(n, len(self.data)) +
  #                                  [0]*(len(self.data) - min(n, len(self.data))))
-
-         for e in new:
-             print e

          multilabel_flag = label.is_multilabel(self.data)

…

                  else:                   #multi-label
                      learners.append(learner)
-                     print "multi-learner"
              except Exception, ex:
                  self.warning(0, "Learner %s ends with exception: %s" % (l.name, str(ex)))

…

      def setLearner(self, learner, id=None):
          """add/remove a learner"""
-         print "setLearner"
          if learner: # a new or updated learner
              if id in self.learners: # updated learner
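Each tuple added to the widget's score list pairs a display name and an attribute name with an expression string over the evaluation results. A hedged sketch of the dispatch pattern such a table suggests; the tuple contents come from the diff, but the helper name, the namespace construction, and the use of eval are assumptions for illustration, not the widget's verbatim code:

    # Hypothetical dispatch of expression-string scores (illustrative).
    import Orange

    MLC_SCORES = [
        ('Hamming Loss', 'HammingLoss', 'mlc_hamming_loss(res)'),
        ('Accuracy', 'Accuracy', 'mlc_accuracy(res)'),
        ('Precision', 'Precision', 'mlc_precision(res)'),
        ('Recall', 'Recall', 'mlc_recall(res)'),
    ]

    def evaluate_mlc_scores(res):
        # Bind the scoring module's names plus `res`, then evaluate each
        # expression string in that namespace.
        ns = vars(Orange.evaluation.scoring).copy()
        ns['res'] = res
        return [(name, eval(expr, ns)) for name, _short, expr in MLC_SCORES]

Keeping the table declarative means a new metric is a one-tuple change, which is exactly what this changeset does.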
  • orange/doc/Orange/rst/code/mlc-evaluator.py

    r9451 r9454  
  data = Orange.data.Table("multidata")

- res = Orange.evaluation.testing.cross_validation(learners, data, folds=10,
-                       strat=Orange.core.MakeRandomIndices.StratifiedIfPossible)
+ res = Orange.evaluation.testing.cross_validation(learners, data)

- #res = Orange.evaluation.testing.cross_validation(learners, data)
-
- loss = Orange.evaluation.scoring.hamming_loss(res)
+ loss = Orange.evaluation.scoring.mlc_hamming_loss(res)
  print 'loss=', loss

- accuracy = Orange.evaluation.scoring.accuracy(res)
+ accuracy = Orange.evaluation.scoring.mlc_accuracy(res)
  print 'accuracy=', accuracy

- precision = Orange.evaluation.scoring.precision(res)
+ precision = Orange.evaluation.scoring.mlc_precision(res)
  print 'precision=', precision

- recall = Orange.evaluation.scoring.recall(res)
+ recall = Orange.evaluation.scoring.mlc_recall(res)
  print 'recall=', recall
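Since all four scorers share the one-argument call scorer(res) and return one value per learner, the four blocks above could also be written as a loop. A sketch under the same assumptions, reusing the learners list and data table from the top of the script:

    scoring = Orange.evaluation.scoring
    res = Orange.evaluation.testing.cross_validation(learners, data)
    # Iterate over (label, scorer) pairs instead of repeating the pattern.
    for name, scorer in (('loss', scoring.mlc_hamming_loss),
                         ('accuracy', scoring.mlc_accuracy),
                         ('precision', scoring.mlc_precision),
                         ('recall', scoring.mlc_recall)):
        print name + '=', scorer(res)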