# Changeset 9454:137d562b7f33 in orange

Ignore:
Timestamp:
07/06/11 17:00:20 (3 years ago)
Branch:
default
Convert:
2b90bf810db48a0a9f5656fd260014ed20c88b3e
Message:

Finish the initial version of widget TestLearner for multi-label

Location:
orange
Files:
2 deleted
3 edited

Unmodified
Removed
• ## orange/Orange/evaluation/scoring.py

 r9452 and :math:`Z_i=H(x_i)` be the set of labels predicted by :math:`H` for example :math:`x_i`. .. autofunction:: hamming_loss .. autofunction:: accuracy .. autofunction:: precision .. autofunction:: recall .. autofunction:: mlc_hamming_loss .. autofunction:: mlc_accuracy .. autofunction:: mlc_precision .. autofunction:: mlc_recall So, let's compute all this in part of print_figure(fig, filename, **kwargs) def hamming_loss(res): def mlc_hamming_loss(res): """ Schapire and Singer (2000) presented Hamming Loss, which is defined as: return [x/label_num/example_num for x in losses] def accuracy(res, forgiveness_rate = 1.0): def mlc_accuracy(res, forgiveness_rate = 1.0): """ Godbole & Sarawagi, 2004 uses the metrics accuracy, precision, recall as follows: return [math.pow(x/example_num,forgiveness_rate) for x in accuracies] def precision(res): def mlc_precision(res): """ :math:`Precision(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Z_i|}` return [x/example_num for x in precisions] def recall(res): def mlc_recall(res): """ :math:`Recall(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Y_i|}` return [x/example_num for x in recalls] def ranking_loss(res): def mlc_ranking_loss(res): pass def average_precision(res): def mlc_average_precision(res): pass def hierarchical_loss(res): def mlc_hierarchical_loss(res): pass
• ## orange/OrangeWidgets/Multilabel/OWTestLearners.py

 r9453 ('Recall', 'Recall', 'recall(cm)', False, True), ('Brier score', 'Brier', 'BrierScore(res)', True), ('Hamming Loss', 'HammingLoss', 'hamming_loss(res)', True), ('Hamming Loss', 'HammingLoss', 'mlc_hamming_loss(res)', True), ('Accuracy', 'Accuracy', 'mlc_accuracy(res)', True), ('Precision', 'Precision', 'mlc_precision(res)', True), ('Recall', 'Recall', 'mlc_recall(res)', True), ('Matthews correlation coefficient', 'MCC', 'MCC(cm)', False, True), ]] self.learners[id].results = None return print self.data.domain # test which learners can accept the given data set # e.g., regressions can't deal with classification data indices = orange.MakeRandomIndices2(p0=min(n, len(self.data)), stratified=orange.MakeRandomIndices2.StratifiedIfPossible) new = self.data #        new = self.data.selectref(indices(self.data)) new = self.data.selectref(indices(self.data)) #        new = self.data.selectref([1]*min(n, len(self.data)) + #                                  [0]*(len(self.data) - min(n, len(self.data)))) for e in new: print e multilabel_flag = label.is_multilabel(self.data) else:                   #multi-label learners.append(learner) print "multi-learner" except Exception, ex: self.warning(0, "Learner %s ends with exception: %s" % (l.name, str(ex))) def setLearner(self, learner, id=None): """add/remove a learner""" print "setLearner" if learner: # a new or updated learner if id in self.learners: # updated learner
• ## orange/doc/Orange/rst/code/mlc-evaluator.py

 r9451 data = Orange.data.Table("multidata") res = Orange.evaluation.testing.cross_validation(learners, data, folds=10, strat=Orange.core.MakeRandomIndices.StratifiedIfPossible) res = Orange.evaluation.testing.cross_validation(learners, data) #res = Orange.evaluation.testing.cross_validation(learners, data) loss = Orange.evaluation.scoring.hamming_loss(res) loss = Orange.evaluation.scoring.mlc_hamming_loss(res) print 'loss=', loss accuracy = Orange.evaluation.scoring.accuracy(res) accuracy = Orange.evaluation.scoring.mlc_accuracy(res) print 'accuracy=', accuracy precision = Orange.evaluation.scoring.precision(res) precision = Orange.evaluation.scoring.mlc_precision(res) print 'precision=', precision recall = Orange.evaluation.scoring.recall(res) recall = Orange.evaluation.scoring.mlc_recall(res) print 'recall=', recall
Note: See TracChangeset for help on using the changeset viewer.