# source:orange/Orange/evaluation/scoring.py@10548:6369044e50b6

Revision 10548:6369044e50b6, 97.4 KB checked in by anzeh <anze.staric@…>, 2 years ago (diff)

Removed deprecated_function_name decorator (name didn't change).

Line
1import math
2import functools
4
5import numpy
6
7import Orange
8from Orange import statc, corn
9from Orange.misc import deprecated_keywords, deprecated_function_name, \
10    deprecation_warning, environ
11from Orange.evaluation import testing
12
13try:
14    import matplotlib
15    HAS_MATPLOTLIB = True
16except ImportError:
17    matplotlib = None
18    HAS_MATPLOTLIB = False
19
20#### Private stuff
21
def log2(x):
    """Return the base-2 logarithm of x."""
    ln_two = math.log(2)
    return math.log(x) / ln_two
25
def check_non_zero(x):
    """Raise ValueError when x is zero.

    Used as a guard before dividing by an example count or a sum of
    weights.

    :raises ValueError: when x equals 0.
    """
    if x == 0.:
        # py2-only `raise E, "msg"` replaced with the call form, valid in
        # both Python 2 and 3
        raise ValueError("Cannot compute the score: no examples or sum of weights is 0.")
30
def gettotweight(res):
    """Return the sum of instance weights over res.results.

    :raises ValueError: when the sum of weights is 0.
    """
    # sum() replaces the py2-only builtin reduce(); same start value (0)
    # and accumulation order, so the result is identical.
    totweight = sum(r.weight for r in res.results)
    if totweight == 0.:
        raise ValueError("Cannot compute the score: sum of weights is 0.")
    return totweight
37
def gettotsize(res):
    """Return the number of tested instances in res.

    :raises ValueError: when there are no results.
    """
    if len(res.results):
        return len(res.results)
    # py2-only `raise E, "msg"` replaced with the call form
    raise ValueError("Cannot compute the score: no examples.")
44
45
def split_by_iterations(res):
    """Split ExperimentResults of a multiple-iteration test into a list
    of ExperimentResults, one for each iteration.
    """
    if res.number_of_iterations < 2:
        return [res]

    # NOTE(review): the constructor call below was truncated in the source
    # dump; the trailing keyword arguments were reconstructed to mirror
    # split_by_classifiers -- confirm against upstream.
    ress = [Orange.evaluation.testing.ExperimentResults(
                1, res.classifier_names, res.class_values,
                res.weights, classifiers=res.classifiers,
                test_type=res.test_type, labels=res.labels)
            for _ in range(res.number_of_iterations)]
    # Route every tested example into the result set of its own iteration.
    for te in res.results:
        ress[te.iteration_number].results.append(te)
    return ress
61
def split_by_classifiers(res):
    """Split an instance of :obj:ExperimentResults into a list of
    :obj:ExperimentResults, one per classifier.
    """
    per_classifier = []
    for idx, name in enumerate(res.classifierNames):
        # A fresh single-classifier result container for classifier idx.
        single = Orange.evaluation.testing.ExperimentResults(
            res.numberOfIterations, [name], res.classValues,
            weights=res.weights, baseClass=res.baseClass,
            classifiers=[res.classifiers[idx]] if res.classifiers else [],
            test_type=res.test_type, labels=res.labels)
        single.results = []
        # Copy every tested example, keeping only this classifier's
        # prediction and probabilities.
        for te in res.results:
            copy = Orange.evaluation.testing.TestedExample(
                te.iterationNumber, te.actualClass, n=1, weight=te.weight)
            copy.classes = [te.classes[idx]]
            copy.probabilities = [te.probabilities[idx]]
            single.results.append(copy)
        per_classifier.append(single)
    return per_classifier
81
82
def class_probabilities_from_res(res, **argkw):
    """Calculate class probabilities from the actual classes in res.

    :param res: ExperimentResults with results and class_values.
    :param argkw: pass unweighted=1 to ignore instance weights.
    :rtype: list of probabilities, one per class value.
    :raises ValueError: when there are no examples or the weights sum to 0.
    """
    probs = [0.] * len(res.class_values)
    if argkw.get("unweighted", 0) or not res.weights:
        for tex in res.results:
            probs[int(tex.actual_class)] += 1.
        totweight = gettotsize(res)
    else:
        totweight = 0.
        for tex in res.results:
            # int() coercion added for consistency with the unweighted
            # branch, which already coerced the class value to an index
            probs[int(tex.actual_class)] += tex.weight
            totweight += tex.weight
        check_non_zero(totweight)
    return [prob / totweight for prob in probs]
97
98
@deprecated_keywords({
    "foldN": "fold_n",
    "reportSE": "report_se",
    "iterationIsOuter": "iteration_is_outer"})
def statistics_by_folds(stats, fold_n, report_se, iteration_is_outer):
    """Average accumulated per-fold statistics into one score per learner.

    :param stats: matrix of accumulated statistics; iterations x learners
                  when iteration_is_outer, learners x iterations otherwise.
    :param fold_n: per-fold instance count (or weight sum); empty folds
                   (fold_n <= 0) are dropped.
    :param report_se: also return the standard error of each mean.
    :rtype: list of means, or (mean, standard error) pairs.
    :raises ValueError: when there are no learners or no non-empty folds.
    """
    # remove empty folds, turn the matrix so that learner is outer.
    # List comprehensions replace py2-only tuple-parameter lambdas in
    # filter(); they also keep `stats` a list (py3 filter() returns an
    # iterator, which would make the emptiness checks below always pass).
    if iteration_is_outer:
        if not stats:
            raise ValueError("Cannot compute the score: no examples or sum of weights is 0.")
        number_of_learners = len(stats[0])
        non_empty = [(x, fN) for x, fN in zip(stats, fold_n) if fN > 0]
        stats = [[x[lrn] / fN for x, fN in non_empty]
                 for lrn in range(number_of_learners)]
    else:
        stats = [[x / fN for x, fN in zip(lrnD, fold_n) if fN > 0]
                 for lrnD in stats]

    if not stats:
        raise ValueError("Cannot compute the score: no classifiers")
    if not stats[0]:
        raise ValueError("Cannot compute the score: no examples or sum of weights is 0.")

    if report_se:
        return [(statc.mean(x), statc.sterr(x)) for x in stats]
    else:
        return [statc.mean(x) for x in stats]
125
def ME(res, **argkw):
    """Return the mean error of each learner: the (optionally weighted)
    mean absolute deviation of the predicted from the actual class.
    """
    errs = [0.] * res.number_of_learners

    weighted = not (argkw.get("unweighted", 0) or not res.weights)
    for tex in res.results:
        actual = float(tex.actual_class)
        w = tex.weight if weighted else 1.
        for i, predicted in enumerate(tex.classes):
            errs[i] += w * abs(float(predicted) - actual)
    totweight = gettotweight(res) if weighted else gettotsize(res)

    return [e / totweight for e in errs]

# Historic alias (note: shadowed by the regression_error-based MAE below).
MAE = ME
143
144
class ConfusionMatrix:
    """
    Classification result summary.

    Accumulates (optionally weighted) counts of true/false
    positive/negative predictions for a binary task.  The counters below
    are class-level defaults; `+=` in addTFPosNeg rebinds them on the
    instance, so updated instances do not share state.
    """
    #: True Positive predictions
    TP = 0.
    #:True Negative predictions
    TN = 0.
    #:False Positive predictions
    FP = 0.
    #: False Negative predictions
    FN = 0.

    @deprecated_keywords({"predictedPositive": "predicted_positive",
                          "isPositive": "is_positive"})
    def addTFPosNeg(self, predicted_positive, is_positive, weight=1.0):
        """
        Update confusion matrix with result of a single classification.

        :param predicted_positive: positive class value was predicted
        :param is_positive: correct class value is positive
        :param weight: weight of the selected instance
         """
        # Exactly one of the four counters receives the instance's weight.
        if predicted_positive:
            if is_positive:
                self.TP += weight
            else:
                self.FP += weight
        else:
            if is_positive:
                self.FN += weight
            else:
                self.TN += weight
178
179
180
181#########################################################################
182# PERFORMANCE MEASURES:
183# Scores for evaluation of numeric predictions
184
def check_argkw(dct, lst):
    """Return the first truthy dct value among the keys in lst (i.e. a
    truthy result when any listed key has a non-zero value), else the last
    looked-up value.
    """
    # functools.reduce replaces the py2-only builtin reduce(); the `or`
    # fold is kept (rather than any()) so the exact return value, not just
    # its truthiness, is unchanged.
    return functools.reduce(lambda x, y: x or y, [dct.get(k, 0) for k in lst])
188
def regression_error(res, **argkw):
    """Return the regression error (default: MSE).

    Recognized keyword options (all default to off):

    - abs: absolute instead of squared residuals (MAE family)
    - norm-abs / norm-sqr: normalize by the spread of the actual classes
      (relative errors)
    - R2: report 1 - score (coefficient of determination)
    - sqrt: take the square root of the final score (root errors)
    - SE: with more than one iteration, score each fold separately and
      return (mean, std) pairs across folds
    - unweighted: ignore instance weights
    """
    if argkw.get("SE", 0) and res.number_of_iterations > 1:
        # computes the scores for each iteration, then averages
        scores = [[0.] * res.number_of_iterations
                  for _ in range(res.number_of_learners)]
        norm = None
        if argkw.get("norm-abs", 0) or argkw.get("norm-sqr", 0):
            norm = [0.] * res.number_of_iterations

        # counts examples in each iteration
        nIter = [0] * res.number_of_iterations
        # average class in each iteration
        a = [0] * res.number_of_iterations
        for tex in res.results:
            nIter[tex.iteration_number] += 1
            a[tex.iteration_number] += float(tex.actual_class)
        a = [a[i] / nIter[i] for i in range(res.number_of_iterations)]

        if argkw.get("unweighted", 0) or not res.weights:
            # iterate across test cases
            for tex in res.results:
                ai = float(tex.actual_class)
                # NOTE(review): nIter was already counted in the loop above,
                # so this second increment doubles the per-fold counts used
                # for the un-normalized average below -- looks like a bug,
                # confirm against upstream before changing.
                nIter[tex.iteration_number] += 1

                # compute normalization, if required
                if argkw.get("norm-abs", 0):
                    norm[tex.iteration_number] += abs(ai - a[tex.iteration_number])
                elif argkw.get("norm-sqr", 0):
                    norm[tex.iteration_number] += (ai - a[tex.iteration_number])**2

                # iterate across results of different regressors
                for i, cls in enumerate(tex.classes):
                    if argkw.get("abs", 0):
                        scores[i][tex.iteration_number] += abs(float(cls) - ai)
                    else:
                        scores[i][tex.iteration_number] += (float(cls) - ai)**2
        else: # weighted
            # py2-only `raise E, "msg"` replaced with the call form
            raise NotImplementedError("weighted error scores with SE not implemented yet")

        if argkw.get("norm-abs") or argkw.get("norm-sqr"):
            scores = [[x / n for x, n in zip(y, norm)] for y in scores]
        else:
            scores = [[x / ni for x, ni in zip(y, nIter)] for y in scores]

        if argkw.get("R2"):
            scores = [[1.0 - x for x in y] for y in scores]

        if argkw.get("sqrt", 0):
            scores = [[math.sqrt(x) for x in y] for y in scores]

        return [(statc.mean(x), statc.std(x)) for x in scores]

    else: # single iteration (testing on a single test set)
        scores = [0.] * res.number_of_learners
        norm = 0.

        if argkw.get("unweighted", 0) or not res.weights:
            # NOTE(review): integer actual classes would truncate here
            # under py2 division -- confirm actual_class is float-like.
            a = sum([tex.actual_class for tex in res.results]) \
                / len(res.results)
            for tex in res.results:
                ac = float(tex.actual_class)
                # list comprehensions replace lazy map() chains; identical
                # values, but also correct under py3
                if argkw.get("abs", 0):
                    scores = [s + abs(float(cls) - ac)
                              for s, cls in zip(scores, tex.classes)]
                else:
                    scores = [s + (float(cls) - ac)**2
                              for s, cls in zip(scores, tex.classes)]

                if argkw.get("norm-abs", 0):
                    norm += abs(tex.actual_class - a)
                elif argkw.get("norm-sqr", 0):
                    norm += (tex.actual_class - a)**2
            totweight = gettotsize(res)
        else:
            # BUG FIX: this branch (marked UNFINISHED upstream) accumulated
            # into a throwaway list `MSEs` and then normalized the all-zero
            # `scores` instead.  Accumulate into `scores` so weighted
            # squared errors work; "abs" and "norm-*" options are still
            # unsupported with weights.
            for tex in res.results:
                ac = float(tex.actual_class)
                tw = tex.weight
                scores = [s + tw * (float(cls) - ac)**2
                          for s, cls in zip(scores, tex.classes)]
            totweight = gettotweight(res)

        if argkw.get("norm-abs", 0) or argkw.get("norm-sqr", 0):
            scores = [s / norm for s in scores]
        else: # normalize by number of instances (or sum of weights)
            scores = [s / totweight for s in scores]

        if argkw.get("R2"):
            scores = [1. - s for s in scores]

        if argkw.get("sqrt", 0):
            scores = [math.sqrt(x) for x in scores]

        return scores
283
def MSE(res, **argkw):
    """Compute mean-squared error."""
    # Thin wrapper: regression_error defaults to squared error normalized
    # by the number of instances (or the sum of weights).
    return regression_error(res, **argkw)
287
def RMSE(res, **argkw):
    """Compute root mean-squared error."""
    # Turn on the square root unless the caller decided otherwise.
    if "sqrt" not in argkw:
        argkw["sqrt"] = True
    return regression_error(res, **argkw)
292
def MAE(res, **argkw):
    """Compute mean absolute error."""
    # Default to absolute residuals; explicit caller options win.
    options = dict({"abs": True}, **argkw)
    return regression_error(res, **options)
297
def RSE(res, **argkw):
    """Compute relative squared error."""
    # Default to class-variance normalization; explicit caller options win.
    options = dict({"norm-sqr": True}, **argkw)
    return regression_error(res, **options)
302
def RRSE(res, **argkw):
    """Compute root relative squared error."""
    # Docstring fixed: it previously duplicated RSE's ("relative squared
    # error"), but the sqrt default below makes this the *root* variant.
    argkw.setdefault("norm-sqr", True)
    argkw.setdefault("sqrt", True)
    return regression_error(res, **argkw)
308
def RAE(res, **argkw):
    """Compute relative absolute error."""
    # Absolute residuals, normalized by absolute class deviation.
    for option in ("abs", "norm-abs"):
        argkw.setdefault(option, True)
    return regression_error(res, **argkw)
314
def R2(res, **argkw):
    """Compute the coefficient of determination, R-squared."""
    # R^2 = 1 - (squared error / class variance); both flags default on.
    defaults = {"norm-sqr": True, "R2": True}
    for key, value in defaults.items():
        argkw.setdefault(key, value)
    return regression_error(res, **argkw)
320
def MSE_old(res, **argkw):
    """Compute mean-squared error (legacy implementation; superseded by
    regression_error / MSE).

    Options: SE (per-fold (mean, std) pairs), unweighted, sqrt.
    """
    if argkw.get("SE", 0) and res.number_of_iterations > 1:
        MSEs = [[0.] * res.number_of_iterations
                for _ in range(res.number_of_learners)]
        nIter = [0] * res.number_of_iterations
        if argkw.get("unweighted", 0) or not res.weights:
            for tex in res.results:
                ac = float(tex.actual_class)
                nIter[tex.iteration_number] += 1
                for i, cls in enumerate(tex.classes):
                    MSEs[i][tex.iteration_number] += (float(cls) - ac)**2
        else:
            # py2-only `raise E, "msg"` replaced with the call form
            raise ValueError("weighted RMSE with SE not implemented yet")
        MSEs = [[x / ni for x, ni in zip(y, nIter)] for y in MSEs]
        if argkw.get("sqrt", 0):
            MSEs = [[math.sqrt(x) for x in y] for y in MSEs]
        return [(statc.mean(x), statc.std(x)) for x in MSEs]

    else:
        MSEs = [0.] * res.number_of_learners
        if argkw.get("unweighted", 0) or not res.weights:
            for tex in res.results:
                ac = float(tex.actual_class)
                # comprehension replaces the lazy map() chain (py3-safe,
                # identical values)
                MSEs = [s + (float(cls) - ac)**2
                        for s, cls in zip(MSEs, tex.classes)]
            totweight = gettotsize(res)
        else:
            for tex in res.results:
                ac = float(tex.actual_class)
                tw = tex.weight
                MSEs = [s + tw * (float(cls) - ac)**2
                        for s, cls in zip(MSEs, tex.classes)]
            totweight = gettotweight(res)

        # BUG FIX: the square root was previously taken *before* dividing by
        # totweight, yielding sqrt(sum)/n instead of sqrt(sum/n); normalize
        # first, then take the root (matching the SE branch above).
        MSEs = [x / totweight for x in MSEs]
        if argkw.get("sqrt", 0):
            MSEs = [math.sqrt(x) for x in MSEs]
        return MSEs
357
def RMSE_old(res, **argkw):
    """Compute root mean-squared error (legacy implementation)."""
    # Turn on the square root unless the caller decided otherwise.
    if "sqrt" not in argkw:
        argkw["sqrt"] = 1
    return MSE_old(res, **argkw)
362
363#########################################################################
364# PERFORMANCE MEASURES:
365# Scores for evaluation of classifiers
366
class CA(list):
    """
    Compute percentage of matches between predicted and actual class.

    :param test_results: :obj:~Orange.evaluation.testing.ExperimentResults
                         or list of :obj:ConfusionMatrix.
    :param report_se: include standard error in result.
    :param ignore_weights: ignore instance weights.
    :rtype: list of scores, one for each learner.

    Standard errors are estimated from deviation of CAs across folds (if
    test_results were produced by cross_validation) or approximated under
    the assumption of normal distribution otherwise.
    """
    # Input-kind tags returned by get_input_type().
    CONFUSION_MATRIX = 0
    CONFUSION_MATRIX_LIST = 1
    CLASSIFICATION = 2
    CROSS_VALIDATION = 3

    @deprecated_keywords({"reportSE": "report_se",
                          "unweighted": "ignore_weights"})
    def __init__(self, test_results, report_se = False, ignore_weights=False):
        super(CA, self).__init__()
        self.report_se = report_se
        self.ignore_weights = ignore_weights

        # Dispatch on the kind of input and fill self (a list) with one
        # score per learner.
        input_type = self.get_input_type(test_results)
        if input_type == self.CONFUSION_MATRIX:
            self[:] = [self.from_confusion_matrix(test_results)]
        elif input_type == self.CONFUSION_MATRIX_LIST:
            self[:] = self.from_confusion_matrix_list(test_results)
        elif input_type == self.CLASSIFICATION:
            self[:] = self.from_classification_results(test_results)
        elif input_type == self.CROSS_VALIDATION:
            self[:] = self.from_crossvalidation_results(test_results)

    def from_confusion_matrix(self, cm):
        """Score a single ConfusionMatrix or square count matrix."""
        all_predictions = 0.
        correct_predictions = 0.
        if isinstance(cm, ConfusionMatrix):
            all_predictions += cm.TP + cm.FN + cm.FP + cm.TN
            correct_predictions += cm.TP + cm.TN
        else:
            # Square multi-class matrix: the diagonal holds the correct
            # predictions.
            for r, row in enumerate(cm):
                for c, column in enumerate(row):
                    if r == c:
                        correct_predictions += column
                    all_predictions += column

        check_non_zero(all_predictions)
        ca = correct_predictions / all_predictions

        if self.report_se:
            # NOTE(review): the usual normal-approximation SE would be
            # sqrt(ca * (1 - ca) / n); this computes ca * (1 - ca) / sqrt(n)
            # -- confirm against upstream before relying on the SE values.
            return ca, ca * (1 - ca) / math.sqrt(all_predictions)
        else:
            return ca

    def from_confusion_matrix_list(self, confusion_matrices):
        """Score each confusion matrix in the list."""
        return [self.from_confusion_matrix(cm) for cm in confusion_matrices]

    def from_classification_results(self, test_results):
        """Score ExperimentResults of a single-iteration test."""
        CAs = [0.] * test_results.number_of_learners
        totweight = 0.
        for tex in test_results.results:
            w = 1. if self.ignore_weights else tex.weight
            # (cls == actual) and w -> w on a match, False (== 0) otherwise
            CAs = map(lambda res, cls: res + (cls == tex.actual_class and w),
                      CAs, tex.classes)
            totweight += w
        check_non_zero(totweight)
        ca = [x / totweight for x in CAs]

        if self.report_se:
            # NOTE(review): same SE-formula caveat as in
            # from_confusion_matrix.
            return [(x, x * (1 - x) / math.sqrt(totweight)) for x in ca]
        else:
            return ca

    def from_crossvalidation_results(self, test_results):
        """Score each fold separately, then average with
        statistics_by_folds (which also yields the SE across folds)."""
        CAsByFold = [[0.] * test_results.number_of_iterations
                     for _ in range(test_results.number_of_learners)]
        foldN = [0.] * test_results.number_of_iterations

        for tex in test_results.results:
            w = 1. if self.ignore_weights else tex.weight
            for lrn in range(test_results.number_of_learners):
                CAsByFold[lrn][tex.iteration_number] += (tex.classes[lrn] ==
                    tex.actual_class) and w
            foldN[tex.iteration_number] += w

        return statistics_by_folds(CAsByFold, foldN, self.report_se, False)

    def get_input_type(self, test_results):
        """Map the input object to one of the class-level input tags."""
        if isinstance(test_results, ConfusionMatrix):
            return self.CONFUSION_MATRIX
        elif isinstance(test_results, testing.ExperimentResults):
            if test_results.number_of_iterations == 1:
                return self.CLASSIFICATION
            else:
                return self.CROSS_VALIDATION
        elif isinstance(test_results, list):
            return self.CONFUSION_MATRIX_LIST
467
468
@deprecated_keywords({"reportSE": "report_se",
                      "unweighted": "ignore_weights"})
def AP(res, report_se=False, ignore_weights=False, **argkw):
    """Compute the average probability assigned to the correct class.

    :param res: ExperimentResults to score.
    :param report_se: include the standard error of each score.
    :param ignore_weights: ignore instance weights.
    :rtype: list of scores (or (score, SE) pairs), one per learner.
    """
    # List comprehensions replace the lazy map() chains throughout: under
    # py3, map() never produced lists, which broke the later indexing in
    # statistics_by_folds; values are identical under py2.
    if res.number_of_iterations == 1:
        APs = [0.] * res.number_of_learners
        if ignore_weights or not res.weights:
            for tex in res.results:
                APs = [ap + probs[tex.actual_class]
                       for ap, probs in zip(APs, tex.probabilities)]
            totweight = gettotsize(res)
        else:
            totweight = 0.
            for tex in res.results:
                APs = [ap + probs[tex.actual_class] * tex.weight
                       for ap, probs in zip(APs, tex.probabilities)]
                totweight += tex.weight
        check_non_zero(totweight)
        return [ap / totweight for ap in APs]

    APsByFold = [[0.] * res.number_of_learners
                 for _ in range(res.number_of_iterations)]
    foldN = [0.] * res.number_of_iterations
    if ignore_weights or not res.weights:
        for tex in res.results:
            APsByFold[tex.iteration_number] = [
                ap + probs[tex.actual_class]
                for ap, probs in zip(APsByFold[tex.iteration_number],
                                     tex.probabilities)]
            foldN[tex.iteration_number] += 1
    else:
        for tex in res.results:
            APsByFold[tex.iteration_number] = [
                ap + probs[tex.actual_class] * tex.weight
                for ap, probs in zip(APsByFold[tex.iteration_number],
                                     tex.probabilities)]
            foldN[tex.iteration_number] += tex.weight

    return statistics_by_folds(APsByFold, foldN, report_se, True)
506
507
@deprecated_keywords({"reportSE": "report_se",
                      "unweighted": "ignore_weights"})
def Brier_score(res, report_se=False, ignore_weights=False, **argkw):
    """Compute the Brier score, defined as the average (over test instances)
    of :math:\sum_x(t(x) - p(x))^2, where x is a class value, t(x) is 1 for
    the actual class value and 0 otherwise, and p(x) is the predicted
    probability of x.

    :param res: ExperimentResults to score.
    :param report_se: include a standard-error value with each score.
    :param ignore_weights: ignore instance weights.
    :rtype: list of scores (or (score, SE) pairs), one per learner.
    """
    # Computes an average (over examples) of sum_x(t(x) - p(x))^2, where
    #    x is class,
    #    t(x) is 0 for 'wrong' and 1 for 'correct' class
    #    p(x) is predicted probabilty.
    # There's a trick: since t(x) is zero for all classes but the
    # correct one (c), we compute the sum as sum_x(p(x)^2) - 2*p(c) + 1
    # Since +1 is there for each example, it adds 1 to the average
    # We skip the +1 inside the sum and add it just at the end of the function
    # We take max(result, 0) to avoid -0.0000x due to rounding errors

    if res.number_of_iterations == 1:
        MSEs=[0.] * res.number_of_learners
        if ignore_weights or not res.weights:
            totweight = 0.
            # NOTE(review): this branch ignores weights for the scores but
            # still accumulates tex.weight into totweight -- confirm
            # whether it should add 1 per instance instead.
            for tex in res.results:
                MSEs = map(lambda res, probs: res + reduce(
                    lambda s, pi: s + pi**2, probs, 0) -
                    2 * probs[tex.actual_class], MSEs, tex.probabilities)
                totweight += tex.weight
        else:
            # NOTE(review): tex.weight multiplies only the sum-of-squares
            # term, not the -2*p(c) term -- confirm against upstream.
            for tex in res.results:
                MSEs = map(lambda res, probs: res + tex.weight * reduce(
                    lambda s, pi: s + pi**2, probs, 0) -
                    2 * probs[tex.actual_class], MSEs, tex.probabilities)
            totweight = gettotweight(res)
        check_non_zero(totweight)
        if report_se:
            ## change this, not zero!!!  (the SE slot is a placeholder)
            return [(max(x / totweight + 1., 0), 0) for x in MSEs]
        else:
            # the deferred +1 from the expansion trick is added here
            return [max(x / totweight + 1., 0) for x in MSEs]

    # Multiple iterations: accumulate per fold, then average across folds.
    BSs = [[0.] * res.number_of_learners
           for _ in range(res.number_of_iterations)]
    foldN = [0.] * res.number_of_iterations

    if ignore_weights or not res.weights:
        for tex in res.results:
            BSs[tex.iteration_number] = map(lambda rr, probs: rr + reduce(
                lambda s, pi: s + pi**2, probs, 0) -
                2 * probs[tex.actual_class], BSs[tex.iteration_number],
                tex.probabilities)
            foldN[tex.iteration_number] += 1
    else:
        for tex in res.results:
            BSs[tex.iteration_number] = map(lambda res, probs:
                res + tex.weight * reduce(lambda s, pi: s + pi**2, probs, 0) -
                2 * probs[tex. actual_class], BSs[tex.iteration_number],
                tex.probabilities)
            foldN[tex.iteration_number] += tex.weight

    stats = statistics_by_folds(BSs, foldN, report_se, True)
    if report_se:
        return [(x + 1., y) for x, y in stats]
    else:
        return [x + 1. for x in stats]
572
def BSS(res, **argkw):
    """Return 1 - BS/2 for each learner, where BS is the Brier score
    (a skill-style rescaling: 1 is perfect, 0 is the score of assigning
    probability 0 to the correct class)."""
    # direct call replaces py2-only apply(), removed in Python 3
    return [1 - x / 2 for x in Brier_score(res, **argkw)]
575
def IS_ex(Pc, P):
    """Information score of a single answer: Pc is the posterior
    probability of the correct class, P its prior probability."""
    # Reward when the posterior improves on the prior, penalize otherwise.
    if Pc >= P:
        return log2(Pc) - log2(P)
    return log2(1 - P) - log2(1 - Pc)
582
583
@deprecated_keywords({"reportSE": "report_se"})
def IS(res, apriori=None, report_se=False, **argkw):
    """Compute the information score as defined by Kononenko and Bratko
    (1991).

    :param apriori: prior class distribution; when omitted, it is computed
        from the actual classes of the examples in res.
    :param report_se: include a standard-error value with each score.
    :rtype: list of scores, one per learner.
    """
    if not apriori:
        apriori = class_probabilities_from_res(res)

    use_weights = not (argkw.get("unweighted", 0) or not res.weights)

    if res.number_of_iterations == 1:
        scores = [0.] * res.number_of_learners
        for tex in res.results:
            cls = tex.actual_class
            w = tex.weight if use_weights else 1.
            for i, probs in enumerate(tex.probabilities):
                scores[i] += IS_ex(probs[cls], apriori[cls]) * w
        totweight = gettotweight(res) if use_weights else gettotsize(res)
        if report_se:
            # SE is a placeholder here; only the cross-validation path
            # estimates deviation (via statistics_by_folds).
            return [(s / totweight, 0) for s in scores]
        return [s / totweight for s in scores]

    # Multiple iterations: accumulate the scores per fold, then average.
    by_fold = [[0.] * res.number_of_iterations
               for _ in range(res.number_of_learners)]
    foldN = [0.] * res.number_of_iterations

    for tex in res.results:
        cls = tex.actual_class
        w = tex.weight if use_weights else 1.
        for i, probs in enumerate(tex.probabilities):
            by_fold[i][tex.iteration_number] += IS_ex(probs[cls],
                                                      apriori[cls]) * w
        foldN[tex.iteration_number] += w

    return statistics_by_folds(by_fold, foldN, report_se, False)
638
639
def Wilcoxon(res, statistics, **argkw):
    """Compare two classifiers with the Wilcoxon signed-rank test over the
    per-iteration values of the given statistics function.

    :param statistics: callable computing a two-element score list from an
                       ExperimentResults (e.g. CA).
    :rtype: whatever statc.wilcoxont returns (statistic, p-value).
    :raises TypeError: when statistics yields other than two scores.
    """
    res1, res2 = [], []
    for ri in split_by_iterations(res):
        stats = statistics(ri, **argkw)
        if len(stats) != 2:
            # py2-only `raise E, "msg"` replaced with the call form
            raise TypeError("Wilcoxon compares two classifiers, no more, no less")
        res1.append(stats[0])
        res2.append(stats[1])
    return statc.wilcoxont(res1, res2)
649
def rank_difference(res, statistics, **argkw):
    """Dispatch to Wilcoxon (exactly two classifiers) or Friedman (more)
    for comparing classifiers on the given statistics.

    :raises TypeError: when there are no experiments or fewer than two
                       classifiers.
    """
    if not res.results:
        # py2-only `raise E, "msg"` replaced with the call form
        raise TypeError("no experiments")

    k = len(res.results[0].classes)
    if k < 2:
        raise TypeError("nothing to compare (less than two classifiers given)")
    if k == 2:
        # direct calls replace py2-only apply(), removed in Python 3
        return Wilcoxon(res, statistics, **argkw)
    else:
        return Friedman(res, statistics, **argkw)
661
662
@deprecated_keywords({"res": "test_results",
                      "classIndex": "class_index",
                      "unweighted": "ignore_weights"})
def confusion_matrices(test_results, class_index=-1,
                       ignore_weights=False, cutoff=.5):
    """
    Return confusion matrices for test_results.

    :param test_results: test results
    :param class_index: index of class value for which the confusion matrices
                        are to be computed.
    :param ignore_weights: ignore instance weights.
    :param cutoff: cutoff for probability
    :rtype: list of :obj:ConfusionMatrix (binary case) or list of square
            per-learner count matrices (multi-class case).
    """
    tfpns = [ConfusionMatrix() for _ in range(test_results.number_of_learners)]

    if class_index < 0:
        numberOfClasses = len(test_results.class_values)
        if class_index < -1 or numberOfClasses > 2:
            # Multi-class: build a full numberOfClasses x numberOfClasses
            # count matrix per learner instead of TP/FP/TN/FN.
            cm = [[[0.] * numberOfClasses for _ in range(numberOfClasses)]
                  for _ in range(test_results.number_of_learners)]
            if ignore_weights or not test_results.weights:
                for tex in test_results.results:
                    trueClass = int(tex.actual_class)
                    for li, pred in enumerate(tex.classes):
                        predClass = int(pred)
                        if predClass < numberOfClasses:
                            cm[li][trueClass][predClass] += 1
            else:
                for tex in test_results.results:
                    trueClass = int(tex.actual_class)
                    # BUG FIX: enumerate() was missing here, so the weighted
                    # branch tried to unpack the class values themselves.
                    for li, pred in enumerate(tex.classes):
                        predClass = int(pred)
                        if predClass < numberOfClasses:
                            cm[li][trueClass][predClass] += tex.weight
            return cm

        elif test_results.baseClass >= 0:
            class_index = test_results.baseClass
        else:
            class_index = 1

    # NOTE(review): the four addTFPosNeg call lines below were truncated in
    # the source dump and have been reconstructed from the surviving
    # continuation lines -- confirm against upstream.
    if cutoff != .5:
        # Non-default cutoff: a prediction counts as positive when the
        # probability of the chosen class exceeds the cutoff.
        if ignore_weights or not test_results.weights:
            for lr in test_results.results:
                isPositive = (lr.actual_class == class_index)
                for i in range(test_results.number_of_learners):
                    tfpns[i].addTFPosNeg(
                        lr.probabilities[i][class_index] > cutoff, isPositive)
        else:
            for lr in test_results.results:
                isPositive = (lr.actual_class == class_index)
                for i in range(test_results.number_of_learners):
                    tfpns[i].addTFPosNeg(
                        lr.probabilities[i][class_index] > cutoff,
                        isPositive, lr.weight)
    else:
        # Default cutoff: use the predicted class directly.
        if ignore_weights or not test_results.weights:
            for lr in test_results.results:
                isPositive = (lr.actual_class == class_index)
                for i in range(test_results.number_of_learners):
                    tfpns[i].addTFPosNeg(lr.classes[i] == class_index,
                                         isPositive)
        else:
            for lr in test_results.results:
                isPositive = (lr.actual_class == class_index)
                for i in range(test_results.number_of_learners):
                    tfpns[i].addTFPosNeg(lr.classes[i] == class_index,
                                         isPositive, lr.weight)
    return tfpns
734
735
# Obsolete (renamed): backward-compatible alias for callers still using
# the old name.
compute_confusion_matrices = confusion_matrices
738
739
@deprecated_keywords({"confusionMatrix": "confusion_matrix"})
def confusion_chi_square(confusion_matrix):
    """
    Return chi square statistic of the confusion matrix
    (higher value indicates that prediction is not by chance).

    Accepts a single ConfusionMatrix / count matrix, or a list of them,
    in which case a list of (ss, df, p) triples is returned.
    """
    if isinstance(confusion_matrix, ConfusionMatrix) or \
       not isinstance(confusion_matrix[1], list):
        return _confusion_chi_square(confusion_matrix)
    else:
        # list() preserves the py2 return type: under py3, bare map() would
        # hand callers a lazy iterator instead of a list
        return list(map(_confusion_chi_square, confusion_matrix))
751
def _confusion_chi_square(confusion_matrix):
    """Chi-square test of independence on one confusion matrix; returns
    (statistic, degrees of freedom, p-value), or (-1, -1, -1) when any
    expected count is zero."""
    if isinstance(confusion_matrix, ConfusionMatrix):
        c = confusion_matrix
        confusion_matrix = [[c.TP, c.FN], [c.FP, c.TN]]
    dim = len(confusion_matrix)
    row_totals = [sum(row) for row in confusion_matrix]
    col_totals = [sum(row[i] for row in confusion_matrix) for i in range(dim)]
    total = sum(row_totals)
    row_priors = [t / total for t in row_totals]
    col_priors = [t / total for t in col_totals]
    ss = 0
    for ri, row in enumerate(confusion_matrix):
        for ci, observed in enumerate(row):
            expected = total * row_priors[ri] * col_priors[ci]
            if not expected:
                return -1, -1, -1
            ss += (observed - expected)**2 / expected
    df = (dim - 1)**2
    return ss, df, statc.chisqprob(ss, df)
771
class CMScore(list):
    """
    Base class for scores derived from confusion matrices; subclasses
    implement compute(confusion_matrix) and yield one score per learner.

    :param test_results: :obj:~Orange.evaluation.testing.ExperimentResults
                         or list of :obj:ConfusionMatrix.
    :rtype: list of scores, one for each learner."""
    def __new__(cls, test_results, **kwargs):
        # Short-circuit: a single ConfusionMatrix yields the bare score
        # rather than a one-element list.
        self = list.__new__(cls)
        if isinstance(test_results, ConfusionMatrix):
            self.__init__(test_results, **kwargs)
            return self[0]
        return self


    @deprecated_keywords({"confm": "test_results"})
    def __init__(self, test_results=None):
        super(CMScore, self).__init__()

        if test_results is not None:
            self[:] = self.__call__(test_results)

    def __call__(self, test_results):
        # Normalize the input to a list of confusion matrices, then score
        # each with the subclass's compute().
        if isinstance(test_results, testing.ExperimentResults):
            test_results = confusion_matrices(test_results, class_index=1)
        if isinstance(test_results, ConfusionMatrix):
            test_results = [test_results]

        # NOTE(review): under py3 map() returns a lazy iterator; assigning
        # into self[:] still works, but indexing the raw return would not.
        return map(self.compute, test_results)
799
800
801
class Sensitivity(CMScore):
    __doc__ = """Compute sensitivity
    <http://en.wikipedia.org/wiki/Sensitivity_and_specificity>_ (proportion
    of actual positives which are correctly identified as such).
    """ + CMScore.__doc__
    @classmethod
    def compute(cls, confusion_matrix):
        # Sensitivity = TP / (TP + FN); undefined when there are no
        # actual positive instances.
        positives = confusion_matrix.TP + confusion_matrix.FN
        if positives < 1e-6:
            import warnings
            warnings.warn("Can't compute sensitivity: one or both classes have no instances")
            return None
        return confusion_matrix.TP / positives
816
817
class Recall(Sensitivity):
    __doc__ = """ Compute recall
    <http://en.wikipedia.org/wiki/Precision_and_recall>_
    (fraction of relevant instances that are retrieved).
    """ + CMScore.__doc__
    pass # Recall is an alias for Sensitivity: TP / (TP + FN)
824
825
class Specificity(CMScore):
    __doc__ = """Compute specificity
    <http://en.wikipedia.org/wiki/Sensitivity_and_specificity>_
    (proportion of negatives which are correctly identified).
    """ + CMScore.__doc__
    @classmethod
    def compute(cls, confusion_matrix):
        # Specificity = TN / (TN + FP); undefined when there are no
        # actual negative instances.
        negatives = confusion_matrix.FP + confusion_matrix.TN
        if negatives < 1e-6:
            import warnings
            warnings.warn("Can't compute specificity: one or both classes have no instances")
            return None
        return confusion_matrix.TN / negatives
839
840
class PPV(CMScore):
    __doc__ = """Compute positive predictive value
    <http://en.wikipedia.org/wiki/Positive_predictive_value>_ (proportion of
    subjects with positive test results who are correctly diagnosed).
    """ + CMScore.__doc__
    @classmethod
    def compute(cls, confusion_matrix):
        # PPV = TP / (TP + FP); undefined when nothing was predicted
        # positive.
        predicted_pos = confusion_matrix.TP + confusion_matrix.FP
        if predicted_pos < 1e-6:
            import warnings
            warnings.warn("Can't compute PPV: one or both classes have no instances")
            return None
        return confusion_matrix.TP / predicted_pos
854
855
class Precision(PPV):
    __doc__ = """Compute precision <http://en.wikipedia.org/wiki/Precision_and_recall>_
    (retrieved instances that are relevant).
    """ + CMScore.__doc__
    pass # Precision is an alias for PPV: TP / (TP + FP)
861
862
class NPV(CMScore):
    __doc__ = """Compute negative predictive value
    <http://en.wikipedia.org/wiki/Negative_predictive_value>_ (proportion of
    subjects with a negative test result who are correctly diagnosed).
     """ + CMScore.__doc__
    @classmethod
    def compute(cls, confusion_matrix):
        # NPV = TN / (TN + FN); undefined when nothing was predicted
        # negative.
        predicted_neg = confusion_matrix.FN + confusion_matrix.TN
        if predicted_neg < 1e-6:
            import warnings
            warnings.warn("Can't compute NPV: one or both classes have no instances")
            return None
        return confusion_matrix.TN / predicted_neg
876
877
class F1(CMScore):
    __doc__ = """Return F1 score
    <http://en.wikipedia.org/wiki/F1_score>_
    (harmonic mean of precision and recall).
    """ + CMScore.__doc__
    @classmethod
    def compute(cls, confusion_matrix):
        # Harmonic mean of precision and recall; undefined when either
        # component is undefined or their sum is zero.
        prec = Precision.compute(confusion_matrix)
        rec = Recall.compute(confusion_matrix)
        if prec is None or rec is None or prec + rec == 0:
            import warnings
            warnings.warn("Can't compute F1: P + R is zero or not defined")
            return None
        return 2. * prec * rec / (prec + rec)
893
894
class Falpha(CMScore):
    __doc__ = """Compute the alpha-mean of precision and recall over the given confusion
    matrix.
    """ + CMScore.__doc__

    def __init__(self, test_results, alpha=1.):
        # alpha weighs recall against precision; alpha=1 gives F1.
        self.alpha = alpha
        super(Falpha, self).__init__(test_results)

    def compute(self, confusion_matrix):
        """Return the alpha-weighted F measure, or None when precision
        or recall is undefined, or their weighted sum is zero.  (The
        previous version crashed with TypeError/ZeroDivisionError in
        those degenerate cases instead of degrading gracefully like
        :obj:F1.)"""
        p = Precision.compute(confusion_matrix)
        r = Recall.compute(confusion_matrix)
        if p is None or r is None:
            return None
        denom = self.alpha * p + r
        if denom == 0:
            import warnings
            warnings.warn("Can't compute Falpha: alpha * P + R is zero")
            return None
        return (1. + self.alpha) * p * r / denom
908
909
class MCC(CMScore):
    __doc__ = """Compute Matthew correlation coefficient
    <http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>_
    (correlation coefficient between the observed and predicted binary
    classifications).
    """ + CMScore.__doc__
    @classmethod
    def compute(cls, cm):
        # code by Boris Gorelik
        tp, tn, fp, fn = cm.TP, cm.TN, cm.FP, cm.FN
        numerator = tp * tn - fp * fn
        try:
            denominator = math.sqrt((tp + fp) * (tp + fn) *
                                    (tn + fp) * (tn + fn))
            return numerator / denominator
        except ZeroDivisionError:
            # Zero division occurs when there is either no true positives
            # or no true negatives i.e. the problem contains only one
            # type of classes.
            import warnings
            warnings.warn("Can't compute MCC: TP or TN is zero or not defined")
930
931
@deprecated_keywords({"bIsListOfMatrices": "b_is_list_of_matrices"})
def scotts_pi(confusion_matrix, b_is_list_of_matrices=True):
    """Compute Scott's Pi for measuring inter-rater agreement for nominal data

    http://en.wikipedia.org/wiki/Scott%27s_Pi
    Scott's Pi is a statistic for measuring inter-rater reliability for nominal
    raters.

    @param confusion_matrix: confusion matrix, or list of confusion matrices.
                             To obtain non-binary confusion matrix, call
                             Orange.evaluation.scoring.confusion_matrices
                             and set the class_index parameter to -2.
    @param b_is_list_of_matrices: specifies whether confm is list of matrices.
                             This function needs to operate on non-binary
                             confusion matrices, which are represented by python
                             lists, therefore one needs a way to distinguish
                             between a single matrix and list of matrices
    """

    if b_is_list_of_matrices:
        try:
            return [scotts_pi(cm, b_is_list_of_matrices=False)
                    for cm in confusion_matrix]
        except TypeError:
            # Nevermind the parameter, maybe this is a "conventional" binary
            # confusion matrix and bIsListOfMatrices was specified by mistake.
            # (Fixed: this used to call the nonexistent scottsPiSingle with
            # the obsolete keyword, raising NameError instead of recovering.)
            return scotts_pi(confusion_matrix, b_is_list_of_matrices=False)
    else:
        if isinstance(confusion_matrix, ConfusionMatrix):
            confusion_matrix = numpy.array([[confusion_matrix.TP,
                confusion_matrix.FN], [confusion_matrix.FP,
                confusion_matrix.TN]], dtype=float)
        else:
            confusion_matrix = numpy.array(confusion_matrix, dtype=float)

        marginalSumOfRows = numpy.sum(confusion_matrix, axis=0)
        marginalSumOfColumns = numpy.sum(confusion_matrix, axis=1)
        jointProportion = (marginalSumOfColumns + marginalSumOfRows) / \
                           (2. * numpy.sum(confusion_matrix))
        # In the eq. above, 2. is what the Wikipedia page calls
        # the number of annotators. Here we have two annotators:
        # the observed (true) labels (annotations) and the predicted by
        # the learners.

        prExpected = numpy.sum(jointProportion ** 2)
        prActual = numpy.sum(numpy.diag(confusion_matrix)) / \
                   numpy.sum(confusion_matrix)

        ret = (prActual - prExpected) / (1.0 - prExpected)
        return ret
982
# Backward compatibility: obsolete short aliases for the score classes.
sens = Sensitivity
spec = Specificity
precision = Precision
recall = Recall
988
989
990
@deprecated_keywords({"classIndex": "class_index",
                      "unweighted": "ignore_weights"})
def AUCWilcoxon(res, class_index=-1, ignore_weights=False, **argkw):
    """Compute the area under ROC (AUC) and its standard error using
    Wilcoxon's approach proposed by Hanley and McNeal (1982). If
    :obj:class_index is not specified, the first class is used as
    "the positive" and others are negative. The result is a list of
    tuples (aROC, standard error).

    If test results consist of multiple folds, you need to split them using
    :obj:split_by_iterations and perform this test on each fold separately.
    """
    use_weights = res.weights and not ignore_weights
    problists, tots = corn.computeROCCumulative(res, class_index, use_weights)

    n_neg, n_pos = tots[0], tots[1]
    results = []
    for plist in problists:
        pos_above, neg_below = n_pos, 0.
        auc = q1 = q2 = 0.
        for prob in plist:
            d_pos, d_neg = prob[1][1], prob[1][0]
            pos_above -= d_pos
            # Wilcoxon statistic plus the two variance components of
            # Hanley & McNeil (1982).
            auc += d_neg * (pos_above + d_pos / 2.)
            q2 += d_pos * (neg_below ** 2 + neg_below * d_neg + d_neg ** 2 / 3.)
            q1 += d_neg * (pos_above ** 2 + pos_above * d_pos + d_pos ** 2 / 3.)

            neg_below += d_neg

        auc /= n_pos * n_neg
        q1 /= n_neg * n_pos ** 2
        q2 /= n_pos * n_neg ** 2

        se = math.sqrt((auc * (1 - auc) + (n_pos - 1) * (q1 - auc ** 2) +
                        (n_neg - 1) * (q2 - auc ** 2)) / (n_pos * n_neg))
        results.append((auc, se))
    return results
1030
1031AROC = AUCWilcoxon # for backward compatibility, AROC is obsolete
1032
1033
@deprecated_keywords({"classIndex": "class_index",
                      "unweighted": "ignore_weights"})
def compare_2_AUCs(res, lrn1, lrn2, class_index=-1,
                   ignore_weights=False, **argkw):
    """Compare the AUCs of learners :obj:lrn1 and :obj:lrn2 (indices
    into :obj:res); the statistics are computed by the C helper
    :obj:corn.compare2ROCs."""
    return corn.compare2ROCs(res, lrn1, lrn2, class_index,
                             res.weights and not ignore_weights)
1040
# obsolete alias of compare_2_AUCs, kept for backward compatibility
compare_2_AROCs = compare_2_AUCs
1043
1044
@deprecated_keywords({"classIndex": "class_index"})
def compute_ROC(res, class_index=-1):
    """Compute a ROC curve as a list of (x, y) tuples, where x is
    1-specificity and y is sensitivity.
    """
    problists, tots = corn.computeROCCumulative(res, class_index)

    n_neg, n_pos = tots[0], tots[1]
    curves = []

    for plist in problists:
        # Start in the upper-right corner: everything predicted positive.
        points = [(1., 1.)]
        tp, tn = n_pos, 0.
        fn, fp = 0., n_neg
        for prob in plist:
            d_pos, d_neg = prob[1][1], prob[1][0]
            # positives move from TP to FN
            tp -= d_pos
            fn += d_pos
            # negatives move from FP to TN
            tn += d_neg
            fp -= d_neg

            sensitivity = tp / (tp + fn)
            specificity = tn / (fp + tn)
            points.append((1 - specificity, sensitivity))
        curves.append(points)

    return curves
1074
1075## TC's implementation of algorithms, taken from:
1076## T Fawcett: ROC Graphs: Notes and Practical Considerations for
1077## Data Mining Researchers, submitted to KDD Journal.
def ROC_slope(point1, point2):
    """Return the slope of the segment between two ROC points given as
    (x, y, fscore) triples; a huge constant stands in for the slope of
    a vertical segment.  (Rewritten without tuple parameter unpacking,
    which was removed from the language by PEP 3113; the call interface
    is unchanged.)"""
    P1x, P1y, _ = point1
    P2x, P2y, _ = point2
    if P1x == P2x:
        return 1e300
    return (P1y - P2y) / (P1x - P2x)
1082
1083
1084@deprecated_keywords({"keepConcavities": "keep_concavities"})
1086    if keep_concavities:
1087        R.append(P)
1088    else:
1089        while True:
1090            if len(R) < 2:
1091                R.append(P)
1092                return R
1093            else:
1094                T = R.pop()
1095                T2 = R[-1]
1096                if ROC_slope(T2, T) > ROC_slope(T, P):
1097                    R.append(T)
1098                    R.append(P)
1099                    return R
1100    return R
1101
1102
@deprecated_keywords({"classIndex": "class_index",
                      "keepConcavities": "keep_concavities"})
def TC_compute_ROC(res, class_index=-1, keep_concavities=1):
    """Compute a ROC curve (Fawcett's algorithm) for each learner;
    each curve is a list of (fpr, tpr, threshold) points."""
    problists, tots = corn.computeROCCumulative(res, class_index)

    results = []
    P, N = tots[1], tots[0]

    for plist in problists:
        ## corn gives an increasing by scores list, we need a decreasing
        plist.reverse()
        TP = 0.
        FP = 0.
        curve = []
        # "infinity" score at 0., 0.; f is pre-initialized so the final
        # ROC_add_point below no longer raises NameError when plist is
        # empty.
        f = fPrev = 10e300
        for prob in plist:
            f = prob[0]
            if f != fPrev:
                tpr = TP / P if P else 0.
                fpr = FP / N if N else 0.
                curve = ROC_add_point((fpr, tpr, fPrev), curve,
                                      keep_concavities)
                fPrev = f
            thisPos, thisNeg = prob[1][1], prob[1][0]
            TP += thisPos
            FP += thisNeg
        # close the curve with the point for the last observed score
        tpr = TP / P if P else 0.
        fpr = FP / N if N else 0.
        curve = ROC_add_point((fpr, tpr, f), curve, keep_concavities)
        results.append(curve)

    return results
1147
1148## returns a list of points at the intersection of the tangential
1149## iso-performance line and the given ROC curve
1150## for given values of FPcost, FNcost and pval
def TC_best_thresholds_on_ROC_curve(FPcost, FNcost, pval, curve):
    """Return the curve points where the tangential iso-performance
    line (for the given FP cost, FN cost and prior of the positive
    class) touches the ROC curve."""
    # slope of the iso-performance line
    m = (FPcost * (1. - pval)) / (FNcost * pval)

    ## anchor the iso-performance line in point (0., 1.)
    x0, y0 = 0., 1.
    x1, y1 = 1., 1. + m
    d01 = math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)

    ## collect all curve points (almost) closest to the line
    a = x0 * y1 - x1 * y0
    best_dist = None
    closest = []
    for x, y, fscore in curve:
        dist = abs(((y0 - y1) * x + (x1 - x0) * y + a) / d01)
        if best_dist is None or dist < best_dist:
            best_dist = dist
            closest = [(x, y, fscore)]
        elif abs(dist - best_dist) <= 0.0001: ## close enough
            closest.append((x, y, fscore))
    return closest
1174
def frange(start, end=None, inc=None):
    """A range function that accepts float increments; the end point
    is always included as the last element."""
    # single-argument form: frange(stop) counts from 0
    if end is None:
        end = start + 0.
        start = 0.

    if inc is None or inc == 0:
        inc = 1.

    values = [start]
    while True:
        candidate = start + len(values) * inc
        # clamp the final step exactly onto the end point
        if (inc > 0 and candidate >= end) or (inc < 0 and candidate <= end):
            values.append(end)
            break
        values.append(candidate)

    return values
1197
1198## input ROCcurves are of form [ROCcurves1, ROCcurves2, ... ROCcurvesN],
1199## where ROCcurvesX is a set of ROC curves,
1200## where a (one) ROC curve is a set of (FP, TP) points
1201##
1202## for each (sub)set of input ROC curves
1203## returns the average ROC curve and an array of (vertical) standard deviations
@deprecated_keywords({"ROCcurves": "roc_curves"})
def TC_vertical_average_ROC(roc_curves, samples=10):
    """Vertically average each set of ROC curves: sample ``samples``
    FP positions and average the member curves' TP values at each.

    :return: (average curves, per-sample vertical standard deviations).

    The Python-2-only tuple parameters and ``raise E, msg`` syntax of
    the original were replaced with forms valid in both Python 2 and 3;
    behavior is unchanged.
    """
    def INTERPOLATE(P1, P2, X):
        # Linearly interpolate TP between two ROC points at FP == X.
        P1x, P1y, P1fscore = P1
        P2x, P2y, P2fscore = P2
        if (P1x == P2x) or P1x < X > P2x or P1x > X < P2x:
            raise ValueError("assumptions for interpolation are not met: P1 = %f,%f P2 = %f,%f X = %f" % (P1x, P1y, P2x, P2y, X))
        dx = float(P2x) - float(P1x)
        dy = float(P2y) - float(P1y)
        m = dy / dx
        return P1y + m * (X - P1x)

    def TP_FOR_FP(FPsample, ROC, npts):
        # TP value of the curve at FP == FPsample, interpolating
        # between neighbouring points when there is no exact match.
        i = 0
        while i < npts - 1:
            (fp, _, _) = ROC[i + 1]
            if fp <= FPsample:
                i += 1
            else:
                break
        (fp, tp, _) = ROC[i]
        if fp == FPsample:
            return tp
        elif fp < FPsample and i + 1 < len(ROC):
            return INTERPOLATE(ROC[i], ROC[i + 1], FPsample)
        elif fp < FPsample and i + 1 == len(ROC): # return the last
            return ROC[i][1]
        raise ValueError("cannot compute: TP_FOR_FP in TC_vertical_average_ROC")
        #return 0.

    average = []
    stdev = []
    for ROCS in roc_curves:
        npts = [len(c) for c in ROCS]
        nrocs = len(ROCS)

        TPavg = []
        TPstd = []
        for FPsample in frange(0., 1., 1. / samples):
            TPsum = [TP_FOR_FP(FPsample, ROCS[i], npts[i])
                     for i in range(nrocs)]
            TPavg.append((FPsample, statc.mean(TPsum)))
            # standard deviation only makes sense for > 1 curve
            if len(TPsum) > 1:
                stdv = statc.std(TPsum)
            else:
                stdv = 0.
            TPstd.append(stdv)

        average.append(TPavg)
        stdev.append(TPstd)

    return average, stdev
1258
1259## input ROCcurves are of form [ROCcurves1, ROCcurves2, ... ROCcurvesN],
1260## where ROCcurvesX is a set of ROC curves,
1261## where a (one) ROC curve is a set of (FP, TP) points
1262##
1263## for each (sub)set of input ROC curves
1264## returns the average ROC curve, an array of vertical standard deviations and an array of horizontal standard deviations
@deprecated_keywords({"ROCcurves": "roc_curves"})
def TC_threshold_average_ROC(roc_curves, samples=10):
    """Average each set of ROC curves across thresholds: ``samples``
    threshold values are drawn from the pooled, descending list of all
    member curves' scores, and the curves' points at each threshold
    are averaged.

    :return: (average curves, vertical stdevs, horizontal stdevs).
    """
    def POINT_AT_THRESH(ROC, npts, thresh):
        # First point (walking down the score-sorted curve) whose score
        # does not exceed thresh; the last point if none qualifies.
        i = 0
        while i < npts - 1:
            (px, py, pfscore) = ROC[i]
            if pfscore > thresh:
                i += 1
            else:
                break
        return ROC[i]

    average = []
    stdevV = []
    stdevH = []
    for ROCS in roc_curves:
        npts = []
        for c in ROCS:
            npts.append(len(c))
        nrocs = len(ROCS)

        # Pool the scores of all member curves, sorted descending;
        # duplicates are deliberately kept (see commented-out check).
        T = []
        for c in ROCS:
            for (px, py, pfscore) in c:
##                try:
##                    T.index(pfscore)
##                except:
                T.append(pfscore)
        T.sort()
        T.reverse() ## ugly

        TPavg = []
        TPstdV = []
        TPstdH = []
        for tidx in frange(0, (len(T) - 1.), float(len(T)) / samples):
            FPsum = []
            TPsum = []
            for i in range(nrocs):
                (fp, tp, _) = POINT_AT_THRESH(ROCS[i], npts[i], T[int(tidx)])
                FPsum.append(fp)
                TPsum.append(tp)
            TPavg.append((statc.mean(FPsum), statc.mean(TPsum)))
            ## vertical standard deviation
            if len(TPsum) > 1:
                stdv = statc.std(TPsum)
            else:
                stdv = 0.
            TPstdV.append( stdv )
            ## horizontal standard deviation
            if len(FPsum) > 1:
                stdh = statc.std(FPsum)
            else:
                stdh = 0.
            TPstdH.append( stdh )

        average.append(TPavg)
        stdevV.append(TPstdV)
        stdevH.append(TPstdH)

    return average, stdevV, stdevH
1325
1326## Calibration Curve
1327## returns an array of (curve, yesClassPredictions, noClassPredictions)
1328## elements, where:
1329##  - curve is an array of points (x, y) on the calibration curve
1330##  - yesClassRugPoints is an array of (x, 1) points
1331##  - noClassRugPoints is an array of (x, 0) points
1332@deprecated_keywords({"classIndex": "class_index"})
1333def compute_calibration_curve(res, class_index=-1):
1334    ## merge multiple iterations into one
1335    mres = Orange.evaluation.testing.ExperimentResults(1, res.classifier_names,
1336        res.class_values, res.weights, classifiers=res.classifiers,
1338    for te in res.results:
1339        mres.results.append(te)
1340
1341    problists, tots = corn.computeROCCumulative(mres, class_index)
1342
1343    results = []
1344
1345    bins = 10 ## divide interval between 0. and 1. into N bins
1346
1347    for plist in problists:
1348        yesClassRugPoints = []
1349        noClassRugPoints = []
1350
1351        yesBinsVals = [0] * bins
1352        noBinsVals = [0] * bins
1353        for (f, (thisNeg, thisPos)) in plist:
1354            yesClassRugPoints.append((f, thisPos)) # 1.
1355            noClassRugPoints.append((f, thisNeg)) # 1.
1356
1357            index = int(f * bins )
1358            index = min(index, bins - 1) ## just in case for value 1.
1359            yesBinsVals[index] += thisPos
1360            noBinsVals[index] += thisNeg
1361
1362        curve = []
1363        for cn in range(bins):
1364            f = float(cn * 1. / bins) + (1. / 2. / bins)
1365            yesVal = yesBinsVals[cn]
1366            noVal = noBinsVals[cn]
1367            allVal = yesVal + noVal
1368            if allVal == 0.: continue
1369            y = float(yesVal) / float(allVal)
1370            curve.append((f,  y))
1371
1372        ## smooth the curve
1373        maxnPoints = 100
1374        if len(curve) >= 3:
1375#            loessCurve = statc.loess(curve, -3, 0.6)
1376            loessCurve = statc.loess(curve, maxnPoints, 0.5, 3)
1377        else:
1378            loessCurve = curve
1379        clen = len(loessCurve)
1380        if clen > maxnPoints:
1381            df = clen / maxnPoints
1382            if df < 1: df = 1
1383            curve = [loessCurve[i]  for i in range(0, clen, df)]
1384        else:
1385            curve = loessCurve
1386        ## remove the third value (variance of epsilon?) that suddenly
1387        ## appeared in the output of the statc.loess function
1388        curve = [c[:2] for c in curve]
1389        results.append((curve, yesClassRugPoints, noClassRugPoints))
1390
1391    return results
1392
1393
1394## Lift Curve
1395## returns an array of curve elements, where:
1396##  - curve is an array of points ((TP + FP) / (P + N), TP / P, (th, FP / N))
1397##    on the Lift Curve
1398@deprecated_keywords({"classIndex": "class_index"})
1399def compute_lift_curve(res, class_index=-1):
1400    ## merge multiple iterations into one
1401    mres = Orange.evaluation.testing.ExperimentResults(1, res.classifier_names,
1402        res.class_values, res.weights, classifiers=res.classifiers,
1404    for te in res.results:
1405        mres.results.append(te)
1406
1407    problists, tots = corn.computeROCCumulative(mres, class_index)
1408
1409    results = []
1410    P, N = tots[1], tots[0]
1411    for plist in problists:
1412        ## corn gives an increasing by scores list, we need a decreasing
1413        plist.reverse()
1414        TP = 0.
1415        FP = 0.
1416        curve = [(0., 0., (10e300, 0.))]
1417        for (f, (thisNeg, thisPos)) in plist:
1418            TP += thisPos
1419            FP += thisNeg
1420            curve.append(((TP + FP) / (P + N), TP, (f, FP / (N or 1))))
1421        results.append(curve)
1422
1423    return P, N, results
1424
1425
class CDT:
    """Store the number of concordant (C), discordant (D) and tied (T) pairs."""
    def __init__(self, C=0., D=0., T=0.):
        self.C = C
        self.D = D
        self.T = T
1430
def is_CDT_empty(cdt):
    """Tell whether the CDT counts amount to (practically) no pairs."""
    total_pairs = cdt.C + cdt.D + cdt.T
    return total_pairs < 1e-20
1433
1434
@deprecated_keywords({"classIndex": "class_index",
                      "unweighted": "ignore_weights"})
def compute_CDT(res, class_index=-1, ignore_weights=False, **argkw):
    """Obsolete, don't use.

    Compute concordant/discordant/tied pair counts per learner, summed
    over iterations; falls back to a single computation over the whole
    result set when some learner's counts come out empty.
    """
    if class_index < 0:
        if res.baseClass >= 0:
            class_index = res.baseClass
        else:
            class_index = 1

    useweights = res.weights and not ignore_weights
    # accepted for backward compatibility; currently unused
    weightByClasses = argkw.get("weightByClasses", True)

    if res.number_of_iterations > 1:
        CDTs = [CDT() for _ in range(res.number_of_learners)]
        iterationExperiments = split_by_iterations(res)
        for exp in iterationExperiments:
            expCDTs = corn.computeCDT(exp, class_index, useweights)
            for i in range(len(CDTs)):
                CDTs[i].C += expCDTs[i].C
                CDTs[i].D += expCDTs[i].D
                CDTs[i].T += expCDTs[i].T
        # If any learner ended up with no pairs, recompute over the
        # unsplit results.  (Fixed: the original tested CDTs[0] on
        # every pass, ignoring the loop index i.)
        for i in range(res.number_of_learners):
            if is_CDT_empty(CDTs[i]):
                return corn.computeCDT(res, class_index, useweights)

        return CDTs
    else:
        return corn.computeCDT(res, class_index, useweights)
1464
1465# Backward compatibility
def replace_use_weights(fun):
    """Decorator translating the obsolete ``useWeights`` keyword into
    ``ignore_weights`` (note the inverted meaning)."""
    if environ.orange_no_deprecated_members:
        # deprecated members are disabled; leave the function untouched
        return fun

    @functools.wraps(fun)
    def wrapper(*args, **kwargs):
        # pop() so an explicit useWeights=None stays silently dropped,
        # exactly as before
        legacy_value = kwargs.pop("useWeights", None)
        if legacy_value is not None:
            deprecation_warning("useWeights", "ignore_weights")
            kwargs["ignore_weights"] = not legacy_value
        return fun(*args, **kwargs)
    return wrapper
1478
1479class AUC(list):
1480    """
1481    Compute the area under ROC curve given a set of experimental results.
1482    If testing consisted of multiple folds, each fold is scored and the
1483    average score is returned. If a fold contains only instances with the
1484    same class value, folds will be merged.
1485
1486    :param test_results: test results to score
1487    :param ignore_weights: ignore instance weights when calculating score
1488    :param multiclass: tells what kind of averaging to perform if the target
1489                       class has more than 2 values.
1490    """
1491
1492    #!Compute AUC for each pair of classes (ignoring instances of all other
1493    #!classes) and average the results, weighting them by the number of
1494    #!pairs of instances from these two classes (e.g. by the product of
1495    #!probabilities of the two classes). AUC computed in this way still
1496    #!behaves as the concordance index, e.g., gives the probability that two
1497    #!randomly chosen instances from different classes will be correctly
1498    #!recognized (if the classifier knows from which two classes the
1499    #!instances came).
1500    ByWeightedPairs = 0
1501
1502    #!Similar to ByWeightedPairs, except that the average over class pairs
1503    #!is not weighted. This AUC is, like the binary version, independent of
1504    #!class distributions, but it is not related to the concordance index
1505    #!any more.
1506    ByPairs = 1
1507
1508    #!For each class, it computes AUC for this class against all others (that
1509    #!is, treating other classes as one class). The AUCs are then averaged by
1510    #!the class probabilities. This is related to the concordance index in
1511    #!which we test the classifier's (average) capability of distinguishing
1512    #!the instances from a specified class from those that come from other
1513    #!classes.
1514    #!Unlike the binary AUC, the measure is not independent of class
1515    #!distributions.
1516    WeightedOneAgainstAll = 2
1517
1518    #!Similar to weighted_one_against_all, except that the average
1519    #!is not weighted.
1520    OneAgainstAll = 3
1521
    @replace_use_weights
    @deprecated_keywords({"method": "multiclass"})
    def __init__(self, test_results=None, multiclass=ByWeightedPairs, ignore_weights=False):
        # See the class docstring for the meaning of the parameters;
        # multiclass selects one of the class-level averaging constants.
        super(AUC, self).__init__()

        self.ignore_weights=ignore_weights
        self.method=multiclass

        if test_results is not None:
            # Compute immediately; the instance is a list with one AUC
            # per learner.
            self[:] = self.__call__(test_results)
1533
1534    def __call__(self, test_results):
1535        if len(test_results.class_values) < 2:
1536            raise ValueError("Cannot compute AUC on a single-class problem")
1537        elif len(test_results.class_values) == 2:
1538            return self._compute_for_binary_class(test_results)
1539        else:
1540            return self._compute_for_multi_value_class(test_results, self.method)
1541
1542    def _compute_for_binary_class(self, res):
1543        """AUC for binary classification problems"""
1544        if res.number_of_iterations > 1:
1545            return self._compute_for_multiple_folds(
1546                self._compute_one_class_against_all,
1547                split_by_iterations(res),
1548                (-1, res, res.number_of_iterations))
1549        else:
1550            return self._compute_one_class_against_all(res, -1)[0]
1551
1552    def _compute_for_multi_value_class(self, res, method=0):
1553        """AUC for multiclass classification problems"""
1554        numberOfClasses = len(res.class_values)
1555
1556        if res.number_of_iterations > 1:
1557            iterations = split_by_iterations(res)
1558            all_ite = res
1559        else:
1560            iterations = [res]
1561            all_ite = None
1562
1563        # by pairs
1564        sum_aucs = [0.] * res.number_of_learners
1565        usefulClassPairs = 0.
1566
1567        prob = None
1568        if method in [self.ByWeightedPairs, self.WeightedOneAgainstAll]:
1569            prob = class_probabilities_from_res(res)
1570
1571        if method in [self.ByWeightedPairs, self.ByPairs]:
1572            for classIndex1 in range(numberOfClasses):
1573                for classIndex2 in range(classIndex1):
1574                    subsum_aucs = self._compute_for_multiple_folds(
1575                        self._compute_one_class_against_another, iterations,
1576                        (classIndex1, classIndex2, all_ite,
1577                        res.number_of_iterations))
1578                    if subsum_aucs:
1579                        if method == self.ByWeightedPairs:
1580                            p_ij = prob[classIndex1] * prob[classIndex2]
1581                            subsum_aucs = [x * p_ij  for x in subsum_aucs]
1582                            usefulClassPairs += p_ij
1583                        else:
1584                            usefulClassPairs += 1
1585                        sum_aucs = map(add, sum_aucs, subsum_aucs)
1586        else:
1587            for classIndex in range(numberOfClasses):
1588                subsum_aucs = self._compute_for_multiple_folds(
1589                    self._compute_one_class_against_all,
1590                    iterations, (classIndex, all_ite,
1591                                 res.number_of_iterations))
1592                if subsum_aucs:
1593                    if method == self.ByWeightedPairs:
1594                        p_i = prob[classIndex]
1595                        subsum_aucs = [x * p_i  for x in subsum_aucs]
1596                        usefulClassPairs += p_i
1597                    else:
1598                        usefulClassPairs += 1
1599                    sum_aucs = map(add, sum_aucs, subsum_aucs)
1600
1601        if usefulClassPairs > 0:
1602            sum_aucs = [x/usefulClassPairs for x in sum_aucs]
1603
1604        return sum_aucs
1605
1606    # computes the average AUC over folds using "AUCcomputer" (AUC_i or AUC_ij)
1607    # it returns the sum of what is returned by the computer,
1608    # unless at a certain fold the computer has to resort to computing
1609    # over all folds or even this failed;
1610    # in these cases the result is returned immediately
1611    def _compute_for_multiple_folds(self, auc_computer, iterations,
1612                                 computer_args):
1613        """Compute the average AUC over folds using :obj:auc_computer."""
1614        subsum_aucs = [0.] * iterations[0].number_of_learners
1615        for ite in iterations:
1616            aucs, foldsUsed = auc_computer(*(ite, ) + computer_args)
1617            if not aucs:
1618                import warnings
1619                warnings.warn("AUC cannot be computed (all instances belong to the same class).")
1620                return
1621            if not foldsUsed:
1622                self[:] = aucs
1623                return aucs
1624            subsum_aucs = map(add, subsum_aucs, aucs)
1625        return subsum_aucs
1626
1627    # Computes AUC
1628    # in multivalued class problem, AUC is computed as one against all
1629    # results over folds are averages
1630    # if some folds examples from one class only, the folds are merged
1631    def _compute_for_single_class(self, res, class_index):
1632        if res.number_of_iterations > 1:
1633            return self._compute_for_multiple_folds(
1634                self._compute_one_class_against_all, split_by_iterations(res),
1635                (class_index, res, res.number_of_iterations))
1636        else:
1637            return self._compute_one_class_against_all(res, class_index)
1638
1639    # Computes AUC for a pair of classes (as if there were no other classes)
1640    # results over folds are averages
1641    # if some folds have examples from one class only, the folds are merged
1642    def _compute_for_pair_of_classes(self, res, class_index1, class_index2):
1643        if res.number_of_iterations > 1:
1644            return self._compute_for_multiple_folds(
1645                self._compute_one_class_against_another,
1646                split_by_iterations(res),
1647                (class_index1, class_index2, res, res.number_of_iterations))
1648        else:
1649            return self._compute_one_class_against_another(res, class_index1,
1650                                                    class_index2)
1651
1652    # computes AUC between class i and the other classes
1653    # (treating them as the same class)
1654    @deprecated_keywords({"classIndex": "class_index",
1655                          "divideByIfIte": "divide_by_if_ite"})
1656    def _compute_one_class_against_all(self, ite, class_index, all_ite=None,
1657                                      divide_by_if_ite=1.0):
1658        """Compute AUC between class i and all the other classes)"""
1659        return self._compute_auc(corn.computeCDT, ite, all_ite,
1660            divide_by_if_ite, (class_index, not self.ignore_weights))
1661
1662
1663    # computes AUC between classes i and j as if there are no other classes
1664    def _compute_one_class_against_another(
1665        self, ite, class_index1, class_index2,
1666        all_ite=None, divide_by_if_ite=1.):
1667        """
1668        Compute AUC between classes i and j as if there are no other classes.
1669        """
1670        return self._compute_auc(corn.computeCDTPair, ite,
1671            all_ite, divide_by_if_ite,
1672            (class_index1, class_index2, not self.ignore_weights))
1673
1674    # computes AUC using a specified 'cdtComputer' function
1675    # It tries to compute AUCs from 'ite' (examples from a single iteration)
1676    # and, if C+D+T=0, from 'all_ite' (entire test set). In the former case,
1677    # the AUCs are divided by 'divideByIfIte'.
1678    # Additional flag is returned which is True in the former case,
1679    # or False in the latter.
1680    @deprecated_keywords({"cdt_computer": "cdtComputer",
1681                          "divideByIfIte": "divide_by_if_ite",
1682                          "computerArgs": "computer_args"})
1683    def _compute_auc(self, cdt_computer, ite, all_ite, divide_by_if_ite,
1684                     computer_args):
1685        """
1686        Compute AUC using a :obj:cdt_computer.
1687        """
1688        cdts = cdt_computer(*(ite, ) + computer_args)
1689        if not is_CDT_empty(cdts[0]):
1690            return [(cdt.C + cdt.T / 2) / (cdt.C + cdt.D + cdt.T) /
1691                    divide_by_if_ite for cdt in cdts], True
1692
1693        if all_ite:
1694            cdts = cdt_computer(*(all_ite, ) + computer_args)
1695            if not is_CDT_empty(cdts[0]):
1696                return [(cdt.C + cdt.T / 2) / (cdt.C + cdt.D + cdt.T)
1697                        for cdt in cdts], False
1698
1699        return False, False
1700
class AUC_for_single_class(AUC):
    """
    Compute AUC where the class with the given class_index is singled
    out and all other classes are treated as a single class.
    """
    def __init__(self, test_results=None, class_index=-1, ignore_weights=False):
        # explicit index wins; otherwise fall back to the results'
        # base class, and finally to class 1
        if class_index >= 0:
            self.class_index = class_index
        elif test_results and test_results.base_class >= 0:
            self.class_index = test_results.base_class
        else:
            self.class_index = 1

        super(AUC_for_single_class, self).__init__(test_results, ignore_weights=ignore_weights)

    def __call__(self, test_results):
        return self._compute_for_single_class(test_results, self.class_index)
1719
1720
class AUC_for_pair_of_classes(AUC):
    """
    Computes AUC between a pair of classes, ignoring instances from all
    other classes.
    """
    def __init__(self, test_results, class_index1, class_index2, ignore_weights=False):
        # remember the two compared classes for later calls
        self.class_index1 = class_index1
        self.class_index2 = class_index2
        super(AUC_for_pair_of_classes, self).__init__(
            test_results, ignore_weights=ignore_weights)

    def __call__(self, test_results):
        return self._compute_for_pair_of_classes(
            test_results, self.class_index1, self.class_index2)
1734
1735
class AUC_matrix(AUC):
    """
    Compute a (lower diagonal) matrix with AUCs for all pairs of classes.
    If there are empty classes, the corresponding elements in the matrix
    are -1.
    """

    def __call__(self, test_results):
        n_classes = len(test_results.class_values)
        n_learners = test_results.number_of_learners
        if test_results.number_of_iterations > 1:
            iterations = split_by_iterations(test_results)
            all_ite = test_results
        else:
            iterations = [test_results]
            all_ite = None
        # one row of per-class lists for every learner
        aucs = [[[] for _ in range(n_classes)]
                for _ in range(n_learners)]
        for class1 in range(n_classes):
            for class2 in range(class1):
                pair_aucs = self._compute_for_multiple_folds(
                    self._compute_one_class_against_another, iterations,
                    (class1, class2, all_ite,
                     test_results.number_of_iterations))
                # -1 marks pairs whose AUC could not be computed
                values = pair_aucs if pair_aucs else [-1] * n_learners
                for lrn in range(n_learners):
                    aucs[lrn][class1].append(values[lrn])
        return aucs
1765
1766#Backward compatibility
@replace_use_weights
def AUC_binary(res, ignore_weights=False):
    """Deprecated: AUC for a binary class problem (use :obj:AUC instead)."""
    scorer = deprecated_function_name(AUC)(ignore_weights=ignore_weights)
    scorer._compute_for_binary_class(res)
    return scorer
1772
@replace_use_weights
def AUC_multi(res, ignore_weights=False, method=0):
    """Deprecated: multi-class AUC (use :obj:AUC with a method argument)."""
    scorer = deprecated_function_name(AUC)(
        ignore_weights=ignore_weights, method=method)
    scorer._compute_for_multi_value_class(res)
    return scorer
1779
1780
@deprecated_keywords({"AUCcomputer": "auc_computer",
                      "computerArgs": "computer_args"})
def AUC_iterations(auc_computer, iterations, computer_args):
    """Deprecated helper: accumulate AUC over iterations (see :obj:AUC)."""
    scorer = deprecated_function_name(AUC)()
    scorer._compute_for_multiple_folds(auc_computer, iterations,
                                       computer_args)
    return scorer
1787
def AUC_x(cdtComputer, ite, all_ite, divide_by_if_ite, computer_args):
    """Deprecated helper: AUC from a CDT-computing function (see :obj:AUC)."""
    scorer = deprecated_function_name(AUC)()
    return scorer._compute_auc(cdtComputer, ite, all_ite, divide_by_if_ite,
                               computer_args)
1793
@replace_use_weights
def AUC_i(ite, class_index, ignore_weights=False, all_ite=None,
          divide_by_if_ite=1.):
    """
    Deprecated: AUC of a single class against all the other classes.

    FIX: this used to delegate to the pair-wise computation
    (_compute_one_class_against_another) with only one class index, so
    the required class_index2 argument was never supplied and every call
    raised TypeError; it now calls the one-against-all computation, as
    the name implies (cf. AUC_ij below, which handles the pair case).
    """
    auc = deprecated_function_name(AUC)(ignore_weights=ignore_weights)
    return auc._compute_one_class_against_all(
        ite, class_index, all_ite=all_ite, divide_by_if_ite=divide_by_if_ite)
1801
1802
@replace_use_weights
def AUC_ij(ite, class_index1, class_index2, ignore_weights=False,
           all_ite=None, divide_by_if_ite=1.):
    """Deprecated: AUC between two classes, ignoring all the others."""
    auc = deprecated_function_name(AUC)(ignore_weights=ignore_weights)
    return auc._compute_one_class_against_another(
        ite, class_index1, class_index2,
        all_ite=all_ite, divide_by_if_ite=divide_by_if_ite)
1810
# Deprecated aliases that adapt the historical camelCase keyword names
# (classIndex, useWeights, ...) to the new-style classes above; kept for
# backward compatibility only.
AUC_single = replace_use_weights(
             deprecated_keywords({"classIndex": "class_index"})(
             deprecated_function_name(AUC_for_single_class)))
AUC_pair = replace_use_weights(
           deprecated_keywords({"classIndex1": "class_index1",
                                "classIndex2": "class_index2"})(
           deprecated_function_name(AUC_for_pair_of_classes)))
# NOTE: this rebinds the AUC_matrix *class* defined above to its
# use_weights-compatible wrapper.
AUC_matrix = replace_use_weights(AUC_matrix)
1819
1820
@deprecated_keywords({"unweighted": "ignore_weights"})
def McNemar(res, ignore_weights=False, **argkw):
    """
    Compute a triangular matrix with McNemar statistics for each pair of
    classifiers. The statistics is distributed by chi-square distribution with
    one degree of freedom; critical value for 5% significance is around 3.84.
    """
    n_learners = res.number_of_learners
    # mcm[i][j] accumulates the (possibly weighted) number of instances
    # classified correctly by learner i but not by learner j
    mcm = [[0.] * n_learners for _ in range(n_learners)]

    count_plain = not res.weights or ignore_weights
    for tex in res.results:
        actual = tex.actual_class
        classes = tex.classes
        delta = 1 if count_plain else tex.weight
        for l1 in range(n_learners):
            for l2 in range(l1, n_learners):
                if classes[l1] == actual:
                    if classes[l2] != actual:
                        mcm[l1][l2] += delta
                elif classes[l2] == actual:
                    mcm[l2][l1] += delta

    # turn the disagreement counts into chi-square distributed statistics
    # (with a continuity correction of 1)
    for l1 in range(n_learners):
        for l2 in range(l1, n_learners):
            disagreements = mcm[l1][l2] + mcm[l2][l1]
            if disagreements:
                mcm[l2][l1] = \
                    (abs(mcm[l1][l2] - mcm[l2][l1]) - 1)**2 / disagreements
            else:
                mcm[l2][l1] = 0

    # keep only the lower triangle of the matrix
    for l1 in range(n_learners):
        mcm[l1] = mcm[l1][:l1]

    return mcm
1868
1869
def McNemar_of_two(res, lrn1, lrn2, ignore_weights=False):
    """
    Compute the McNemar statistic for a pair of classifiers, specified
    by the learner indices lrn1 and lrn2.

    :param res: experiment results with stored predictions.
    :param lrn1: index of the first compared learner.
    :param lrn2: index of the second compared learner.
    :param ignore_weights: ignore instance weights even when the results
        are weighted.
    :return: continuity-corrected McNemar statistic (chi-square
        distributed with one degree of freedom); 0 when the two
        classifiers never disagree.

    FIX: the weighted and unweighted branches were swapped (cf. McNemar
    above): instance weights were read in the unweighted case and every
    instance counted 1 in the weighted one.
    """
    tf = ft = 0.
    if not res.weights or ignore_weights:
        # unweighted: each instance counts 1
        for tex in res.results:
            actual = tex.actual_class
            if tex.classes[lrn1] == actual:
                if tex.classes[lrn2] != actual:
                    tf += 1.
            elif tex.classes[lrn2] == actual:
                ft += 1.
    else:
        # weighted: accumulate instance weights
        for tex in res.results:
            actual = tex.actual_class
            if tex.classes[lrn1] == actual:
                if tex.classes[lrn2] != actual:
                    tf += tex.weight
            elif tex.classes[lrn2] == actual:
                ft += tex.weight

    su = tf + ft
    if su:
        return (abs(tf - ft) - 1)**2 / su
    else:
        return 0
1898
1899
def Friedman(res, stat=CA):
    """
    Compare classifiers with the Friedman test, treating folds as
    different examples. Returns F, p and average ranks.
    """
    per_fold = [stat(r) for r in split_by_iterations(res)]

    N = len(per_fold)
    k = len(per_fold[0])
    sums = [0.] * k
    for scores in per_fold:
        ranks = [k - x + 1 for x in statc.rankdata(scores)]
        if stat == Brier_score:
            # lower Brier score is better, so flip the ranks back
            ranks = [k + 1 - x for x in ranks]
        sums = [s + r for s, r in zip(sums, ranks)]

    T = sum(x * x for x in sums)
    sums = [x / N for x in sums]

    F = 12. / (N * k * (k + 1)) * T - 3 * N * (k + 1)

    return F, statc.chisqprob(F, k - 1), sums
1923
1924
def Wilcoxon_pairs(res, avgranks, stat=CA):
    """
    Return a triangular matrix, where element[i][j] stores significance of
    difference between the i-th and the j-th classifier, as computed by the
    Wilcoxon test. The element is positive if the i-th is better than the j-th,
    negative if it is worse, and 1 if they are equal.
    Arguments are ExperimentResults, average ranks (as returned by Friedman)
    and, optionally, a statistics; greater values should mean better results.
    """
    per_fold = [stat(r) for r in split_by_iterations(res)]
    k = len(per_fold[0])

    matrix = []
    for m1 in range(k):
        row = []
        for m2 in range(m1 + 1, k):
            _, p = statc.wilcoxont([f[m1] for f in per_fold],
                                   [f[m2] for f in per_fold])
            # sign of the entry follows the better (lower) average rank
            if avgranks[m1] < avgranks[m2]:
                row.append(p)
            elif avgranks[m2] < avgranks[m1]:
                row.append(-p)
            else:
                row.append(1)
        matrix.append(row)
    return matrix
1951
1952
@deprecated_keywords({"allResults": "all_results",
                      "noConfidence": "no_confidence"})
def plot_learning_curve_learners(file, all_results, proportions, learners,
                                 no_confidence=0):
    """Like plot_learning_curve, but derives curve titles from learners."""
    labels = [Orange.misc.getobjectname(learners[i], "Learner %i" % i)
              for i in range(len(learners))]
    plot_learning_curve(file, all_results, proportions, labels, no_confidence)
1960
1961
@deprecated_keywords({"allResults": "all_results",
                      "noConfidence": "no_confidence"})
def plot_learning_curve(file, all_results, proportions, legend,
                        no_confidence=0):
    """
    Write a gnuplot script plotting learning curves (CA against the
    training-set proportion), one curve per entry in `legend`.

    :param file: an open file-like object or a file name (the file is
        opened and closed here when a name is given).
    :param all_results: list of ExperimentResults, one per proportion.
    :param proportions: x-axis values (training-set proportions).
    :param legend: curve titles, one per learner.
    :param no_confidence: if false, error bars of 1.96 * SE are drawn
        with gnuplot's yerrorbars.
    """
    import types
    fopened = 0
    # accept a file name as well as an open file (Python 2 idiom)
    if type(file) == types.StringType:
        file = open(file, "wt")
        fopened = 1

    file.write("set yrange [0:1]\n")
    file.write("set xrange [%f:%f]\n" % (proportions[0], proportions[-1]))
    file.write("set multiplot\n\n")
    # CAs[p][i] is a (CA, SE) pair for proportion p and learner i
    CAs = [CA(x, report_se=True) for x in all_results]

    # one untitled yerrorbars clause (optional) plus one titled
    # linespoints clause per learner; the last pair has no trailing comma
    file.write("plot \\\n")
    for i in range(len(legend) - 1):
        if not no_confidence:
            file.write("'-' title '' with yerrorbars pointtype %i,\\\n" % (i+1))
        file.write("'-' title '%s' with linespoints pointtype %i,\\\n" % (legend[i], i+1))
    if not no_confidence:
        file.write("'-' title '' with yerrorbars pointtype %i,\\\n" % (len(legend)))
    file.write("'-' title '%s' with linespoints pointtype %i\n" % (legend[-1], len(legend)))

    # inline data blocks, each terminated by "e", in the same order as
    # the plot clauses above: error bars first (when drawn), then curve
    for i in range(len(legend)):
        if not no_confidence:
            for p in range(len(proportions)):
                file.write("%f\t%f\t%f\n" % (proportions[p], CAs[p][i][0], 1.96*CAs[p][i][1]))
            file.write("e\n\n")

        for p in range(len(proportions)):
            file.write("%f\t%f\n" % (proportions[p], CAs[p][i][0]))
        file.write("e\n\n")

    if fopened:
        file.close()
1998
1999
def print_single_ROC_curve_coordinates(file, curve):
    """Write the coordinates of a single ROC curve, one tab-separated
    pair per line, to `file` (an open file or a file name)."""
    import types
    fopened = 0
    # accept a file name as well as an open file (Python 2 idiom)
    if type(file) == types.StringType:
        file = open(file, "wt")
        fopened = 1

    for coord in curve:
        file.write("%5.3f\t%5.3f\n" % tuple(coord))

    if fopened:
        file.close()
2012
2013
def plot_ROC_learners(file, curves, learners):
    """Like plot_ROC, but derives curve labels from the learners."""
    labels = [Orange.misc.getobjectname(learners[i], "Learner %i" % i)
              for i in range(len(learners))]
    plot_ROC(file, curves, labels)
2016
def plot_ROC(file, curves, legend):
    """
    Write a gnuplot script drawing the given ROC curves (plus a
    diagonal) to `file` (an open file-like object or a file name).
    """
    import types
    fopened=0
    # accept a file name as well as an open file (Python 2 idiom)
    if type(file)==types.StringType:
        file=open(file, "wt")
        fopened=1

    file.write("set yrange [0:1]\n")
    file.write("set xrange [0:1]\n")
    file.write("set multiplot\n\n")

    # one titled plot clause per curve plus an untitled one for the
    # diagonal
    file.write("plot \\\n")
    for leg in legend:
        file.write("'-' title '%s' with lines,\\\n" % leg)
    file.write("'-' title '' with lines\n")

    # inline data blocks, each terminated by "e"
    for curve in curves:
        for coord in curve:
            file.write("%5.3f\t%5.3f\n" % tuple(coord))
        file.write("e\n\n")

    # data for the diagonal
    # NOTE(review): "0.0e" fuses the last coordinate with the "e" block
    # terminator; presumably a newline before "e" was intended — confirm
    # against gnuplot before changing.
    file.write("1.0\t1.0\n0.0\t0.0e\n\n")

    if fopened:
        file.close()
2042
2043
@deprecated_keywords({"allResults": "all_results"})
def plot_McNemar_curve_learners(file, all_results, proportions, learners, reference=-1):
    """Like plot_McNemar_curve, but derives legend labels from learners."""
    labels = [Orange.misc.getobjectname(learners[i], "Learner %i" % i)
              for i in range(len(learners))]
    plot_McNemar_curve(file, all_results, proportions, labels, reference)
2047
2048
@deprecated_keywords({"allResults": "all_results"})
def plot_McNemar_curve(file, all_results, proportions, legend, reference=-1):
    """
    Write a gnuplot script plotting, for every learner, the McNemar
    statistic against the `reference` learner as a function of the
    training-set proportion.

    :param reference: index of the reference learner; defaults to the
        last one.
    """
    if reference<0:
        reference=len(legend)-1

    import types
    fopened=0
    # accept a file name as well as an open file (Python 2 idiom)
    if type(file)==types.StringType:
        file=open(file, "wt")
        fopened=1

    #file.write("set yrange [0:1]\n")
    #file.write("set xrange [%f:%f]\n" % (proportions[0], proportions[-1]))
    file.write("set multiplot\n\n")
    file.write("plot \\\n")
    # all learner indices except the reference (Python 2: list concat)
    tmap=range(reference)+range(reference+1, len(legend))
    for i in tmap[:-1]:
        file.write("'-' title '%s' with linespoints pointtype %i,\\\n" % (legend[i], i+1))
    # NOTE(review): the last clause uses pointtype tmap[-1], not
    # tmap[-1]+1 like the others — possibly an off-by-one; confirm.
    file.write("'-' title '%s' with linespoints pointtype %i\n" % (legend[tmap[-1]], tmap[-1]))
    file.write("\n")

    # inline data: one "e"-terminated block per compared learner
    for i in tmap:
        for p in range(len(proportions)):
            file.write("%f\t%f\n" % (proportions[p], McNemar_of_two(all_results[p], i, reference)))
        file.write("e\n\n")

    if fopened:
        file.close()
2077
# Default PiCTeX point markers and line styles used by the *_to_PiCTeX
# writers below.
# NOTE(review): range(97, 122) yields the letters 'a'..'y' only — 'z' is
# excluded; possibly an off-by-one (range's end is exclusive). Confirm.
default_point_types=("{$\\circ$}", "{$\\diamond$}", "{$+$}", "{$\\times$}", "{$|$}")+tuple([chr(x) for x in range(97, 122)])
default_line_types=("\\setsolid", "\\setdashpattern <4pt, 2pt>", "\\setdashpattern <8pt, 2pt>", "\\setdashes", "\\setdots")
2080
@deprecated_keywords({"allResults": "all_results"})
def learning_curve_learners_to_PiCTeX(file, all_results, proportions, **options):
    """Delegate to learning_curve_to_PiCTeX, forwarding keyword options."""
    return learning_curve_to_PiCTeX(file, all_results, proportions, **options)
2084
2085
@deprecated_keywords({"allResults": "all_results"})
def learning_curve_to_PiCTeX(file, all_results, proportions, **options):
    """
    Write a PiCTeX picture of learning curves (CA against the number of
    training examples) to `file` (an open file-like object or a file
    name).

    Recognized options: graphsize (cm), yshift, pointtypes, linetypes,
    numberedx (an int or a list of proportions for the numbered x ticks)
    and default (y value of a dashed horizontal reference line).

    FIX: several emitted PiCTeX string literals were missing their
    opening quote (e.g. ``file.write(\\beginpicture\\n")``), which made
    the function a syntax error; the quotes are restored. The trailing
    unconditional ``file.close()`` is also removed — it closed
    caller-supplied streams, unlike every other writer in this module.
    """
    import types
    fopened = 0
    # accept a file name as well as an open file (Python 2 idiom)
    if type(file) == types.StringType:
        file = open(file, "wt")
        fopened = 1

    nexamples = len(all_results[0].results)
    # CAs[p][i] is a (CA, SE) pair for proportion p and learner i
    CAs = [CA(x, report_se=True) for x in all_results]

    graphsize = float(options.get("graphsize", 10.0)) #cm
    difprop = proportions[-1] - proportions[0]
    ntestexamples = nexamples * proportions[-1]
    xunit = graphsize / ntestexamples

    yshift = float(options.get("yshift", -ntestexamples / 20.))

    pointtypes = options.get("pointtypes", default_point_types)
    linetypes = options.get("linetypes", default_line_types)

    # translate the numberedx option into x-axis tick positions
    if options.has_key("numberedx"):
        numberedx = options["numberedx"]
        if type(numberedx) == types.IntType:
            if numberedx > 0:
                # positive int: spread that many labels over proportions
                numberedx = [nexamples*proportions[int(i/float(numberedx)*len(proportions))] for i in range(numberedx)] + [proportions[-1]*nexamples]
            elif numberedx < 0:
                # negative int: pick proportions closest to an even split
                numberedx = -numberedx
                newn = []
                for i in range(numberedx+1):
                    wanted = proportions[0] + float(i)/numberedx*difprop
                    best = (10, 0)
                    for t in proportions:
                        td = abs(wanted - t)
                        if td < best[0]:
                            best = (td, t)
                    if not best[1] in newn:
                        newn.append(best[1])
                newn.sort()
                numberedx = [nexamples*x for x in newn]
        elif type(numberedx[0]) == types.FloatType:
            numberedx = [nexamples*x for x in numberedx]
    else:
        numberedx = [nexamples*x for x in proportions]

    file.write("\\mbox{\n")
    file.write("\\beginpicture\n")
    file.write("\\setcoordinatesystem units <%10.8fcm, %5.3fcm>\n\n" % (xunit, graphsize))
    file.write("\\setplotarea x from %5.3f to %5.3f, y from 0 to 1\n" % (0, ntestexamples))
    file.write("\\axis bottom invisible\n")  # label {#examples}
    file.write("      ticks short at %s /\n" % reduce(lambda x,y:x+" "+y, ["%i"%(x*nexamples+0.5) for x in proportions]))
    if numberedx:
        file.write("            long numbered at %s /\n" % reduce(lambda x,y:x+y, ["%i " % int(x+0.5) for x in numberedx]))
    file.write("  /\n")
    file.write("\\axis left invisible\n")  # label {classification accuracy}
    file.write("      shiftedto y=%5.3f\n" % yshift)
    file.write("      ticks short from 0.0 to 1.0 by 0.05\n")
    file.write("            long numbered from 0.0 to 1.0 by 0.25\n")
    file.write("  /\n")
    if options.has_key("default"):
        file.write("\\setdashpattern<1pt, 1pt>\n")
        file.write("\\plot %5.3f %5.3f %5.3f %5.3f /\n" % (0., options["default"], ntestexamples, options["default"]))

    for i in range(len(CAs[0])):
        coordinates = reduce(lambda x,y:x+" "+y, ["%i %5.3f" % (proportions[p]*nexamples, CAs[p][i][0]) for p in range(len(proportions))])
        if linetypes:
            file.write("%s\n" % linetypes[i])
            file.write("\\plot %s /\n" % coordinates)
        if pointtypes:
            file.write("\\multiput %s at %s /\n" % (pointtypes[i], coordinates))

    file.write("\\endpicture\n")
    file.write("}\n")
    # close only streams opened here; leave caller-supplied ones open
    if fopened:
        file.close()
2163
def legend_learners_to_PiCTeX(file, learners, **options):
    """Delegate to legend_to_PiCTeX, using learner names as the legend."""
    labels = [Orange.misc.getobjectname(learners[i], "Learner %i" % i)
              for i in range(len(learners))]
    return legend_to_PiCTeX(file, labels, **options)
2166
def legend_to_PiCTeX(file, legend, **options):
    """
    Write a PiCTeX legend (line sample, marker and label for each entry
    in `legend`) to `file` (an open file-like object or a file name).

    Recognized options: pointtypes and linetypes.

    FIX: several emitted PiCTeX string literals were missing their
    opening quote (e.g. ``file.write(\\beginpicture\\n")``), which made
    the function a syntax error; the quotes are restored. The trailing
    unconditional ``file.close()`` is also removed — it closed
    caller-supplied streams, unlike every other writer in this module.
    """
    import types
    fopened = 0
    # accept a file name as well as an open file (Python 2 idiom)
    if type(file) == types.StringType:
        file = open(file, "wt")
        fopened = 1

    pointtypes = options.get("pointtypes", default_point_types)
    linetypes = options.get("linetypes", default_line_types)

    file.write("\\mbox{\n")
    file.write("\\beginpicture\n")
    file.write("\\setcoordinatesystem units <5cm, 1pt>\n\n")
    file.write("\\setplotarea x from 0.000 to %5.3f, y from 0 to 12\n" % len(legend))

    for i in range(len(legend)):
        if linetypes:
            file.write("%s\n" % linetypes[i])
            file.write("\\plot %5.3f 6 %5.3f 6 /\n" % (i, i+0.2))
        if pointtypes:
            file.write("\\put {%s} at %5.3f 6\n" % (pointtypes[i], i+0.1))
        file.write("\\put {%s} [lb] at %5.3f 0\n" % (legend[i], i+0.25))

    file.write("\\endpicture\n")
    file.write("}\n")
    # close only streams opened here; leave caller-supplied ones open
    if fopened:
        file.close()
2196
2197
def compute_friedman(avranks, N):
    """ Returns a tuple composed of (friedman statistic, degrees of freedom)
    and (Iman statistic - F-distribution, degrees of freedom) given average
    ranks and a number of tested data sets N.
    """
    k = len(avranks)

    # chi-square distributed Friedman statistic
    sq_sum = sum(rank**2.0 for rank in avranks)
    fried = 12*N*(sq_sum - k*(k+1)*(k+1)/4.0)/(k*(k+1))

    # Iman-Davenport correction, F-distributed
    iman = (N-1)*fried/(N*(k-1) - fried)

    return (fried, k-1), (iman, (k-1, (k-1)*(N-1)))
2217
def compute_CD(avranks, N, alpha="0.05", type="nemenyi"):
    """ Returns critical difference for Nemenyi or Bonferroni-Dunn test
    according to given alpha (either alpha="0.05" or alpha="0.1") for average
    ranks and number of tested data sets N. Type can be either "nemenyi" for
    for Nemenyi two tailed test or "bonferroni-dunn" for Bonferroni-Dunn test.
    """
    k = len(avranks)

    # critical values indexed by the number of compared methods;
    # can be computed in R as qtukey(0.95, n, Inf)**0.5
    # for (x in c(2:20)) print(qtukey(0.95, x, Inf)/(2**0.5)
    q_table = {
        ("nemenyi", "0.05"): [0, 0, 1.959964, 2.343701, 2.569032, 2.727774,
                              2.849705, 2.94832, 3.030879, 3.101730, 3.163684,
                              3.218654, 3.268004, 3.312739, 3.353618, 3.39123,
                              3.426041, 3.458425, 3.488685, 3.517073, 3.543799],
        ("nemenyi", "0.1"): [0, 0, 1.644854, 2.052293, 2.291341, 2.459516,
                             2.588521, 2.692732, 2.779884, 2.854606, 2.919889,
                             2.977768, 3.029694, 3.076733, 3.119693, 3.159199,
                             3.195743, 3.229723, 3.261461, 3.291224, 3.319233],
        ("bonferroni-dunn", "0.05"): [0, 0, 1.960, 2.241, 2.394, 2.498, 2.576,
                                      2.638, 2.690, 2.724, 2.773],
        ("bonferroni-dunn", "0.1"): [0, 0, 1.645, 1.960, 2.128, 2.241, 2.326,
                                     2.394, 2.450, 2.498, 2.539],
    }

    q = q_table[(type, alpha)][k]

    return q*(k*(k+1)/(6.0*N))**0.5
2248
2249
2250def graph_ranks(filename, avranks, names, cd=None, cdmethod=None, lowv=None, highv=None, width=6, textspace=1, reverse=False, **kwargs):
2251    """
2252    Draws a CD graph, which is used to display  the differences in methods'
2253    performance.
2254    See Janez Demsar, Statistical Comparisons of Classifiers over
2255    Multiple Data Sets, 7(Jan):1--30, 2006.
2256
2257    Needs matplotlib to work.
2258
2259    :param filename: Output file name (with extension). Formats supported
2260                     by matplotlib can be used.
2261    :param avranks: List of average methods' ranks.
2262    :param names: List of methods' names.
2263
2264    :param cd: Critical difference. Used for marking methods that whose
2265               difference is not statistically significant.
2266    :param lowv: The lowest shown rank, if None, use 1.
2267    :param highv: The highest shown rank, if None, use len(avranks).
2268    :param width: Width of the drawn figure in inches, default 6 in.
2269    :param textspace: Space on figure sides left for the description
2270                      of methods, default 1 in.
2271    :param reverse:  If True, the lowest rank is on the right. Default\: False.
2272    :param cdmethod: None by default. It can be an index of element in avranks
2273                     or or names which specifies the method which should be
2274                     marked with an interval.
2275    """
2276    if not HAS_MATPLOTLIB:
2277        import sys
2278        print >> sys.stderr, "Function requires matplotlib. Please install it."
2279        return
2280
2281    width = float(width)
2282    textspace = float(textspace)
2283
2284    def nth(l,n):
2285        """
2286        Returns only nth elemnt in a list.
2287        """
2288        n = lloc(l,n)
2289        return [ a[n] for a in l ]
2290
2291    def lloc(l,n):
2292        """
2293        List location in list of list structure.
2294        Enable the use of negative locations:
2295        -1 is the last element, -2 second last...
2296        """
2297        if n < 0:
2298            return len(l[0])+n
2299        else:
2300            return n
2301
2302    def mxrange(lr):
2303        """
2304        Multiple xranges. Can be used to traverse matrices.
2305        This function is very slow due to unknown number of
2306        parameters.
2307
2308        >>> mxrange([3,5])
2309        [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
2310
2311        >>> mxrange([[3,5,1],[9,0,-3]])
2312        [(3, 9), (3, 6), (3, 3), (4, 9), (4, 6), (4, 3)]
2313
2314        """
2315        if not len(lr):
2316            yield ()
2317        else:
2318            #it can work with single numbers
2319            index = lr[0]
2320            if type(1) == type(index):
2321                index = [ index ]
2322            for a in range(*index):
2323                for b in mxrange(lr[1:]):
2324                    yield tuple([a] + list(b))
2325
2326
2327    from matplotlib.figure import Figure
2328    from matplotlib.backends.backend_agg import FigureCanvasAgg
2329
2330    def print_figure(fig, *args, **kwargs):
2331        canvas = FigureCanvasAgg(fig)
2332        canvas.print_figure(*args, **kwargs)
2333
2334    sums = avranks
2335
2336    tempsort =  sorted([ (a,i) for i,a in  enumerate(sums) ], reverse=reverse)
2337    ssums = nth(tempsort, 0)
2338    sortidx = nth(tempsort, 1)
2339    nnames = [ names[x] for x in sortidx ]
2340
2341    if lowv is None:
2342        lowv = min(1, int(math.floor(min(ssums))))
2343    if highv is None:
2344        highv = max(len(avranks), int(math.ceil(max(ssums))))
2345
2346    cline = 0.4
2347
2348    k = len(sums)
2349
2350    lines = None
2351
2352    linesblank = 0
2353    scalewidth = width - 2*textspace
2354
2355    def rankpos(rank):
2356        if not reverse:
2357            a = rank - lowv
2358        else:
2359            a = highv - rank
2360        return textspace+scalewidth/(highv-lowv)*a
2361
2362    distanceh = 0.25
2363
2364    if cd and cdmethod is None:
2365
2366        #get pairs of non significant methods
2367
        def get_lines(sums, hsd):
            """Return index pairs (i, j), i < j, over the sorted ranks whose
            difference is within hsd (not significantly different), keeping
            only maximal pairs; each pair becomes one connecting bar."""

            #get all (i, j) pairs with i < j
            lsums = len(sums)
            allpairs = [ (i,j) for i,j in mxrange([[lsums], [lsums]]) if j > i ]
            #keep only pairs whose rank difference is not significant
            notSig = [ (i,j) for i,j in allpairs if abs(sums[i]-sums[j]) <= hsd ]
            #keep only longest: drop pairs strictly contained in a wider pair

            def no_longer((i,j), notSig):
                # True when no other non-significant pair strictly extends (i, j).
                for i1,j1 in notSig:
                    if (i1 <= i and j1 > j) or (i1 < i and j1 >= j):
                        return False
                return True

            longest = [ (i,j) for i,j in notSig if no_longer((i,j),notSig) ]

            return longest
2386
2387        lines = get_lines(ssums, cd)
2388        linesblank = 0.2 + 0.2 + (len(lines)-1)*0.1
2389
2391        distanceh = 0.25
2392        cline += distanceh
2393
2394    #calculate height needed height of an image
2395    minnotsignificant = max(2*0.2, linesblank)
2396    height = cline + ((k+1)/2)*0.2 + minnotsignificant
2397
2398    fig = Figure(figsize=(width, height))
2399    ax = fig.add_axes([0,0,1,1]) #reverse y axis
2400    ax.set_axis_off()
2401
2402    hf = 1./height # height factor
2403    wf = 1./width
2404
2405    def hfl(l):
2406        return [ a*hf for a in l ]
2407
2408    def wfl(l):
2409        return [ a*wf for a in l ]
2410
2411
2412    # Upper left corner is (0,0).
2413
2414    ax.plot([0,1], [0,1], c="w")
2415    ax.set_xlim(0, 1)
2416    ax.set_ylim(1, 0)
2417
2418    def line(l, color='k', **kwargs):
2419        """
2420        Input is a list of pairs of points.
2421        """
2422        ax.plot(wfl(nth(l,0)), hfl(nth(l,1)), color=color, **kwargs)
2423
    def text(x, y, s, *args, **kwargs):
        # Place string s at (x, y), rescaled by wf/hf into relative
        # figure coordinates; extra arguments are forwarded to Axes.text.
        ax.text(wf*x, hf*y, s, *args, **kwargs)
2426
2427    line([(textspace, cline), (width-textspace, cline)], linewidth=0.7)
2428
2429    bigtick = 0.1
2430    smalltick = 0.05
2431
2432
2433    import numpy
2434    tick = None
2435    for a in list(numpy.arange(lowv, highv, 0.5)) + [highv]:
2436        tick = smalltick
2437        if a == int(a): tick = bigtick
2438        line([(rankpos(a), cline-tick/2),(rankpos(a), cline)], linewidth=0.7)
2439
2440    for a in range(lowv, highv+1):
2441        text(rankpos(a), cline-tick/2-0.05, str(a), ha="center", va="bottom")
2442
2443    k = len(ssums)
2444
2445    for i in range((k+1)/2):
2446        chei = cline+ minnotsignificant + i *0.2
2447        line([(rankpos(ssums[i]), cline), (rankpos(ssums[i]), chei), (textspace-0.1, chei)], linewidth=0.7)
2448        text(textspace-0.2, chei, nnames[i], ha="right", va="center")
2449
2450    for i in range((k+1)/2, k):
2451        chei = cline + minnotsignificant + (k-i-1)*0.2
2452        line([(rankpos(ssums[i]), cline), (rankpos(ssums[i]), chei), (textspace+scalewidth+0.1, chei)], linewidth=0.7)
2453        text(textspace+scalewidth+0.2, chei, nnames[i], ha="left", va="center")
2454
2455    if cd and cdmethod is None:
2456
2457        #upper scale
2458        if not reverse:
2459            begin, end = rankpos(lowv), rankpos(lowv+cd)
2460        else:
2461            begin, end = rankpos(highv), rankpos(highv - cd)
2462
2463        line([(begin, distanceh), (end, distanceh)], linewidth=0.7)
2464        line([(begin, distanceh + bigtick/2), (begin, distanceh - bigtick/2)], linewidth=0.7)
2465        line([(end, distanceh + bigtick/2), (end, distanceh - bigtick/2)], linewidth=0.7)
2466        text((begin+end)/2, distanceh - 0.05, "CD", ha="center", va="bottom")
2467
2468        #non significance lines
        def draw_lines(lines, side=0.05, height=0.1):
            # Draw one thick horizontal bar per (l, r) pair of indices into
            # ssums, stacking successive bars 'height' apart below the axis;
            # 'side' extends each bar slightly past the outer marks.
            start = cline + 0.2
            for l,r in lines:
                line([(rankpos(ssums[l])-side, start), (rankpos(ssums[r])+side, start)], linewidth=2.5)
                start += height
2474
2475        draw_lines(lines)
2476
2477    elif cd:
2478        begin = rankpos(avranks[cdmethod]-cd)
2479        end = rankpos(avranks[cdmethod]+cd)
2480        line([(begin, cline), (end, cline)], linewidth=2.5)
2481        line([(begin, cline + bigtick/2), (begin, cline - bigtick/2)], linewidth=2.5)
2482        line([(end, cline + bigtick/2), (end, cline - bigtick/2)], linewidth=2.5)
2483
2484    print_figure(fig, filename, **kwargs)
2485
def mlc_hamming_loss(res):
    """
    Schapire and Singer (2000) presented Hamming Loss, which is defined as:

    :math:`HammingLoss(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{Y_i \\vartriangle Z_i}{|L|}`

    Returns one loss per learner; lower is better, 0 is a perfect prediction.

    :raises ValueError: if a prediction's dimension differs from the
        actual class array, or there are no examples.
    """
    losses = [0] * res.number_of_learners
    label_num = len(res.labels)
    example_num = gettotsize(res)

    for e in res.results:
        aclass = e.actual_class
        for i, labels in enumerate(e.classes):
            # e.classes[i] holds the i-th learner's predicted label vector.
            labels = [int(label) for label in labels]
            if len(labels) != len(aclass):
                raise ValueError("The dimensions of the classified output and the actual class array do not match.")
            # Count label positions where prediction and truth disagree
            # (the symmetric difference Y_i triangle Z_i).
            for j in range(label_num):
                if labels[j] != aclass[j]:
                    losses[i] += 1

    return [float(x) / (label_num * example_num) for x in losses]
2507
def mlc_accuracy(res, forgiveness_rate = 1.0):
    """
    Godbole & Sarawagi, 2004 uses the metrics accuracy, precision, recall as follows:

    :math:`Accuracy(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Y_i \\cup Z_i|}`

    Boutell et al. (2004) give a more generalized version using a parameter :math:`\\alpha \\ge 0`,
    called forgiveness rate:

    :math:`Accuracy(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} (\\frac{|Y_i \\cap Z_i|}{|Y_i \\cup Z_i|})^{\\alpha}`

    :raises ValueError: if a prediction's dimension differs from the
        actual class array, or there are no examples.
    """
    accuracies = [0.0] * res.number_of_learners
    example_num = gettotsize(res)

    for e in res.results:
        aclass = e.actual_class
        for i, labels in enumerate(e.classes):
            # e.classes[i] holds the i-th learner's predicted label vector.
            labels = [int(label) for label in labels]
            if len(labels) != len(aclass):
                raise ValueError("The dimensions of the classified output and the actual class array do not match.")

            # Jaccard overlap of the predicted and actual label sets.
            intersection = 0.0
            union = 0.0
            for predicted, actual in zip(labels, aclass):
                if predicted and actual:
                    intersection += 1
                if predicted or actual:
                    union += 1

            # An example with no labels on either side (union == 0)
            # contributes 0 to the sum.
            if union:
                accuracies[i] += intersection / union

    return [math.pow(x / example_num, forgiveness_rate) for x in accuracies]
2541
def mlc_precision(res):
    """
    :math:`Precision(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Z_i|}`

    Z_i is the predicted label set, Y_i the actual one.  Returns one
    precision per learner; examples with no predicted labels contribute 0.

    :raises ValueError: if a prediction's dimension differs from the
        actual class array, or there are no examples.
    """
    precisions = [0.0] * res.number_of_learners
    example_num = gettotsize(res)

    for e in res.results:
        aclass = e.actual_class
        for i, labels in enumerate(e.classes):
            # e.classes[i] holds the i-th learner's predicted label vector.
            labels = [int(label) for label in labels]
            if len(labels) != len(aclass):
                raise ValueError("The dimensions of the classified output and the actual class array do not match.")

            intersection = 0.0
            predicted_num = 0.0  # |Z_i|: labels the learner predicted positive
            for predicted, actual in zip(labels, aclass):
                if predicted and actual:
                    intersection += 1
                if predicted:
                    predicted_num += 1
            if predicted_num:
                precisions[i] += intersection / predicted_num

    return [x / example_num for x in precisions]
2567
def mlc_recall(res):
    """
    :math:`Recall(H,D)=\\frac{1}{|D|} \\sum_{i=1}^{|D|} \\frac{|Y_i \\cap Z_i|}{|Y_i|}`

    Y_i is the actual label set, Z_i the predicted one.  Returns one
    recall per learner; examples with no actual labels contribute 0.

    :raises ValueError: if a prediction's dimension differs from the
        actual class array, or there are no examples.
    """
    recalls = [0.0] * res.number_of_learners
    example_num = gettotsize(res)

    for e in res.results:
        aclass = e.actual_class
        for i, labels in enumerate(e.classes):
            # e.classes[i] holds the i-th learner's predicted label vector.
            labels = [int(label) for label in labels]
            if len(labels) != len(aclass):
                raise ValueError("The dimensions of the classified output and the actual class array do not match.")

            intersection = 0.0
            actual_num = 0.0  # |Y_i|: labels actually present on the example
            for predicted, actual in zip(labels, aclass):
                if predicted and actual:
                    intersection += 1
                if actual:
                    actual_num += 1
            if actual_num:
                recalls[i] += intersection / actual_num

    return [x / example_num for x in recalls]
2593
2594#def mlc_ranking_loss(res):
2595#    pass
2596#
2597#def mlc_average_precision(res):
2598#    pass
2599#
2600#def mlc_hierarchical_loss(res):
2601#    pass
2602
2603
def mt_average_score(res, score, weights=None):
    """
    Compute individual scores for each target and return the (weighted) average.

    One method can be used to compute scores for all targets or a list of
    scoring methods can be passed to use different methods for different
    targets. In the latter case, care has to be taken if the ranges of scoring
    methods differ.
    For example, when the first target is scored from -1 to 1 (1 best) and the
    second from 0 to 1 (0 best), using weights=[0.5,-1] would scale both
    to a span of 1, and invert the second so that higher scores are better.

    :param score: Single-target scoring method or a list of such methods
                  (one for each target).
    :param weights: List of real weights, one for each target,
                    for a weighted average.

    :raises ValueError: if there are no examples, or the number of scoring
        methods does not match the number of targets.
    """
    if not len(res.results):
        raise ValueError("Cannot compute the score: no examples.")
    if res.number_of_learners < 1:
        return []
    n_classes = len(res.results[0].actual_class)
    if weights is None:
        weights = [1.] * n_classes
    # A single scoring method is replicated for every target.
    if not hasattr(score, '__len__'):
        score = [score] * n_classes
    elif len(score) != n_classes:
        raise ValueError("Number of scoring methods and targets do not match.")
    # Save the original multi-target predictions and actual classes so they
    # can be restored after scoring each target separately.
    clsss = [te.classes for te in res.results]
    aclsss = [te.actual_class for te in res.results]
    # Temporarily turn res into a single-target result set, once per target,
    # collecting the per-learner scores for each target.
    single_scores = []
    try:
        for i in range(n_classes):
            for te, clss, aclss in zip(res.results, clsss, aclsss):
                te.classes = [cls[i] for cls in clss]
                te.actual_class = aclss[i]
            single_scores.append(score[i](res))
    finally:
        # Restore the original classes even if a scoring method raises,
        # so res is never left in the mutated single-target state.
        for te, clss, aclss in zip(res.results, clsss, aclsss):
            te.classes = clss
            te.actual_class = aclss
    # Weighted average across targets, independently for each learner.
    return [sum(w * s for w, s in zip(weights, scores)) / sum(weights)
        for scores in zip(*single_scores)]
2649
def mt_flattened_score(res, score):
    """
    Flatten (concatenate into a single list) the predictions of multiple
    targets and compute a single-target score.

    :param score: Single-target scoring method.
    """
    # Build a single-target ExperimentResults mirroring res's metadata.
    # NOTE(review): some lines of this function appear elided in this view
    # of the source (e.g. the zip loop below shows no body) — verify against
    # the full file before relying on this text.
    res2 = Orange.evaluation.testing.ExperimentResults(res.number_of_iterations,
        res.classifier_names, class_values=res.class_values,
        test_type=Orange.evaluation.testing.TEST_TYPE_SINGLE, labels=res.labels)
    for te in res.results:
        # One flattened TestedExample per (example, target) combination.
        for i, ac in enumerate(te.actual_class):
            te2 = Orange.evaluation.testing.TestedExample(
                iteration_number=te.iteration_number, actual_class=ac)
            for c, p in zip(te.classes, te.probabilities):
            res2.results.append(te2)
    return score(res2)
2669
2670
2671################################################################################
if __name__ == "__main__":
    # Smoke test / usage demo for the rank-comparison statistics.
    avranks = [3.143, 2.000, 2.893, 1.964]
    names = ["prva", "druga", "tretja", "cetrta"]
    cd = compute_CD(avranks, 14)
    #cd = compute_CD(avranks, 10, type="bonferroni-dunn")
    # Single-argument print() is valid in both Python 2 and Python 3.
    print(cd)

    print(compute_friedman(avranks, 14))

    #graph_ranks("test.eps", avranks, names, cd=cd, cdmethod=0, width=6, textspace=1.5)
Note: See TracBrowser for help on using the repository browser.