Changeset 7609:bf9836042ee1 in orange


Timestamp: 02/05/11 01:15:22 (3 years ago)
Author: jzbontar <jure.zbontar@…>
Branch: default
Convert: 3c58bd11dddb5c0866b829386dabcbbfd83f4716
Message: testing module
Location: orange
Files: 3 edited

  • orange/Orange/evaluation/testing.py

--- r7519
+++ r7609

@@ -1,4 +1,4 @@
 """
-.. index:: testing
+.. index:: Testing, Sampling
 
 ====================

@@ -62,5 +62,5 @@
     set of examples while others need examples that are already split
     into two sets). If examples are weighted, pass them as a tuple
-    (examples, weightID). Weights are respected by learning and testing,
+    ``(examples, weightID)``. Weights are respected by learning and testing,
     but not by sampling. When selecting 10% of examples, this means 10%
     by number, not by weights. There is also no guarantee that sums

@@ -70,5 +70,5 @@
 *strat*
     Tells whether to stratify the random selections. Its default value is
-    :obj:`Orange.core.StratifiedIfPossible` which stratifies selections
+    :obj:`orange.StratifiedIfPossible` which stratifies selections
     if the class variable is discrete and has no unknown values.
 
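For orientation, the weighted-data convention and the ``strat`` default documented in the hunk above could be exercised roughly as follows. This is an illustrative sketch only; it assumes the ``crossValidation`` entry point and the ``folds`` keyword carried over from the old ``orngTest`` interface, and ``weightID`` in the comment stands for whatever meta attribute would hold the example weights.

    # Illustrative sketch only; names follow the module layout used in this changeset.
    import Orange
    import Orange.evaluation.testing

    data = Orange.data.Table("voting")
    learners = [Orange.classification.bayes.NaiveLearner(name="bayes")]

    # Default sampling is stratified when the class is discrete and has no
    # unknown values (StratifiedIfPossible).
    res = Orange.evaluation.testing.crossValidation(learners, data, folds=5)

    # Weighted data would instead be passed as a (data, weightID) tuple; the
    # weights influence learning and testing, but sampling still selects
    # examples by count, not by weight:
    # res = Orange.evaluation.testing.crossValidation(learners, (data, weightID), folds=5)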
     
@@ -80,5 +80,5 @@
 
     * 
-      Set ``randomGenerator`` to :obj:`Orange.core.globalRandom`. The function's
+      Set ``randomGenerator`` to :obj:`orange.globalRandom`. The function's
       selection will depend upon Orange's global random generator that
       is reset (with random seed 0) when Orange is imported. The Script's

@@ -90,5 +90,5 @@
 
     * 
-      Construct a new :obj:`Orange.core.RandomGenerator`. The code below,
+      Construct a new :obj:`orange.RandomGenerator`. The code below,
       for instance, will produce different results in each iteration,
       but overall the same results each time it's run.
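A rough sketch of the two ``randomGenerator`` options described above. It assumes the ``proportionTest`` call with the keyword arguments this module inherits from ``orngTest``, plus the ``orange.globalRandom`` and ``orange.RandomGenerator`` objects the docstring refers to; treat it as illustrative rather than definitive.

    # Illustrative sketch of the two randomGenerator options.
    import orange                      # old-style namespace for the RNG objects
    import Orange
    import Orange.evaluation.testing

    data = Orange.data.Table("voting")
    learners = [Orange.classification.bayes.NaiveLearner(name="bayes")]

    # Option 1: use the global generator, reset (seed 0) when Orange is imported,
    # so a freshly started script always gets the same selections.
    res = Orange.evaluation.testing.proportionTest(learners, data, 0.7,
                                                   randomGenerator=orange.globalRandom)

    # Option 2: a private generator; iterations differ from one another, but the
    # whole loop reproduces the same results every time the script is run.
    rg = orange.RandomGenerator(42)
    for i in range(3):
        res = Orange.evaluation.testing.proportionTest(learners, data, 0.7,
                                                       randomGenerator=rg)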
     
@@ -172,5 +172,5 @@
 
 Knowing classes :obj:`TestedExample` that stores results of testing
-for a single test example and ExperimentResults that stores a list of
+for a single test example and :obj:`ExperimentResults` that stores a list of
 TestedExamples along with some other data on experimental procedures
 and classifiers used, is important if you would like to write your own
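To make the two classes concrete, here is a hedged sketch of walking an ``ExperimentResults`` by hand instead of calling a scoring function. The attribute names (``results``, ``classes``, ``actualClass``, ``classifierNames``) are the ones this module uses; the snippet is illustrative only.

    # Sketch: computing classification accuracy manually from ExperimentResults.
    import Orange
    import Orange.evaluation.testing

    data = Orange.data.Table("voting")
    learners = [Orange.classification.bayes.NaiveLearner(name="bayes")]
    res = Orange.evaluation.testing.crossValidation(learners, data, folds=5)

    correct = 0
    for tested in res.results:                       # one TestedExample per test example
        if tested.classes[0] == tested.actualClass:  # prediction made by learner 0
            correct += 1
    print "manual CA for %s: %.3f" % (res.classifierNames[0],
                                      float(correct) / len(res.results))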
     
@@ -194,4 +194,4 @@
 
 import Orange
-from orngMisc import demangleExamples, getobjectname, printVerbose
+from Orange.misc import demangleExamples, getobjectname, printVerbose
 import exceptions, cPickle, os, os.path

@@ -478,5 +478,5 @@
     """
     
-    # randomGenerator is set either to what users provided or to Orange.core.RandomGenerator(0)
+    # randomGenerator is set either to what users provided or to orange.RandomGenerator(0)
     # If we left it None or if we set MakeRandomIndices2.randseed, it would give same indices each time it's called
     randomGenerator = argkw.get("indicesrandseed", 0) or argkw.get("randseed", 0) or argkw.get("randomGenerator", 0)

@@ -485,5 +485,5 @@
     examples, weight = demangleExamples(examples)
     classVar = examples.domain.classVar
-    if classVar.varType == Orange.core.VarTypes.Discrete:
+    if classVar.varType == Orange.data.Type.Discrete:
         values = list(classVar.values)
         baseValue = classVar.baseValue

@@ -525,5 +525,5 @@
     of methods for preparing indices, it simply takes the number of folds
     and a flag telling whether we want a stratified cross-validation or
-    not. This function does not return a single ``ExperimentResults`` but
+    not. This function does not return a single :obj:`ExperimentResults` but
     a list of them, one for each proportion. ::
 

@@ -563,6 +563,6 @@
     Arguments ``cv`` and ``pick`` give the methods for preparing
     indices for cross-validation and random selection of learning
-    examples. If they are not given, :obj:`Orange.core.MakeRandomIndicesCV` and
-    :obj:`Orange.core.MakeRandomIndices2` are used, both will be stratified and the
+    examples. If they are not given, :obj:`orange.MakeRandomIndicesCV` and
+    :obj:`orange.MakeRandomIndices2` are used, both will be stratified and the
     cross-validation will be 10-fold. Proportions is a list of proportions
     of learning examples.
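The two learning-curve docstrings touched above (the fold-count variant and the ``cv``/``pick`` variant) both return one ``ExperimentResults`` per proportion. A hedged sketch of the fold-count variant, assuming the ``learningCurveN`` name and the ``folds``/``proportions`` keywords this module inherits from ``orngTest``:

    # Sketch: a learning curve over explicit proportions of learning examples.
    import Orange
    import Orange.evaluation.testing
    import Orange.evaluation.scoring

    data = Orange.data.Table("voting")
    learners = [Orange.classification.bayes.NaiveLearner(name="bayes")]

    proportions = [0.1, 0.2, 0.5, 1.0]
    curves = Orange.evaluation.testing.learningCurveN(learners, data,
        folds=5, proportions=proportions)      # one ExperimentResults per proportion

    for p, res in zip(proportions, curves):
        print "%.1f: %.3f" % (p, Orange.evaluation.scoring.CA(res)[0])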
     
@@ -612,5 +612,5 @@
                 cache = 0
 
-        conv = examples.domain.classVar.varType == Orange.core.VarTypes.Discrete and int or float
+        conv = examples.domain.classVar.varType == Orange.data.Type.Discrete and int or float
         testResults = ExperimentResults(cv.folds, [l.name for l in learners], examples.domain.classVar.values.native(), weight!=0, examples.domain.classVar.baseValue)
         testResults.results = [TestedExample(folds[i], conv(examples[i].getclass()), nLrn, examples[i].getweight(weight))

@@ -641,5 +641,5 @@
                     if (folds[i]==fold):
                         # This is to prevent cheating:
-                        ex = Orange.core.Example(examples[i])
+                        ex = Orange.data.Instance(examples[i])
                         ex.setclass("?")
                         for cl in range(nLrn):

@@ -665,4 +665,4 @@
     number of learning examples, builds the models and tests them on the
     entire testset. The whole test is repeated for the given number of
-    times for each proportion. The result is a list of ExperimentResults,
+    times for each proportion. The result is a list of :obj:`ExperimentResults`,
     one for each proportion.
     
@@ -741,5 +741,5 @@
 
     nIterations = max(indices)+1
-    if examples.domain.classVar.varType == Orange.core.VarTypes.Discrete:
+    if examples.domain.classVar.varType == Orange.data.Type.Discrete:
         values = list(examples.domain.classVar.values)
         basevalue = examples.domain.classVar.baseValue

@@ -747,5 +747,5 @@
         basevalue = values = None
 
-    conv = examples.domain.classVar.varType == Orange.core.VarTypes.Discrete and int or float
+    conv = examples.domain.classVar.varType == Orange.data.Type.Discrete and int or float
     testResults = ExperimentResults(nIterations, [getobjectname(l) for l in learners], values, weight!=0, basevalue)
     testResults.results = [TestedExample(indices[i], conv(examples[i].getclass()), nLrn, examples[i].getweight(weight))

@@ -804,5 +804,5 @@
                 if (indices[i]==fold):
                     # This is to prevent cheating:
-                    ex = Orange.core.Example(testset[tcn])
+                    ex = Orange.data.Instance(testset[tcn])
                     ex.setclass("?")
                     tcn += 1

@@ -901,5 +901,5 @@
 
     if hasLorT:
-        testset = Orange.core.ExampleTable(learnset)
+        testset = Orange.data.Table(learnset)
         for pp in pps:
             if pp[0]=="L":

@@ -939,5 +939,5 @@
     if not testResults:
         classVar = testset.domain.classVar
-        if testset.domain.classVar.varType == Orange.core.VarTypes.Discrete:
+        if testset.domain.classVar.varType == Orange.data.Type.Discrete:
             values = classVar.values.native()
             baseValue = classVar.baseValue

@@ -952,5 +952,5 @@
         # first time we have to add to it
         if not getattr(testResults, "examplesCloned", False):
-            testResults.examples = Orange.core.ExampleTable(testResults.examples)
+            testResults.examples = Orange.data.Table(testResults.examples)
             testResults.examplesCloned = True
         testResults.examples.extend(testset)

@@ -959,5 +959,5 @@
         testResults.examples = testset
     
-    conv = testset.domain.classVar.varType == Orange.core.VarTypes.Discrete and int or float
+    conv = testset.domain.classVar.varType == Orange.data.Type.Discrete and int or float
     for ex in testset:
         te = TestedExample(iterationNumber, conv(ex.getclass()), 0, ex.getweight(testweight))

@@ -965,5 +965,5 @@
         for classifier in classifiers:
             # This is to prevent cheating:
-            ex2 = Orange.core.Example(ex)
+            ex2 = Orange.data.Instance(ex)
             ex2.setclass("?")
             cr = classifier(ex2, Orange.core.GetBoth)
  • orange/doc/Orange/rst/code/logreg-stepwise.py

--- r7339
+++ r7609

@@ -1,6 +1,6 @@
-import orngStat
 import orngTest
 import orngFSS
 from Orange import *
+import Orange.evaluation.testing
 
 

@@ -33,14 +33,14 @@
 learners = (
   classification.logreg.LogRegLearner(name='logistic', removeSingular=1),
-  orngFSS.FilteredLearner(lr,
+  feature.selection.FilteredLearner(lr,
     filter=StepWiseFSS_Filter(addCrit=0.05, deleteCrit=0.9),
     name='filtered')
 )
-results = orngTest.crossValidation(learners, table, storeClassifiers=1)
+results = Orange.evaluation.testing.crossValidation(learners, table, storeClassifiers=1)
 
 # output the results
 print "Learner      CA"
 for i in range(len(learners)):
-    print "%-12s %5.3f" % (learners[i].name, orngStat.CA(results)[i])
+    print "%-12s %5.3f" % (learners[i].name, evaluation.scoring.CA(results)[i])
 
 # find out which features were retained by filtering
  • orange/doc/Orange/rst/code/testing-test.py

--- r7514
+++ r7609

@@ -1,15 +1,14 @@
 import Orange
-import orngStat
 import random
 
 table = Orange.data.Table("voting")
 
-bayes = Orange.core.BayesLearner(name="bayes")
-tree = Orange.core.TreeLearner(name="tree")
+bayes = Orange.classification.bayes.NaiveLearner(name="bayes")
+tree = Orange.classification.tree.TreeLearnerBase(name="tree")
 majority = Orange.classification.majority.MajorityLearner(name="default")
 learners = [bayes, tree, majority]
 
 def printResults(res):
-    CAs = orngStat.CA(res, reportSE=1)
+    CAs = Orange.evaluation.scoring.CA(res, reportSE=1)
     for name, ca in zip(res.classifierNames, CAs):
         print "%s: %5.3f+-%5.3f" % (name, ca[0], 1.96 * ca[1]),

@@ -30,9 +29,10 @@
 # End
 
-print "\nproportionsTest that will give different results each time it is run"
-for i in range(3):
-    res = Orange.evaluation.testing.proportionTest(learners, table, 0.7,
-        randseed=random.randint(0, 100))
-    printResults(res)
+if "NO_RANDOMNESS" not in vars():
+    print "\nproportionsTest that will give different results each time it is run"
+    for i in range(3):
+        res = Orange.evaluation.testing.proportionTest(learners, table, 0.7,
+            randseed=random.randint(0, 100))
+        printResults(res)
 # End
 