Files: 551 added, 353 deleted, 39 edited

  • Orange/classification/logreg.py

    r9936 r9959  
    188188        self.__dict__.update(kwds) 
    189189 
    190     def __call__(self, instance, resultType = Orange.classification.Classifier.GetValue): 
    191         # classification not implemented yet. For now its use is only to provide regression coefficients and its statistics 
    192         pass 
     190    def __call__(self, instance, result_type = Orange.classification.Classifier.GetValue): 
     191        # classification not implemented yet. For now its use is only to 
     192        # provide regression coefficients and its statistics 
 193        raise NotImplementedError 
    193194     
    194195 
    195196class LogRegLearnerGetPriors(object): 
    196     def __new__(cls, instances=None, weightID=0, **argkw): 
     197    def __new__(cls, instances=None, weight_id=0, **argkw): 
    197198        self = object.__new__(cls) 
    198199        if instances: 
    199200            self.__init__(**argkw) 
    200             return self.__call__(instances, weightID) 
     201            return self.__call__(instances, weight_id) 
    201202        else: 
    202203            return self 
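
    The ``__new__`` override above is Orange's usual learner shortcut: constructing the learner
    without data returns the learner itself, while passing data trains immediately and returns the
    result. A minimal sketch of both calling styles (the ``heart_disease`` data set name is an
    assumption for illustration)::

        import Orange

        data = Orange.data.Table("heart_disease")

        # Two-step: construct the learner, then call it on the data.
        learner = Orange.classification.logreg.LogRegLearnerGetPriors()
        priors = learner(data)

        # One-step: __new__ sees the data and returns the result of __call__.
        priors = Orange.classification.logreg.LogRegLearnerGetPriors(data)
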
  • Orange/classification/lookup.py

    r9919 r9960  
    511511       
    512512 
    513 def lookup_from_data(examples, weight=0, learnerForUnknown=None): 
     513from Orange.misc import deprecated_keywords 
     514@deprecated_keywords({"learnerForUnknown":"learner_for_unknown"}) 
     515def lookup_from_data(examples, weight=0, learner_for_unknown=None): 
    514516    if len(examples.domain.attributes) <= 3: 
    515517        lookup = lookup_from_bound(examples.domain.class_var, 
     
    528530        # ClassifierByDataTable, let it deal with them 
    529531        return LookupLearner(examples, weight, 
    530                              learnerForUnknown=learnerForUnknown) 
     532                             learner_for_unknown=learner_for_unknown) 
    531533 
    532534    else: 
    533535        return LookupLearner(examples, weight, 
    534                              learnerForUnknown=learnerForUnknown) 
     536                             learner_for_unknown=learner_for_unknown) 
    535537         
    536538         
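
    ``deprecated_keywords`` keeps the old camel-case keyword (``learnerForUnknown``) working while
    the code base migrates to underscore names. A minimal sketch of what such a decorator can look
    like; the actual implementation in ``Orange.misc`` may differ::

        import warnings
        from functools import wraps

        def deprecated_keywords(mapping):
            """Rename deprecated keyword arguments and warn about their use."""
            def decorator(func):
                @wraps(func)
                def wrapper(*args, **kwargs):
                    for old, new in mapping.items():
                        if old in kwargs:
                            warnings.warn("%s is deprecated; use %s" % (old, new),
                                          DeprecationWarning, stacklevel=2)
                            kwargs[new] = kwargs.pop(old)
                    return func(*args, **kwargs)
                return wrapper
            return decorator
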
  • Orange/classification/wrappers.py

    r9671 r9961  
    55import Orange.evaluation.scoring 
    66 
     7from Orange.misc import deprecated_members 
     8 
    79class StepwiseLearner(Orange.core.Learner): 
    8   def __new__(cls, data=None, weightId=None, **kwargs): 
     10  def __new__(cls, data=None, weight_id=None, **kwargs): 
    911      self = Orange.core.Learner.__new__(cls, **kwargs) 
    1012      if data is not None: 
    1113          self.__init__(**kwargs) 
    12           return self(data, weightId) 
     14          return self(data, weight_id) 
    1315      else: 
    1416          return self 
    1517       
    1618  def __init__(self, **kwds): 
    17     self.removeThreshold = 0.3 
    18     self.addThreshold = 0.2 
     19    self.remove_threshold = 0.3 
     20    self.add_threshold = 0.2 
    1921    self.stat, self.statsign = scoring.CA, 1 
    20     self.__dict__.update(kwds) 
     22    for name, val in kwds.items(): 
     23        setattr(self, name, val) 
    2124 
    22   def __call__(self, examples, weightID = 0, **kwds): 
     25  def __call__(self, data, weight_id = 0, **kwds): 
    2326    import Orange.evaluation.testing, Orange.evaluation.scoring, statc 
    2427     
    2528    self.__dict__.update(kwds) 
    2629 
    27     if self.removeThreshold < self.addThreshold: 
    28         raise ValueError("'removeThreshold' should be larger or equal to 'addThreshold'") 
     30    if self.remove_threshold < self.add_threshold: 
 31        raise ValueError("'remove_threshold' should be larger than or equal to 'add_threshold'") 
    2932 
    30     classVar = examples.domain.classVar 
     33    classVar = data.domain.classVar 
    3134     
    32     indices = Orange.core.MakeRandomIndicesCV(examples, folds = getattr(self, "folds", 10)) 
     35    indices = Orange.core.MakeRandomIndicesCV(data, folds = getattr(self, "folds", 10)) 
    3336    domain = Orange.data.Domain([], classVar) 
    3437 
    35     res = Orange.evaluation.testing.test_with_indices([self.learner], Orange.data.Table(domain, examples), indices) 
     38    res = Orange.evaluation.testing.test_with_indices([self.learner], Orange.data.Table(domain, data), indices) 
    3639     
    3740    oldStat = self.stat(res)[0] 
    38     oldStats = [self.stat(x)[0] for x in Orange.evaluation.scoring.splitByIterations(res)] 
     41    oldStats = [self.stat(x)[0] for x in Orange.evaluation.scoring.split_by_iterations(res)] 
    3942    print ".", oldStat, domain 
    4043    stop = False 
     
    4548            for attr in domain.attributes: 
    4649                newdomain = Orange.data.Domain(filter(lambda x: x!=attr, domain.attributes), classVar) 
    47                 res = Orange.evaluation.testing.test_with_indices([self.learner], (Orange.data.Table(newdomain, examples), weightID), indices) 
     50                res = Orange.evaluation.testing.test_with_indices([self.learner], (Orange.data.Table(newdomain, data), weight_id), indices) 
    4851                 
    4952                newStat = self.stat(res)[0] 
    50                 newStats = [self.stat(x)[0] for x in Orange.evaluation.scoring.splitByIterations(res)]  
     53                newStats = [self.stat(x)[0] for x in Orange.evaluation.scoring.split_by_iterations(res)]  
    5154                print "-", newStat, newdomain 
    5255                ## If stat has increased (ie newStat is better than bestStat) 
     
    5457                    if cmp(newStat, oldStat) == self.statsign: 
    5558                        bestStat, bestStats, bestAttr = newStat, newStats, attr 
    56                     elif statc.wilcoxont(oldStats, newStats)[1] > self.removeThreshold: 
     59                    elif statc.wilcoxont(oldStats, newStats)[1] > self.remove_threshold: 
    5760                            bestStat, bestAttr, bestStats = newStat, newStats, attr 
    5861            if bestStat: 
     
    6366 
    6467        bestStat, bestAttr = oldStat, None 
    65         for attr in examples.domain.attributes: 
     68        for attr in data.domain.attributes: 
    6669            if not attr in domain.attributes: 
    6770                newdomain = Orange.data.Domain(domain.attributes + [attr], classVar) 
    68                 res = Orange.evaluation.testing.test_with_indices([self.learner], (Orange.data.Table(newdomain, examples), weightID), indices) 
     71                res = Orange.evaluation.testing.test_with_indices([self.learner], (Orange.data.Table(newdomain, data), weight_id), indices) 
    6972                 
    7073                newStat = self.stat(res)[0] 
    71                 newStats = [self.stat(x)[0] for x in Orange.evaluation.scoring.splitByIterations(res)]  
     74                newStats = [self.stat(x)[0] for x in Orange.evaluation.scoring.split_by_iterations(res)]  
    7275                print "+", newStat, newdomain 
    7376 
    7477                ## If stat has increased (ie newStat is better than bestStat) 
    75                 if cmp(newStat, bestStat) == self.statsign and statc.wilcoxont(oldStats, newStats)[1] < self.addThreshold: 
     78                if cmp(newStat, bestStat) == self.statsign and statc.wilcoxont(oldStats, newStats)[1] < self.add_threshold: 
    7679                    bestStat, bestStats, bestAttr = newStat, newStats, attr 
    7780        if bestAttr: 
     
    8184            print "added", bestAttr.name 
    8285 
    83     return self.learner(Orange.data.Table(domain, examples), weightID) 
     86    return self.learner(Orange.data.Table(domain, data), weight_id) 
    8487 
     88StepwiseLearner = deprecated_members( 
     89                    {"removeThreshold": "remove_threshold", 
     90                     "addThreshold": "add_threshold"}, 
     91                    )(StepwiseLearner) 
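
    ``deprecated_members`` plays the same role for attribute names: the wrapped class still accepts
    the old spellings and forwards them to the new ones. A usage sketch, assuming the wrapper
    aliases attribute access and emits a ``DeprecationWarning``::

        learner = StepwiseLearner()
        learner.removeThreshold = 0.4      # old spelling, still accepted (with a warning)
        print learner.remove_threshold     # -> 0.4
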
  • Orange/testing/regression/results_reference/knnlearner.py.txt

    r9689 r9954  
     1Testing using euclidean distance 
    12Iris-setosa Iris-setosa 
    23Iris-versicolor Iris-versicolor 
     
    67 
    78 
    8  
     9Testing using hamming distance 
    910Iris-virginica Iris-virginica 
    1011Iris-setosa Iris-setosa 
  • Orange/testing/regression/results_reference/randomindicescv.py.txt

    r9689 r9954  
     1Indices for ordinary 10-fold CV 
    12<1, 1, 3, 8, 8, 3, 2, 7, 5, 0, 1, 5, 2, 9, 4, 7, 4, 9, 3, 6, 0, 2, 0, 6> 
     3Indices for 5 folds on 10 examples 
    24<3, 0, 1, 0, 3, 2, 4, 4, 1, 2> 
  • Orange/testing/regression/results_reference/treelearner.py.txt

    r9689 r9954  
    1 None 
    2 None 
    311.0 0.0 
    42 
    53 
    64Tree with minExamples = 5.0 
     5tear_rate=reduced: none (100.00%) 
     6tear_rate=normal 
     7|    astigmatic=no 
     8|    |    age=pre-presbyopic: soft (100.00%) 
     9|    |    age=presbyopic: none (50.00%) 
     10|    |    age=young: soft (100.00%) 
     11|    astigmatic=yes 
     12|    |    prescription=hypermetrope: none (66.67%) 
     13|    |    prescription=myope: hard (100.00%) 
    714 
    8 tear_rate (<15.000, 4.000, 5.000>)  
    9 : normal  
    10    astigmatic (<3.000, 4.000, 5.000>)  
    11    : no  
    12       age (<1.000, 0.000, 5.000>)  
    13       : pre-presbyopic --> soft (<0.000, 0.000, 2.000>)   
    14       : presbyopic --> none (<1.000, 0.000, 1.000>)   
    15       : young --> soft (<0.000, 0.000, 2.000>)   
    16    : yes  
    17       prescription (<2.000, 4.000, 0.000>)  
    18       : hypermetrope --> none (<2.000, 1.000, 0.000>)   
    19       : myope --> hard (<0.000, 3.000, 0.000>)   
    20 : reduced --> none (<12.000, 0.000, 0.000>)   
     15 
    2116 
    2217Tree with maxMajority = 0.5 
    23 --> none (<15.000, 4.000, 5.000>)  
     18none (62.50%) 
  • Orange/testing/regression/results_reference/treestructure.py.txt

    r9689 r9954  
    1 Tree size: 10 
     1Tree size: 15 
    22 
    33 
    44Unpruned tree 
     5tear_rate=reduced: none (100.00%) 
     6tear_rate=normal 
     7|    astigmatic=no 
     8|    |    age=pre-presbyopic: soft (100.00%) 
     9|    |    age=young: soft (100.00%) 
     10|    |    age=presbyopic 
     11|    |    |    prescription=hypermetrope: soft (100.00%) 
     12|    |    |    prescription=myope: none (100.00%) 
     13|    astigmatic=yes 
     14|    |    prescription=myope: hard (100.00%) 
     15|    |    prescription=hypermetrope 
     16|    |    |    age=pre-presbyopic: none (100.00%) 
     17|    |    |    age=presbyopic: none (100.00%) 
     18|    |    |    age=young: hard (100.00%) 
    519 
    6 tear_rate (<15.000, 4.000, 5.000>)  
    7 : normal  
    8    astigmatic (<3.000, 4.000, 5.000>)  
    9    : no  
    10       age (<1.000, 0.000, 5.000>)  
    11       : pre-presbyopic --> soft (<0.000, 0.000, 2.000>)   
    12       : presbyopic --> none (<1.000, 0.000, 1.000>)   
    13       : young --> soft (<0.000, 0.000, 2.000>)   
    14    : yes  
    15       prescription (<2.000, 4.000, 0.000>)  
    16       : hypermetrope --> none (<2.000, 1.000, 0.000>)   
    17       : myope --> hard (<0.000, 3.000, 0.000>)   
    18 : reduced --> none (<12.000, 0.000, 0.000>)   
     20 
    1921 
    2022Pruned tree 
     23tear_rate=reduced: none (100.00%) 
     24tear_rate=normal 
     25|    astigmatic=no: soft (83.33%) 
     26|    astigmatic=yes: hard (66.67%) 
    2127 
    22 tear_rate (<15.000, 4.000, 5.000>)  
    23 : normal  
    24    astigmatic (<3.000, 4.000, 5.000>)  
    25    : no --> soft (<1.000, 0.000, 5.000>)   
    26    : yes --> hard (<2.000, 4.000, 0.000>)   
    27 : reduced --> none (<12.000, 0.000, 0.000>)  
  • Orange/testing/regression/xtest.py

    r9873 r9949  
    1212platform = sys.platform 
    1313pyversion = sys.version[:3] 
    14 states = ["OK", "changed", "random", "error", "crash"] 
     14states = ["OK", "timedout", "changed", "random", "error", "crash"] 
    1515 
    1616def file_name_match(name, patterns): 
     
    2222 
    2323def test_scripts(complete, just_print, module="orange", root_directory=".", 
    24                 test_files=None, directories=None): 
     24                test_files=None, directories=None, timeout=5): 
    2525    """Test the scripts in the given directory.""" 
    2626    global error_status 
     
    123123                sys.stdout.flush() 
    124124 
    125                 for state in ["crash", "error", "new", "changed", "random1", "random2"]: 
     125                for state in states: 
    126126                    remname = "%s/%s.%s.%s.%s.txt" % \ 
    127127                              (outputsdir, name, platform, pyversion, state) 
     
    130130 
    131131                titerations = re_israndom.search(open(name, "rt").read()) and 1 or iterations 
    132                 os.spawnl(os.P_WAIT, sys.executable, "-c", regtestdir + "/xtest_one.py", name, str(titerations), outputsdir) 
    133  
    134                 result = open("xtest1_report", "rt").readline().rstrip() or "crash" 
     132                #os.spawnl(os.P_WAIT, sys.executable, "-c", regtestdir + "/xtest_one.py", name, str(titerations), outputsdir) 
     133                p = subprocess.Popen([sys.executable, regtestdir + "/xtest_one.py", name, str(titerations), outputsdir]) 
     134 
     135                passed_time = 0 
     136                while passed_time < timeout: 
     137                    time.sleep(0.01) 
     138                    passed_time += 0.01 
     139 
     140                    if p.poll() is not None: 
     141                        break 
     142 
     143                if p.poll() is None: 
     144                    p.kill() 
 145                    result = "timedout" 
     146                    print "timedout" 
     147                    # remove output file and change it for *.timedout.* 
     148                    for state in states: 
     149                        remname = "%s/%s.%s.%s.%s.txt" % \ 
     150                                  (outputsdir, name, platform, pyversion, state) 
     151                        if os.path.exists(remname): 
     152                            os.remove(remname) 
     153 
     154                    timeoutname = "%s/%s.%s.%s.%s.txt" % (outputsdir, name, sys.platform, sys.version[:3], "timedout") 
     155                    open(timeoutname, "wt").close() 
     156                else: 
     157                    stdout, stderr = p.communicate() 
     158                    result = open("xtest1_report", "rt").readline().rstrip() or "crash" 
     159 
    135160                error_status = max(error_status, states.index(result)) 
    136161                os.remove("xtest1_report") 
     
    139164 
    140165    os.chdir(caller_directory) 
    141  
    142166 
    143167iterations = 1 
     
    147171def usage(): 
    148172    """Print out help.""" 
    149     print "%s [update|test|report|report-html|errors] -[h|s] [--single|--module=[orange|obi|text]|--dir=<dir>|] <files>" % sys.argv[0] 
    150     print "  test:   regression tests on all scripts" 
    151     print "  update: regression tests on all previously failed scripts (default)" 
     173    print "%s [test|update|report|report-html|errors] -[h|s] [--single|--module=[all|orange|docs]|--timeout=<#>|--dir=<dir>|] <files>" % sys.argv[0] 
     174    print "  test:   regression tests on all scripts (default)" 
     175    print "  update: regression tests on all previously failed scripts" 
    152176    print "  report: report on testing results" 
    153177    print "  errors: report on errors from regression tests" 
     
    155179    print "-s, --single: runs a single test on each script" 
    156180    print "--module=<module>: defines a module to test" 
     181    print "--timeout=<#seconds>: defines max. execution time" 
    157182    print "--dir=<dir>: a comma-separated list of names where any should match the directory to be tested" 
    158183    print "<files>: space separated list of string matching the file names to be tested" 
     
    163188    global iterations 
    164189 
    165     command = "update" 
     190    command = "test" 
    166191    if argv: 
    167192        if argv[0] in ["update", "test", "report", "report-html", "errors", "help"]: 
     
    170195 
    171196    try: 
    172         opts, test_files = getopt.getopt(argv, "hs", ["single", "module=", "help", "files=", "verbose="]) 
     197        opts, test_files = getopt.getopt(argv, "hs", ["single", "module=", "timeout=", "help", "files=", "verbose="]) 
    173198    except getopt.GetoptError: 
    174199        print "Warning: Wrong argument" 
     
    183208 
    184209    module = opts.get("--module", "all") 
    185     if module in ["all"]: 
     210    if module == "all": 
    186211        root = "%s/.." % environ.install_dir 
    187212        module = "orange" 
    188         dirs = [("modules", "Orange/doc/modules"), 
    189                 ("reference", "Orange/doc/reference"), 
    190                 ("ofb", "docs/tutorial/rst/code"), 
    191                 ("orange25", "docs/reference/rst/code")] 
    192     elif module in ["orange"]: 
     213        dirs = [("tests", "Orange/testing/regression/tests"), 
     214                ("tests_20", "Orange/testing/regression/tests_20"), 
     215                ("tutorial", "docs/tutorial/rst/code"), 
     216                ("reference", "docs/reference/rst/code")] 
     217    elif module == "orange": 
     218        root = "%s" % environ.install_dir 
     219        module = "orange" 
     220        dirs = [("tests", "testing/regression/tests"), 
     221                ("tests_20", "testing/regression/tests_20")] 
     222    elif module == "docs": 
    193223        root = "%s/.." % environ.install_dir 
    194224        module = "orange" 
    195         dirs = [("modules", "Orange/doc/modules"), 
    196                 ("reference", "Orange/doc/reference"), 
    197                 ("ofb", "docs/tutorial/rst/code")] 
    198     elif module in ["ofb-rst"]: 
    199         root = "%s/.." % environ.install_dir 
    200         module = "orange" 
    201         dirs = [("ofb", "docs/tutorial/rst/code")] 
    202     elif module in ["orange25"]: 
    203         root = "%s/.." % environ.install_dir 
    204         module = "orange" 
    205         dirs = [("orange25", "docs/reference/rst/code")] 
    206     elif module == "obi": 
    207         root = environ.add_ons_dir + "/Bioinformatics/doc" 
    208         dirs = [("modules", "modules")] 
    209     elif module == "text": 
    210         root = environ.add_ons_dir + "/Text/doc" 
    211         dirs = [("modules", "modules")] 
     225        dirs = [("tutorial", "docs/tutorial/rst/code"), 
     226                ("reference", "docs/reference/rst/code")] 
    212227    else: 
    213         print "Error: %s is wrong name of the module, should be in [orange|obi|text]" % module 
 228        print "Error: %s is not a valid module name; use one of [orange|docs]" % module 
     229        sys.exit(1) 
     230 
     231    timeout = 5 
     232    try: 
     233        _t = opts.get("--timeout", "5") 
     234        timeout = int(_t) 
     235        if timeout <= 0 or timeout >= 120: 
     236            raise AttributeError() 
     237    except AttributeError: 
     238        print "Error: timeout out of range (0 < # < 120)" 
     239        sys.exit(1) 
     240    except: 
     241        print "Error: %s wrong timeout" % opts.get("--timeout", "5") 
    214242        sys.exit(1) 
    215243 
    216244    test_scripts(command == "test", command == "report" or (command == "report-html" and command or False), 
    217245                 module=module, root_directory=root, 
    218                  test_files=test_files, directories=dirs) 
     246                 test_files=test_files, directories=dirs, timeout=timeout) 
    219247    # sys.exit(error_status) 
    220248 
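
    The reworked runner launches each test script with ``subprocess.Popen`` and polls it, killing
    the child once it outlives the ``--timeout`` budget. The same pattern in isolation (the
    function name is hypothetical; ``Popen.kill()`` requires Python 2.6+)::

        import subprocess
        import time

        def run_with_timeout(args, timeout=5.0, poll_interval=0.01):
            """Run a child process; kill it if it outlives `timeout` seconds."""
            p = subprocess.Popen(args)
            waited = 0.0
            while waited < timeout:
                if p.poll() is not None:    # child has finished
                    return p.returncode
                time.sleep(poll_interval)
                waited += poll_interval
            p.kill()                        # still running after the budget: give up
            return None                     # the caller maps None to the "timedout" state
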
  • docs/reference/rst/Orange.data.domain.rst

    r9936 r9958  
    308308         variable from the list is used as the class variable. :: 
    309309 
    310              >>> domain1 = orange.Domain([a, b]) 
    311              >>> domain2 = orange.Domain(["a", b, c], domain) 
     310             >>> domain1 = Orange.data.Domain([a, b]) 
     311             >>> domain2 = Orange.data.Domain(["a", b, c], domain) 
    312312 
    313313         :param variables: List of variables (strings or instances of :obj:`~Orange.feature.Descriptor`) 
     
    323323         last variable should be used as the class variable. :: 
    324324 
    325              >>> domain1 = orange.Domain([a, b], False) 
    326              >>> domain2 = orange.Domain(["a", b, c], False, domain) 
     325             >>> domain1 = Orange.data.Domain([a, b], False) 
     326             >>> domain2 = Orange.data.Domain(["a", b, c], False, domain) 
    327327 
    328328         :param variables: List of variables (strings or instances of :obj:`~Orange.feature.Descriptor`) 
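
    The corrected snippets construct domains from feature descriptors instead of the old
    ``orange`` module. A self-contained sketch of both constructor forms (feature names chosen
    for illustration)::

        import Orange

        a = Orange.feature.Continuous("a")
        b = Orange.feature.Continuous("b")
        c = Orange.feature.Discrete("c", values=["no", "yes"])

        domain1 = Orange.data.Domain([a, b, c])         # the last variable becomes the class
        domain2 = Orange.data.Domain([a, b, c], False)  # class-less domain
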
  • docs/reference/rst/Orange.data.instance.rst

    r9936 r9958  
    9191passed along with the data:: 
    9292 
    93     bayes = orange.BayesLearner(data, id) 
     93    bayes = Orange.classification.bayes.NaiveLearner(data, id) 
    9494 
    9595Many other functions accept weights in similar fashion. 
     
    112112accessed:: 
    113113 
    114     w = orange.FloatVariable("w") 
     114    w = Orange.feature.Continuous("w") 
    115115    data.domain.addmeta(id, w) 
    116116 
     
    125125allows for conversion from Python native types:: 
    126126 
    127     ok = orange.EnumVariable("ok?", values=["no", "yes"]) 
     127    ok = Orange.feature.Discrete("ok?", values=["no", "yes"]) 
    128128    ok_id = Orange.feature.Descriptor.new_meta_id() 
    129129    data.domain.addmeta(ok_id, ok) 
     
    237237        Convert the instance into an ordinary Python list. If the 
    238238        optional argument `level` is 1 (default), the result is a list of 
    239         instances of :obj:`orange.data.Value`. If it is 0, it contains 
     239        instances of :obj:`Orange.data.Value`. If it is 0, it contains 
    240240        pure Python objects, that is, strings for discrete variables 
    241241        and numbers for continuous ones. 
     
    281281        attributes are returned. :: 
    282282 
    283             data = orange.ExampleTable("inquisition2") 
     283            data = Orange.data.Table("inquisition2") 
    284284            example = data[4] 
    285             print example.getmetas() 
    286             print example.getmetas(int) 
    287             print example.getmetas(str) 
    288             print example.getmetas(orange.Variable) 
     285            print example.get_metas() 
     286            print example.get_metas(int) 
     287            print example.get_metas(str) 
     288            print example.get_metas(Orange.feature.Descriptor) 
    289289 
    290290        :param key_type: the key type; either ``int``, ``str`` or :obj:`~Orange.feature.Descriptor` 
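
    The renamed calls combine into a short sketch of the meta-attribute workflow
    (``inquisition2`` is the data set the documentation itself uses)::

        import Orange

        data = Orange.data.Table("inquisition2")
        ok = Orange.feature.Discrete("ok?", values=["no", "yes"])
        ok_id = Orange.feature.Descriptor.new_meta_id()
        data.domain.addmeta(ok_id, ok)

        example = data[4]
        example[ok] = "yes"            # native Python values are converted automatically
        print example.get_metas(str)   # meta values keyed by attribute name
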
  • docs/reference/rst/Orange.data.value.rst

    r9927 r9958  
    133133    deg3 = Orange.feature.Discrete( 
    134134        "deg3", values=["little", "medium", "big"]) 
    135     deg4 = orange.feature.Discrete( 
     135    deg4 = Orange.feature.Discrete( 
    136136        "deg4", values=["tiny", "little", "big", "huge"]) 
    137     val3 = orange.Value(deg3) 
    138     val4 = orange.Value(deg4) 
     137    val3 = Orange.data.Value(deg3) 
     138    val4 = Orange.data.Value(deg4) 
    139139    val3.value = "medium" 
    140140    val4.value = "little" 
  • docs/reference/rst/Orange.evaluation.scoring.rst

    r9904 r9958  
    321321We shall use the following code to prepare suitable experimental results:: 
    322322 
    323     ri2 = Orange.core.MakeRandomIndices2(voting, 0.6) 
     323    ri2 = Orange.data.sample.SubsetIndices2(voting, 0.6) 
    324324    train = voting.selectref(ri2, 0) 
    325325    test = voting.selectref(ri2, 1) 
  • docs/reference/rst/Orange.feature.discretization.rst

    r9944 r9964  
    1010   single: feature; discretization 
    1111 
12 Continuous features can be discretized either one feature at a time or, as demonstrated in the following script, 
13 using a single discretization method on the entire set of data features: 
    14  
    15 .. literalinclude:: code/discretization-table.py 
    16  
17 Discretization introduces new categorical features and computes their values in accordance with the 
18 selected (or default) discretization method:: 
    19  
    20     Original data set: 
    21     [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'] 
    22     [4.9, 3.0, 1.4, 0.2, 'Iris-setosa'] 
    23     [4.7, 3.2, 1.3, 0.2, 'Iris-setosa'] 
    24  
    25     Discretized data set: 
    26     ['<=5.45', '>3.15', '<=2.45', '<=0.80', 'Iris-setosa'] 
    27     ['<=5.45', '(2.85, 3.15]', '<=2.45', '<=0.80', 'Iris-setosa'] 
    28     ['<=5.45', '>3.15', '<=2.45', '<=0.80', 'Iris-setosa'] 
    29  
    30 The following discretization methods are supported: 
    31  
32 * equal width discretization, where the domain of the continuous feature is split into intervals of the same 
33   width (:class:`EqualWidth`), 
34 * equal frequency discretization, where each interval contains an equal number of data instances (:class:`EqualFreq`), 
35 * entropy-based discretization, originally proposed by [FayyadIrani1993]_, which infers the intervals that minimize 
36   the within-interval entropy of class distributions (:class:`Entropy`), 
    37 * bi-modal, using three intervals to optimize the difference of the class distribution in 
    38   the middle with the distribution outside it (:class:`BiModal`), 
    39 * fixed, with the user-defined cut-off points. 
    40  
    41 The above script used the default discretization method (equal frequency with three intervals). This can be changed 
    42 as demonstrated below: 
    43  
    44 .. literalinclude:: code/discretization-table-method.py 
    45     :lines: 3-5 
    46  
47 With the exception of fixed discretization, discretization approaches infer the cut-off points from the 
48 training data set and thus construct a discretizer that converts continuous values of the feature into a categorical 
49 value according to the rule found by discretization. In this respect, discretization behaves similarly to 
50 :class:`Orange.classification.Learner`. 
    51  
    52 Discretization Algorithms 
    53 ========================= 
    54  
    55 Instances of discretization classes are all derived from :class:`Discretization`. 
    56  
    57 .. class:: Discretization 
    58  
    59     .. method:: __call__(feature, data[, weightID]) 
    60  
61         Given a continuous ``feature``, ``data`` and, optionally, the id of an 
62         attribute with example weights, this function returns a discretized 
63         feature. The argument ``feature`` can be a descriptor, index or 
64         name of the attribute. 
    65  
    66  
    67 .. class:: EqualWidth 
    68  
69     Discretizes the feature by splitting its domain into a fixed number 
70     of equal-width intervals. The span of the original domain is computed 
    71     from the training data and is defined by the smallest and the 
    72     largest feature value. 
    73  
    74     .. attribute:: n 
    75  
    76         Number of discretization intervals (default: 4). 
    77  
    78 The following example discretizes Iris dataset features using six 
    79 intervals. The script constructs a :class:`Orange.data.Table` with discretized 
    80 features and outputs their description: 
    81  
    82 .. literalinclude:: code/discretization.py 
    83     :lines: 38-43 
    84  
    85 The output of this script is:: 
    86  
    87     D_sepal length: <<4.90, [4.90, 5.50), [5.50, 6.10), [6.10, 6.70), [6.70, 7.30), >7.30> 
    88     D_sepal width: <<2.40, [2.40, 2.80), [2.80, 3.20), [3.20, 3.60), [3.60, 4.00), >4.00> 
    89     D_petal length: <<1.98, [1.98, 2.96), [2.96, 3.94), [3.94, 4.92), [4.92, 5.90), >5.90> 
    90     D_petal width: <<0.50, [0.50, 0.90), [0.90, 1.30), [1.30, 1.70), [1.70, 2.10), >2.10> 
    91  
    92 The cut-off values are hidden in the discretizer and stored in ``attr.get_value_from.transformer``:: 
    93  
    94     >>> for attr in newattrs: 
    95     ...    print "%s: first interval at %5.3f, step %5.3f" % \ 
    96     ...    (attr.name, attr.get_value_from.transformer.first_cut, \ 
    97     ...    attr.get_value_from.transformer.step) 
    98     D_sepal length: first interval at 4.900, step 0.600 
    99     D_sepal width: first interval at 2.400, step 0.400 
    100     D_petal length: first interval at 1.980, step 0.980 
    101     D_petal width: first interval at 0.500, step 0.400 
    102  
    103 All discretizers have the method 
    104 ``construct_variable``: 
    105  
    106 .. literalinclude:: code/discretization.py 
    107     :lines: 69-73 
    108  
    109  
    110 .. class:: EqualFreq 
    111  
    112     Infers the cut-off points so that the discretization intervals contain 
113     an approximately equal number of training data instances. 
    114  
    115     .. attribute:: n 
    116  
    117         Number of discretization intervals (default: 4). 
    118  
    119 The resulting discretizer is of class :class:`IntervalDiscretizer`. Its ``transformer`` includes ``points`` 
    120 that store the inferred cut-offs. 
    121  
    122 .. class:: Entropy 
    123  
    124     Entropy-based discretization as originally proposed by [FayyadIrani1993]_. The approach infers the most 
125     appropriate number of intervals by recursively splitting the domain of the continuous feature to minimize the 
126     class-entropy of training examples. The splitting is repeated until the entropy decrease is smaller than the 
127     increase of the minimal description length (MDL) induced by the new cut-off point. 
    128  
129     Entropy-based discretization can reduce a continuous feature to 
130     a single interval if no suitable cut-off points are found. In this case the new feature is constant and can be 
131     removed. This discretization can 
132     therefore also serve to identify non-informative features and can thus be used for feature subset selection. 
    133  
    134     .. attribute:: force_attribute 
    135  
    136         Forces the algorithm to induce at least one cut-off point, even when 
    137         its information gain is lower than MDL (default: ``False``). 
    138  
    139 Part of :download:`discretization.py <code/discretization.py>`: 
    140  
    141 .. literalinclude:: code/discretization.py 
    142     :lines: 77-80 
    143  
144 The output shows that all attributes are discretized into three intervals:: 
    145  
    146     sepal length: <5.5, 6.09999990463> 
    147     sepal width: <2.90000009537, 3.29999995232> 
    148     petal length: <1.89999997616, 4.69999980927> 
    149     petal width: <0.600000023842, 1.0000004768> 
    150  
    151 .. class:: BiModal 
    152  
153     Infers two cut-off points to optimize the difference between the class distributions of data instances in the 
154     middle interval and in the other two intervals. The 
155     difference is scored by the chi-square statistic. All possible cut-off 
    156     points are examined, thus the discretization runs in O(n^2). This discretization method is especially suitable 
    157     for the attributes in 
    158     which the middle region corresponds to normal and the outer regions to 
    159     abnormal values of the feature. 
    160  
    161     .. attribute:: split_in_two 
    162  
    163         Decides whether the resulting attribute should have three or two values. 
    164         If ``True`` (default), the feature will be discretized to three 
    165         intervals and the discretizer is of type :class:`BiModalDiscretizer`. 
    166         If ``False`` the result is the ordinary :class:`IntervalDiscretizer`. 
    167  
168 The Iris dataset has a three-valued class attribute. The figure below, drawn using LOESS probability estimation, shows that 
169 sepal lengths of versicolors lie between the lengths of setosas and virginicas. 
    170  
    171 .. image:: files/bayes-iris.gif 
    172  
173 If we merge the classes setosa and virginica, we can observe whether 
174 the bi-modal discretization correctly recognizes the interval in 
175 which versicolors dominate. The following script performs the merging and constructs a new data set with a class 
176 that reports whether an iris is a versicolor. 
    177  
    178 .. literalinclude:: code/discretization.py 
    179     :lines: 84-87 
    180  
    181 The following script implements the discretization: 
    182  
    183 .. literalinclude:: code/discretization.py 
    184     :lines: 97-100 
    185  
    186 The middle intervals are printed:: 
    187  
    188     sepal length: (5.400, 6.200] 
    189     sepal width: (2.000, 2.900] 
    190     petal length: (1.900, 4.700] 
    191     petal width: (0.600, 1.600] 
    192  
    193 Judging by the graph, the cut-off points inferred by discretization for "sepal length" make sense. 
    194  
    195 Discretizers 
    196 ============ 
    197  
    198 Discretizers construct a categorical feature from the continuous feature according to the method they implement and 
    199 its parameters. The most general is 
    200 :class:`IntervalDiscretizer` that is also used by most discretization 
    201 methods. Two other discretizers, :class:`EquiDistDiscretizer` and 
202 :class:`ThresholdDiscretizer`, could easily be replaced by 
    203 :class:`IntervalDiscretizer` but are used for speed and simplicity. 
    204 The fourth discretizer, :class:`BiModalDiscretizer` is specialized 
    205 for discretizations induced by :class:`BiModalDiscretization`. 
    206  
    207 .. class:: Discretizer 
    208  
    209     A superclass implementing the construction of a new 
    210     attribute from an existing one. 
    211  
    212     .. method:: construct_variable(feature) 
    213  
    214         Constructs a descriptor for a new feature. The new feature's name is equal to ``feature.name`` 
    215         prefixed by "D\_". Its symbolic values are discretizer specific. 
    216  
    217 .. class:: IntervalDiscretizer 
    218  
    219     Discretizer defined with a set of cut-off points. 
    220  
    221     .. attribute:: points 
    222  
    223         The cut-off points; feature values below or equal to the first point will be mapped to the first interval, 
    224         those between the first and the second point 
    225         (including those equal to the second) are mapped to the second interval and 
    226         so forth to the last interval which covers all values greater than 
    227         the last value in ``points``. The number of intervals is thus 
    228         ``len(points)+1``. 
    229  
230 The script that follows is an example of manual construction of a discretizer with cut-off points 
    231 at 3.0 and 5.0: 
    232  
    233 .. literalinclude:: code/discretization.py 
    234     :lines: 22-26 
    235  
    236 First five data instances of ``data2`` are:: 
    237  
    238     [5.1, '>5.00', 'Iris-setosa'] 
    239     [4.9, '(3.00, 5.00]', 'Iris-setosa'] 
    240     [4.7, '(3.00, 5.00]', 'Iris-setosa'] 
    241     [4.6, '(3.00, 5.00]', 'Iris-setosa'] 
    242     [5.0, '(3.00, 5.00]', 'Iris-setosa'] 
    243  
244 The same discretizer can be used on several features by calling the function ``construct_var``: 
    245  
    246 .. literalinclude:: code/discretization.py 
    247     :lines: 30-34 
    248  
    249 Each feature has its own instance of :class:`ClassifierFromVar` stored in 
    250 ``get_value_from``, but all use the same :class:`IntervalDiscretizer`, 
251 ``idisc``. Changing any element of its ``points`` affects all attributes. 
    252  
    253 .. note:: 
    254  
    255     The length of :obj:`~IntervalDiscretizer.points` should not be changed if the 
    256     discretizer is used by any attribute. The length of 
    257     :obj:`~IntervalDiscretizer.points` should always match the number of values 
    258     of the feature, which is determined by the length of the attribute's field 
259     ``values``. If ``attr`` is a discretized attribute, then ``len(attr.values)`` must equal 
    260     ``len(attr.get_value_from.transformer.points)+1``. 
    261  
    262  
    263 .. class:: EqualWidthDiscretizer 
    264  
265     Discretizes to intervals of fixed width. All values lower than :obj:`~EquiDistDiscretizer.first_cut` are mapped to the first 
266     interval. Otherwise, the interval of a value ``val`` is ``floor((val-first_cut)/step)``. Possible overflows are mapped to the 
267     last interval. 
    268  
    269  
    270     .. attribute:: first_cut 
    271  
    272         The first cut-off point. 
    273  
    274     .. attribute:: step 
    275  
    276         Width of the intervals. 
    277  
    278     .. attribute:: n 
    279  
    280         Number of the intervals. 
    281  
    282     .. attribute:: points (read-only) 
    283  
    284         The cut-off points; this is not a real attribute although it behaves 
    285         as one. Reading it constructs a list of cut-off points and returns it, 
    286         but changing the list doesn't affect the discretizer. Only present to provide 
    287         the :obj:`EquiDistDiscretizer` the same interface as that of 
    288         :obj:`IntervalDiscretizer`. 
    289  
    290  
    291 .. class:: ThresholdDiscretizer 
    292  
    293     Threshold discretizer converts continuous values into binary by comparing 
    294     them to a fixed threshold. Orange uses this discretizer for 
    295     binarization of continuous attributes in decision trees. 
    296  
    297     .. attribute:: threshold 
    298  
    299         The value threshold; values below or equal to the threshold belong to the first 
    300         interval and those that are greater go to the second. 
    301  
    302  
    303 .. class:: BiModalDiscretizer 
    304  
305     A bimodal discretizer has two cut-off points, and values are 
306     discretized according to whether or not they belong to the region between these points, 
307     which includes the lower but not the upper boundary. The 
    308     discretizer is returned by :class:`BiModalDiscretization` if its 
    309     field :obj:`~BiModalDiscretization.split_in_two` is true (the default). 
    310  
    311     .. attribute:: low 
    312  
    313         Lower boundary of the interval (included in the interval). 
    314  
    315     .. attribute:: high 
    316  
    317         Upper boundary of the interval (not included in the interval). 
    318  
    319  
    320 Implementational details 
    321 ======================== 
 12The feature discretization module provides routines that take continuous features and 
 13introduce new discretized features based on the training data set. Most often such a procedure is executed 
 14on all features of the data set using implementations from :doc:`Orange.feature.discretization`. Implementations 
 15in this module discretize one feature at a time, and do not provide wrappers for 
 16whole-data-set discretization. Discretization is data-specific and consists of learning a discretization 
 17procedure (see `Discretization Algorithms`_) and the actual discretization (see Discretizers_) of the data. 
 18The split into these 
 19two phases is intentional, 
 20as in machine learning a discretization may be learned from the training set and executed on the test set. 
    32221 
32322Consider the following example (part of :download:`discretization.py <code/discretization.py>`): 
     
    36463by ``get_value_from`` and stored in the new example. 
    36564 
 65With the exception of fixed discretization, discretization approaches infer the cut-off points from the 
 66training data set and thus construct a discretizer that converts continuous values of the feature into a categorical 
 67value according to the rule found by discretization. In this respect, discretization behaves similarly to 
 68:class:`Orange.classification.Learner`. 
     69 
 70.. _`Discretization Algorithms`: 
     71 
     72Discretization Algorithms 
     73========================= 
     74 
     75Instances of discretization classes are all derived from :class:`Discretization`. 
     76 
     77.. class:: Discretization 
     78 
     79    .. method:: __call__(feature, data[, weightID]) 
     80 
 81        Given a continuous ``feature``, ``data`` and, optionally, the id of an 
 82        attribute with example weights, this function returns a discretized 
 83        feature. The argument ``feature`` can be a descriptor, index or 
 84        name of the attribute. 
     85 
     86 
     87.. class:: EqualWidth 
     88 
 89    Discretizes the feature by splitting its domain into a fixed number 
 90    of equal-width intervals. The span of the original domain is computed 
     91    from the training data and is defined by the smallest and the 
     92    largest feature value. 
     93 
     94    .. attribute:: n 
     95 
     96        Number of discretization intervals (default: 4). 
     97 
     98The following example discretizes Iris dataset features using six 
     99intervals. The script constructs a :class:`Orange.data.Table` with discretized 
     100features and outputs their description: 
     101 
     102.. literalinclude:: code/discretization.py 
     103    :lines: 38-43 
     104 
     105The output of this script is:: 
     106 
     107    D_sepal length: <<4.90, [4.90, 5.50), [5.50, 6.10), [6.10, 6.70), [6.70, 7.30), >7.30> 
     108    D_sepal width: <<2.40, [2.40, 2.80), [2.80, 3.20), [3.20, 3.60), [3.60, 4.00), >4.00> 
     109    D_petal length: <<1.98, [1.98, 2.96), [2.96, 3.94), [3.94, 4.92), [4.92, 5.90), >5.90> 
     110    D_petal width: <<0.50, [0.50, 0.90), [0.90, 1.30), [1.30, 1.70), [1.70, 2.10), >2.10> 
     111 
     112The cut-off values are hidden in the discretizer and stored in ``attr.get_value_from.transformer``:: 
     113 
     114    >>> for attr in newattrs: 
     115    ...    print "%s: first interval at %5.3f, step %5.3f" % \ 
     116    ...    (attr.name, attr.get_value_from.transformer.first_cut, \ 
     117    ...    attr.get_value_from.transformer.step) 
     118    D_sepal length: first interval at 4.900, step 0.600 
     119    D_sepal width: first interval at 2.400, step 0.400 
     120    D_petal length: first interval at 1.980, step 0.980 
     121    D_petal width: first interval at 0.500, step 0.400 
     122 
     123All discretizers have the method 
     124``construct_variable``: 
     125 
     126.. literalinclude:: code/discretization.py 
     127    :lines: 69-73 
     128 
     129 
     130.. class:: EqualFreq 
     131 
     132    Infers the cut-off points so that the discretization intervals contain 
 133    an approximately equal number of training data instances. 
     134 
     135    .. attribute:: n 
     136 
     137        Number of discretization intervals (default: 4). 
     138 
     139The resulting discretizer is of class :class:`IntervalDiscretizer`. Its ``transformer`` includes ``points`` 
     140that store the inferred cut-offs. 
     141 
     142.. class:: Entropy 
     143 
     144    Entropy-based discretization as originally proposed by [FayyadIrani1993]_. The approach infers the most 
 145    appropriate number of intervals by recursively splitting the domain of the continuous feature to minimize the 
 146    class-entropy of training examples. The splitting is repeated until the entropy decrease is smaller than the 
 147    increase of the minimal description length (MDL) induced by the new cut-off point. 
     148 
 149    Entropy-based discretization can reduce a continuous feature to 
 150    a single interval if no suitable cut-off points are found. In this case the new feature is constant and can be 
 151    removed. This discretization can 
 152    therefore also serve to identify non-informative features and can thus be used for feature subset selection. 
     153 
     154    .. attribute:: force_attribute 
     155 
     156        Forces the algorithm to induce at least one cut-off point, even when 
     157        its information gain is lower than MDL (default: ``False``). 
     158 
     159Part of :download:`discretization.py <code/discretization.py>`: 
     160 
     161.. literalinclude:: code/discretization.py 
     162    :lines: 77-80 
     163 
 164The output shows that all attributes are discretized into three intervals:: 
     165 
     166    sepal length: <5.5, 6.09999990463> 
     167    sepal width: <2.90000009537, 3.29999995232> 
     168    petal length: <1.89999997616, 4.69999980927> 
     169    petal width: <0.600000023842, 1.0000004768> 
     170 
     171.. class:: BiModal 
     172 
 173    Infers two cut-off points to optimize the difference between the class distributions of data instances in the 
 174    middle interval and in the other two intervals. The 
 175    difference is scored by the chi-square statistic. All possible cut-off 
     176    points are examined, thus the discretization runs in O(n^2). This discretization method is especially suitable 
     177    for the attributes in 
     178    which the middle region corresponds to normal and the outer regions to 
     179    abnormal values of the feature. 
     180 
     181    .. attribute:: split_in_two 
     182 
     183        Decides whether the resulting attribute should have three or two values. 
     184        If ``True`` (default), the feature will be discretized to three 
     185        intervals and the discretizer is of type :class:`BiModalDiscretizer`. 
     186        If ``False`` the result is the ordinary :class:`IntervalDiscretizer`. 
     187 
 188The Iris dataset has a three-valued class attribute. The figure below, drawn using LOESS probability estimation, shows that 
 189sepal lengths of versicolors lie between the lengths of setosas and virginicas. 
     190 
     191.. image:: files/bayes-iris.gif 
     192 
 193If we merge the classes setosa and virginica, we can observe whether 
 194the bi-modal discretization correctly recognizes the interval in 
 195which versicolors dominate. The following script performs the merging and constructs a new data set with a class 
 196that reports whether an iris is a versicolor. 
     197 
     198.. literalinclude:: code/discretization.py 
     199    :lines: 84-87 
     200 
     201The following script implements the discretization: 
     202 
     203.. literalinclude:: code/discretization.py 
     204    :lines: 97-100 
     205 
     206The middle intervals are printed:: 
     207 
     208    sepal length: (5.400, 6.200] 
     209    sepal width: (2.000, 2.900] 
     210    petal length: (1.900, 4.700] 
     211    petal width: (0.600, 1.600] 
     212 
     213Judging by the graph, the cut-off points inferred by discretization for "sepal length" make sense. 
     214 
     215.. _Discretizers: 
     216 
     217Discretizers 
     218============= 
     219 
     220Discretizers construct a categorical feature from the continuous feature according to the method they implement and 
     221its parameters. The most general is 
     222:class:`IntervalDiscretizer` that is also used by most discretization 
     223methods. Two other discretizers, :class:`EquiDistDiscretizer` and 
 224:class:`ThresholdDiscretizer`, could easily be replaced by 
     225:class:`IntervalDiscretizer` but are used for speed and simplicity. 
     226The fourth discretizer, :class:`BiModalDiscretizer` is specialized 
     227for discretizations induced by :class:`BiModalDiscretization`. 
     228 
     229.. class:: Discretizer 
     230 
     231    A superclass implementing the construction of a new 
     232    attribute from an existing one. 
     233 
     234    .. method:: construct_variable(feature) 
     235 
     236        Constructs a descriptor for a new feature. The new feature's name is equal to ``feature.name`` 
     237        prefixed by "D\_". Its symbolic values are discretizer specific. 
     238 
     239.. class:: IntervalDiscretizer 
     240 
     241    Discretizer defined with a set of cut-off points. 
     242 
     243    .. attribute:: points 
     244 
     245        The cut-off points; feature values below or equal to the first point will be mapped to the first interval, 
     246        those between the first and the second point 
     247        (including those equal to the second) are mapped to the second interval and 
     248        so forth to the last interval which covers all values greater than 
     249        the last value in ``points``. The number of intervals is thus 
     250        ``len(points)+1``. 
     251 
 252The script that follows is an example of manual construction of a discretizer with cut-off points 
     253at 3.0 and 5.0: 
     254 
     255.. literalinclude:: code/discretization.py 
     256    :lines: 22-26 
     257 
     258First five data instances of ``data2`` are:: 
     259 
     260    [5.1, '>5.00', 'Iris-setosa'] 
     261    [4.9, '(3.00, 5.00]', 'Iris-setosa'] 
     262    [4.7, '(3.00, 5.00]', 'Iris-setosa'] 
     263    [4.6, '(3.00, 5.00]', 'Iris-setosa'] 
     264    [5.0, '(3.00, 5.00]', 'Iris-setosa'] 
     265 
 266The same discretizer can be used on several features by calling the function ``construct_var``: 
     267 
     268.. literalinclude:: code/discretization.py 
     269    :lines: 30-34 
     270 
     271Each feature has its own instance of :class:`ClassifierFromVar` stored in 
     272``get_value_from``, but all use the same :class:`IntervalDiscretizer`, 
 273``idisc``. Changing any element of its ``points`` affects all attributes. 
     274 
     275.. note:: 
     276 
     277    The length of :obj:`~IntervalDiscretizer.points` should not be changed if the 
     278    discretizer is used by any attribute. The length of 
     279    :obj:`~IntervalDiscretizer.points` should always match the number of values 
     280    of the feature, which is determined by the length of the attribute's field 
 281    ``values``. If ``attr`` is a discretized attribute, then ``len(attr.values)`` must equal 
     282    ``len(attr.get_value_from.transformer.points)+1``. 
     283 
     284 
     285.. class:: EqualWidthDiscretizer 
     286 
 287    Discretizes to intervals of fixed width. All values lower than :obj:`~EquiDistDiscretizer.first_cut` are mapped to the first 
 288    interval. Otherwise, the interval of a value ``val`` is ``floor((val-first_cut)/step)``. Possible overflows are mapped to the 
 289    last interval. 
     290 
     291 
     292    .. attribute:: first_cut 
     293 
     294        The first cut-off point. 
     295 
     296    .. attribute:: step 
     297 
     298        Width of the intervals. 
     299 
     300    .. attribute:: n 
     301 
     302        Number of the intervals. 
     303 
     304    .. attribute:: points (read-only) 
     305 
     306        The cut-off points; this is not a real attribute although it behaves 
     307        as one. Reading it constructs a list of cut-off points and returns it, 
     308        but changing the list doesn't affect the discretizer. Only present to provide 
     309        the :obj:`EquiDistDiscretizer` the same interface as that of 
     310        :obj:`IntervalDiscretizer`. 
     311 
     312 
     313.. class:: ThresholdDiscretizer 
     314 
     315    Threshold discretizer converts continuous values into binary by comparing 
     316    them to a fixed threshold. Orange uses this discretizer for 
     317    binarization of continuous attributes in decision trees. 
     318 
     319    .. attribute:: threshold 
     320 
     321        The value threshold; values below or equal to the threshold belong to the first 
     322        interval and those that are greater go to the second. 
     323 
     324 
     325.. class:: BiModalDiscretizer 
     326 
 327    A bimodal discretizer has two cut-off points, and values are 
 328    discretized according to whether or not they belong to the region between these points, 
 329    which includes the lower but not the upper boundary. The 
     330    discretizer is returned by :class:`BiModalDiscretization` if its 
     331    field :obj:`~BiModalDiscretization.split_in_two` is true (the default). 
     332 
     333    .. attribute:: low 
     334 
     335        Lower boundary of the interval (included in the interval). 
     336 
     337    .. attribute:: high 
     338 
     339        Upper boundary of the interval (not included in the interval). 
     340 
    366341References 
    367342========== 
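
    A minimal sketch of the documented ``__call__(feature, data)`` protocol, assuming the bundled
    iris data set::

        import Orange

        iris = Orange.data.Table("iris")
        disc = Orange.feature.discretization.EqualFreq(n=3)
        d_sepal = disc("sepal length", iris)   # the feature may be given by name

        print d_sepal.values                              # three interval labels
        print d_sepal.get_value_from.transformer.points   # inferred cut-offs
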
  • docs/reference/rst/code/datatable1.py

    r9927 r9945  
    1 # Description: Shows how to construct an orange.ExampleTable out of nothing 
     1# Description: Shows how to construct an Orange.data.Table out of nothing 
    22# Category:    basic classes 
    33# Classes:     ExampleTable, Domain 
  • docs/reference/rst/code/datatable2.py

    r9927 r9946  
    1212    data.append([i]) 
    1313 
    14 cv_indices = Orange.core.MakeRandomIndicesCV(data, 4) 
     14cv_indices = Orange.data.sample.SubsetIndicesCV(data, 4) 
    1515print "Indices: ", cv_indices, "\n" 
    1616 
  • docs/reference/rst/code/ensemble-forest-measure.py

    r9638 r9945  
    1818    #call by attribute index 
    1919    imp0 = measure(0, iris)  
    20     #call by orange.Variable 
     20    #call with a Descriptor 
    2121    imp1 = measure(iris.domain.attributes[1], iris) 
    2222    print "first: %0.2f, second: %0.2f\n" % (imp0, imp1) 
  • docs/reference/rst/code/imputation-complex.py

    r9638 r9946  
    122122def compute_span(ex, rw): 
    123123    if ex["TYPE"] == "WOOD" or ex["PURPOSE"] == "WALK": 
    124         return orange.Value(span_var, "SHORT") 
     124        return Orange.data.Value(span_var, "SHORT") 
    125125    else: 
    126         return orange.Value(span_var, "MEDIUM") 
     126        return Orange.data.Value(span_var, "MEDIUM") 
    127127 
    128128imputer.models[bridges.domain.index("SPAN")] = compute_span 
     
    145145for i in original.domain: 
    146146    print "%s: %s -> %s" % (original.domain[i].name, original[i], imputed[i.name]), 
    147     if original.domain[i].varType == Orange.core.VarTypes.Continuous: 
     147    if original.domain[i].var_type == Orange.feature.Type.Continuous: 
    148148        print "(%s)" % imputed[i.name+"_def"] 
    149149    else: 
  • docs/reference/rst/code/knnExample1.py

    r9638 r9946  
    22iris = Orange.data.Table("iris") 
    33 
    4 rndind = Orange.core.MakeRandomIndices2(iris, p0=0.8) 
     4rndind = Orange.data.sample.SubsetIndices2(iris, p0=0.8) 
    55train = iris.select(rndind, 0) 
    66test = iris.select(rndind, 1) 
     
    88knn = Orange.classification.knn.kNNLearner(train, k=10) 
    99for i in range(5): 
    10     instance = test.randomexample() 
     10    instance = test.random_example() 
    1111    print instance.getclass(), knn(instance) 
  • docs/reference/rst/code/knnExample2.py

    r9823 r9946  
    44knn = Orange.classification.knn.kNNLearner() 
    55knn.k = 10 
    6 knn.distance_constructor = Orange.core.ExamplesDistanceConstructor_Hamming() 
     6knn.distance_constructor = Orange.distance.Hamming() 
    77knn = knn(iris) 
    88for i in range(5): 
    9     instance = iris.randomexample() 
     9    instance = iris.random_example() 
    1010    print instance.getclass(), knn(instance) 
  • docs/reference/rst/code/knnInstanceDistance.py

    r9936 r9946  
    44 
    55nnc = Orange.classification.knn.FindNearestConstructor() 
    6 nnc.distanceConstructor = Orange.core.ExamplesDistanceConstructor_Euclidean() 
     6nnc.distance_constructor = Orange.distance.Euclidean() 
    77 
    88did = Orange.feature.Descriptor.new_meta_id() 
  • docs/reference/rst/code/knnlearner.py

    r9823 r9946  
    99 
    1010print "Testing using euclidean distance" 
    11 rndind = Orange.core.MakeRandomIndices2(iris, p0=0.8) 
     11rndind = Orange.data.sample.SubsetIndices2(iris, p0=0.8) 
    1212train = iris.select(rndind, 0) 
    1313test = iris.select(rndind, 1) 
     
    1515knn = Orange.classification.knn.kNNLearner(train, k=10) 
    1616for i in range(5): 
    17     instance = test.randomexample() 
     17    instance = test.random_example() 
    1818    print instance.getclass(), knn(instance) 
    1919 
     
    2323knn = Orange.classification.knn.kNNLearner() 
    2424knn.k = 10 
    25 knn.distanceConstructor = Orange.core.ExamplesDistanceConstructor_Hamming() 
     25knn.distance_constructor = Orange.distance.Hamming() 
    2626knn = knn(train) 
    2727for i in range(5): 
    28     instance = test.randomexample() 
     28    instance = test.random_example() 
    2929    print instance.getclass(), knn(instance) 
  • docs/reference/rst/code/lookup-table.py

    r9927 r9945  
    1 # Description: Shows how to construct an orange.ClassifierFromExampleTable 
     1# Description: Shows how to construct an Orange.classification.lookup.LookupLearner 
    22# Category:    classification, lookup classifiers, constructive induction, feature construction 
    33# Classes:     ClassifierByExampleTable, LookupLearner 
  • docs/reference/rst/code/mds-advanced.py

    r9891 r9946  
    1212 
    1313# Construct a distance matrix using Euclidean distance 
    14 dist = Orange.core.ExamplesDistanceConstructor_Euclidean(iris) 
     14dist = Orange.distance.Euclidean(iris) 
    1515matrix = Orange.misc.SymMatrix(len(iris)) 
    1616for i in range(len(iris)): 
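    The fragment above only shows the renamed constructor; a minimal sketch of filling the symmetric distance matrix it feeds, assuming Orange 2.5:

        import Orange

        iris = Orange.data.Table("iris")
        dist = Orange.distance.Euclidean(iris)   # constructor bound to the data
        matrix = Orange.misc.SymMatrix(len(iris))
        for i in range(len(iris)):
            for j in range(i + 1):
                matrix[i, j] = dist(iris[i], iris[j])
        print "distance(0, 1) = %.3f" % matrix[0, 1]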
  • docs/reference/rst/code/optimization-thresholding2.py

    r9823 r9946  
    22 
    33bupa = Orange.data.Table("bupa") 
    4 ri2 = Orange.core.MakeRandomIndices2(bupa, 0.7) 
     4ri2 = Orange.data.sample.SubsetIndices2(bupa, 0.7) 
    55train = bupa.select(ri2, 0) 
    66test = bupa.select(ri2, 1) 
  • docs/reference/rst/code/optimization-tuningm.py

    r9823 r9946  
    55tuner = Orange.optimization.TuneMParameters(object=learner, 
    66             parameters=[("minSubset", [2, 5, 10, 20]), 
    7                          ("measure", [Orange.core.MeasureAttribute_gainRatio(),  
    8                                       Orange.core.MeasureAttribute_gini()])], 
     7                         ("measure", [Orange.feature.scoring.GainRatio(),  
     8                                      Orange.feature.scoring.Gini()])], 
    99             evaluate = Orange.evaluation.scoring.AUC) 
    1010 
  • docs/reference/rst/code/randomindices2.py

    r9823 r9946  
    2222 
    2323print "\nIndices with random generator" 
    24 indices2.random_generator = Orange.core.RandomGenerator(42)     
     24indices2.random_generator = Orange.misc.Random(42)     
    2525for i in range(5): 
    2626    print indices2(lenses) 
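    A short sketch of the renamed generator in use; seeding it makes the sampler reproducible (assuming Orange 2.5; "lenses" is the table the script loads):

        import Orange

        lenses = Orange.data.Table("lenses")
        indices2 = Orange.data.sample.SubsetIndices2(p0=0.7)
        indices2.random_generator = Orange.misc.Random(42)
        # Same seed, same split, every run.
        print indices2(lenses)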
  • docs/reference/rst/code/reliability-basic.py

    r9875 r9946  
    1919instance = housing[0] 
    2020 
    21 value, probability = restimator(instance, result_type=Orange.core.GetBoth) 
     21value, probability = restimator(instance, result_type=Orange.classification.Classifier.GetBoth) 
    2222 
    2323for estimate in probability.reliability_estimate: 
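    The renamed result_type flag applies to any classifier, not only reliability estimators; a minimal sketch, assuming Orange 2.5:

        import Orange

        iris = Orange.data.Table("iris")
        bayes = Orange.classification.bayes.NaiveLearner(iris)
        # GetBoth returns the predicted value and the class distribution.
        value, dist = bayes(iris[0], Orange.classification.Classifier.GetBoth)
        print value, dist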
  • docs/reference/rst/code/reliability-long.py

    r9875 r9946  
    3737                                 estimate[0], estimate[1]) 
    3838 
    39 indices = Orange.core.MakeRandomIndices2(prostate, p0=0.7) 
     39indices = Orange.data.sample.SubsetIndices2(prostate, p0=0.7) 
    4040train = prostate.select(indices, 0) 
    4141test = prostate.select(indices, 1) 
  • docs/reference/rst/code/reliability_basic.py

    r9906 r9946  
    1313instance = housing[0] 
    1414 
    15 value, probability = restimator(instance, result_type=Orange.core.GetBoth) 
     15value, probability = restimator(instance, result_type=Orange.classification.Classifier.GetBoth) 
    1616 
    1717for estimate in probability.reliability_estimate: 
  • docs/reference/rst/code/scoring-relief-caching.py

    r9823 r9945  
    55# Referenced:  MeasureAttribute.htm 
    66 
    7 import orange 
    8 iris = orange.ExampleTable("iris") 
     7import Orange 
     8iris = Orange.data.Table("iris") 
    99 
    10 r1 = orange.MeasureAttribute_relief() 
    11 r2 = orange.MeasureAttribute_relief(check_cached_data = False) 
     10r1 = Orange.feature.scoring.Relief() 
     11r2 = Orange.feature.scoring.Relief(check_cached_data = False) 
    1212 
    1313print "%.3f\t%.3f" % (r1(0, iris), r2(0, iris)) 
  • docs/reference/rst/code/selection-bayes.py

    r9878 r9946  
    3030        self.__dict__.update(kwds) 
    3131     
    32     def __call__(self, example, resultType = Orange.core.GetValue): 
     32    def __call__(self, example, resultType = Orange.classification.Classifier.GetValue): 
    3333        return self.classifier(example, resultType) 
    3434 
  • docs/reference/rst/code/statExample0.py

    r9372 r9945  
    1 import orange, orngTest, orngTree 
     1import Orange 
    22 
    3 learners = [orange.BayesLearner(name = "bayes"), 
    4             orngTree.TreeLearner(name="tree"), 
    5             orange.MajorityLearner(name="majrty")] 
     3learners = [ Orange.classification.bayes.NaiveLearner(name = "bayes"), 
     4             Orange.classification.tree.TreeLearner(name="tree"), 
     5             Orange.classification.majority.MajorityLearner(name="majrty")] 
    66 
    7 voting = orange.ExampleTable("voting") 
    8 res = orngTest.crossValidation(learners, voting) 
     7voting = Orange.data.Table("voting") 
     8res = Orange.evaluation.testing.cross_validation(learners, voting) 
    99 
    10 vehicle = orange.ExampleTable("vehicle") 
    11 resVeh = orngTest.crossValidation(learners, vehicle) 
     10vehicle = Orange.data.Table("vehicle") 
     11resVeh = Orange.evaluation.testing.cross_validation(learners, vehicle) 
  • docs/reference/rst/code/statExample1.py

    r9372 r9945  
    1 import orange, orngTest, orngTree 
     1import Orange 
    22 
    3 learners = [orange.BayesLearner(name = "bayes"), 
    4             orngTree.TreeLearner(name="tree"), 
    5             orange.MajorityLearner(name="majrty")] 
     3learners = [ Orange.classification.bayes.NaiveLearner(name = "bayes"), 
     4             Orange.classification.tree.TreeLearner(name="tree"), 
     5             Orange.classification.majority.MajorityLearner(name="majrty")] 
    66 
    7 voting = orange.ExampleTable("voting") 
    8 res = orngTest.crossValidation(learners, voting) 
     7voting = Orange.data.Table("voting") 
     8res = Orange.evaluation.testing.cross_validation(learners, voting) 
    99 
    10 vehicle = orange.ExampleTable("vehicle") 
    11 resVeh = orngTest.crossValidation(learners, vehicle) 
     10vehicle = Orange.data.Table("vehicle") 
     11resVeh = Orange.evaluation.testing.cross_validation(learners, vehicle) 
    1212 
    1313import orngStat 
    1414 
    15 CAs = orngStat.CA(res) 
    16 APs = orngStat.AP(res) 
    17 Briers = orngStat.BrierScore(res) 
    18 ISs = orngStat.IS(res) 
     15CAs = Orange.evaluation.scoring.CA(res) 
     16APs = Orange.evaluation.scoring.AP(res) 
     17Briers = Orange.evaluation.scoring.Brier_score(res) 
     18ISs = Orange.evaluation.scoring.IS(res) 
    1919 
    2020print 
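    A compact sketch of the converted evaluation pipeline above, printing one score per learner (assuming Orange 2.5):

        import Orange

        learners = [Orange.classification.bayes.NaiveLearner(name="bayes"),
                    Orange.classification.majority.MajorityLearner(name="majrty")]
        voting = Orange.data.Table("voting")
        res = Orange.evaluation.testing.cross_validation(learners, voting)
        for learner, ca in zip(learners, Orange.evaluation.scoring.CA(res)):
            print "%s: %.3f" % (learner.name, ca)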
  • docs/reference/rst/code/statExamples.py

    r9372 r9945  
    44# Referenced:  orngStat.htm 
    55 
    6 import orange, orngTest, orngTree 
     6import Orange 
    77 
    8 learners = [orange.BayesLearner(name = "bayes"), 
    9             orngTree.TreeLearner(name="tree"), 
    10             orange.MajorityLearner(name="majrty")] 
     8learners = [ Orange.classification.bayes.NaiveLearner(name = "bayes"), 
     9             Orange.classification.tree.TreeLearner(name="tree"), 
     10             Orange.classification.majority.MajorityLearner(name="majrty")] 
    1111 
    12 voting = orange.ExampleTable("voting") 
    13 res = orngTest.crossValidation(learners, voting) 
     12voting = Orange.data.Table("voting") 
     13res = Orange.evaluation.testing.cross_validation(learners, voting) 
    1414 
    15 vehicle = orange.ExampleTable("vehicle") 
    16 resVeh = orngTest.crossValidation(learners, vehicle) 
     15vehicle = Orange.data.Table("vehicle") 
     16resVeh = Orange.evaluation.testing.cross_validation(learners, vehicle) 
    1717 
    18 import orngStat 
     18import Orange.evaluation.scoring 
    1919 
    20 CAs = orngStat.CA(res) 
    21 APs = orngStat.AP(res) 
    22 Briers = orngStat.BrierScore(res) 
    23 ISs = orngStat.IS(res) 
     20CAs = Orange.evaluation.scoring.CA(res) 
     21APs = Orange.evaluation.scoring.AP(res) 
     22Briers = Orange.evaluation.scoring.Brier_score(res) 
     23ISs = Orange.evaluation.scoring.IS(res) 
    2424 
    2525print 
     
    2929 
    3030 
    31 CAs = orngStat.CA(res, reportSE=True) 
    32 APs = orngStat.AP(res, reportSE=True) 
    33 Briers = orngStat.BrierScore(res, reportSE=True) 
    34 ISs = orngStat.IS(res, reportSE=True) 
     31CAs = Orange.evaluation.scoring.CA(res, reportSE=True) 
     32APs = Orange.evaluation.scoring.AP(res, reportSE=True) 
     33Briers = Orange.evaluation.scoring.Brier_score(res, reportSE=True) 
     34ISs = Orange.evaluation.scoring.IS(res, reportSE=True) 
    3535 
    3636print 
     
    4141 
    4242print 
    43 cm = orngStat.confusionMatrices(res)[0] 
     43cm = Orange.evaluation.scoring.confusion_matrices(res)[0] 
    4444print "Confusion matrix for naive Bayes:" 
    4545print "TP: %i, FP: %i, FN: %s, TN: %i" % (cm.TP, cm.FP, cm.FN, cm.TN) 
    4646 
    4747print 
    48 cm = orngStat.confusionMatrices(res, cutoff=0.2)[0] 
     48cm = Orange.evaluation.scoring.confusion_matrices(res, cutoff=0.2)[0] 
    4949print "Confusion matrix for naive Bayes:" 
    5050print "TP: %i, FP: %i, FN: %s, TN: %i" % (cm.TP, cm.FP, cm.FN, cm.TN) 
    5151 
    5252print 
    53 cm = orngStat.confusionMatrices(resVeh, vehicle.domain.classVar.values.index("van"))[0] 
     53cm = Orange.evaluation.scoring.confusion_matrices(resVeh, vehicle.domain.class_var.values.index("van"))[0] 
    5454print "Confusion matrix for naive Bayes for 'van':" 
    5555print "TP: %i, FP: %i, FN: %s, TN: %i" % (cm.TP, cm.FP, cm.FN, cm.TN) 
    5656 
    5757print 
    58 cm = orngStat.confusionMatrices(resVeh, vehicle.domain.classVar.values.index("opel"))[0] 
     58cm = Orange.evaluation.scoring.confusion_matrices(resVeh, vehicle.domain.class_var.values.index("opel"))[0] 
    5959print "Confusion matrix for naive Bayes for 'opel':" 
    6060print "TP: %i, FP: %i, FN: %s, TN: %i" % (cm.TP, cm.FP, cm.FN, cm.TN) 
    6161 
    6262print 
    63 cm = orngStat.confusionMatrices(resVeh)[0] 
    64 classes = vehicle.domain.classVar.values 
     63cm = Orange.evaluation.scoring.confusion_matrices(resVeh)[0] 
     64classes = vehicle.domain.class_var.values 
    6565print "\t"+"\t".join(classes) 
    6666for className, classConfusions in zip(classes, cm): 
    6767    print ("%s" + ("\t%i" * len(classes))) % ((className, ) + tuple(classConfusions)) 
    6868 
    69 cm = orngStat.confusionMatrices(res) 
     69cm = Orange.evaluation.scoring.confusion_matrices(res) 
    7070print 
    7171print "Sensitivity and specificity for 'voting'" 
    7272print "method\tsens\tspec" 
    7373for l in range(len(learners)): 
    74     print "%s\t%5.3f\t%5.3f" % (learners[l].name, orngStat.sens(cm[l]), orngStat.spec(cm[l])) 
     74    print "%s\t%5.3f\t%5.3f" % (learners[l].name, Orange.evaluation.scoring.sens(cm[l]), Orange.evaluation.scoring.spec(cm[l])) 
    7575 
    76 cm = orngStat.confusionMatrices(resVeh, vehicle.domain.classVar.values.index("van")) 
     76cm = Orange.evaluation.scoring.confusion_matrices(resVeh, vehicle.domain.class_var.values.index("van")) 
    7777print 
    7878print "Sensitivity and specificity for 'vehicle=van'" 
    7979print "method\tsens\tspec" 
    8080for l in range(len(learners)): 
    81     print "%s\t%5.3f\t%5.3f" % (learners[l].name, orngStat.sens(cm[l]), orngStat.spec(cm[l])) 
     81    print "%s\t%5.3f\t%5.3f" % (learners[l].name, Orange.evaluation.scoring.sens(cm[l]), Orange.evaluation.scoring.spec(cm[l])) 
    8282 
    8383print 
    8484print "AUC (voting)" 
    8585 
    86 AUCs = orngStat.AUC(res) 
     86AUCs = Orange.evaluation.scoring.AUC(res) 
    8787for l in range(len(learners)): 
    8888    print "%10s: %5.3f" % (learners[l].name, AUCs[l]) 
     
    9292print "AUC for vehicle using weighted single-out method" 
    9393print "bayes\ttree\tmajority" 
    94 AUCs = orngStat.AUC(resVeh, orngStat.AUC.WeightedOneAgainstAll) 
     94AUCs = Orange.evaluation.scoring.AUC(resVeh, Orange.evaluation.scoring.AUC.WeightedOneAgainstAll) 
    9595print "%5.3f\t%5.3f\t%5.3f" % tuple(AUCs) 
    9696 
     
    100100print " " *25 + "  \tbayes\ttree\tmajority" 
    101101for i in range(4): 
    102     AUCs = orngStat.AUC(resVeh, i) 
     102    AUCs = Orange.evaluation.scoring.AUC(resVeh, i) 
    103103    print "%25s: \t%5.3f\t%5.3f\t%5.3f" % ((methods[i], ) + tuple(AUCs)) 
    104104 
    105105 
    106 classes = vehicle.domain.classVar.values 
    107 classDist = orange.Distribution(vehicle.domain.classVar, vehicle) 
     106classes = vehicle.domain.class_var.values 
     107classDist = Orange.statistics.distribution.Distribution(vehicle.domain.class_var, vehicle) 
    108108 
    109109print 
    110110print "AUC for detecting class 'van' in 'vehicle'" 
    111 AUCs = orngStat.AUC_single(resVeh, classIndex = vehicle.domain.classVar.values.index("van")) 
     111AUCs = Orange.evaluation.scoring.AUC_single(resVeh, classIndex = vehicle.domain.class_var.values.index("van")) 
    112112print "%5.3f\t%5.3f\t%5.3f" % tuple(AUCs) 
    113113 
     
    115115print "AUCs for detecting various classes in 'vehicle'" 
    116116for c,s in enumerate(classes): 
    117     print "%s (%5.3f) vs others: \t%5.3f\t%5.3f\t%5.3f" % ((s, classDist[c] ) + tuple(orngStat.AUC_single(resVeh, c))) 
     117    print "%s (%5.3f) vs others: \t%5.3f\t%5.3f\t%5.3f" % ((s, classDist[c] ) + tuple(Orange.evaluation.scoring.AUC_single(resVeh, c))) 
    118118 
    119119print 
    120 classes = vehicle.domain.classVar.values 
    121 AUCmatrix = orngStat.AUC_matrix(resVeh)[0] 
     120classes = vehicle.domain.class_var.values 
     121AUCmatrix = Orange.evaluation.scoring.AUC_matrix(resVeh)[0] 
    122122print "\t"+"\t".join(classes[:-1]) 
    123123for className, AUCrow in zip(classes[1:], AUCmatrix[1:]): 
     
    128128for c1, s1 in enumerate(classes): 
    129129    for c2 in range(c1): 
    130         print "%s vs %s: \t%5.3f\t%5.3f\t%5.3f" % ((s1, classes[c2]) + tuple(orngStat.AUC_pair(resVeh, c1, c2))) 
     130        print "%s vs %s: \t%5.3f\t%5.3f\t%5.3f" % ((s1, classes[c2]) + tuple(Orange.evaluation.scoring.AUC_pair(resVeh, c1, c2))) 
    131131 
    132132 
    133 ri2 = orange.MakeRandomIndices2(voting, 0.6) 
     133ri2 = Orange.data.sample.SubsetIndices2(voting, 0.6) 
    134134train = voting.selectref(ri2, 0) 
    135135test = voting.selectref(ri2, 1) 
    136 res1 = orngTest.learnAndTestOnTestData(learners, train, test) 
     136res1 = Orange.evaluation.testing.learn_and_test_on_test_data(learners, train, test) 
    137137 
    138138print 
    139139print "AUC and SE for voting" 
    140 AUCs = orngStat.AUCWilcoxon(res1) 
     140AUCs = Orange.evaluation.scoring.AUCWilcoxon(res1) 
    141141for li, lrn in enumerate(learners): 
    142142    print "%s: %5.3f+-%5.3f" % (lrn.name, AUCs[li][0], AUCs[li][1]) 
    143143 
    144144print 
    145 print "Difference between naive Bayes and tree: %5.3f+-%5.3f" % tuple(orngStat.compare2AUCs(res1, 0, 1)[2]) 
     145print "Difference between naive Bayes and tree: %5.3f+-%5.3f" % tuple(Orange.evaluation.scoring.compare_2_AUCs(res1, 0, 1)[2]) 
    146146 
    147147print 
    148148print "ROC (first 20 points) for bayes on 'voting'" 
    149 ROC_bayes = orngStat.computeROC(res1)[0] 
     149ROC_bayes = Orange.evaluation.scoring.compute_ROC(res1)[0] 
    150150for t in ROC_bayes[:20]: 
    151151    print "%5.3f\t%5.3f" % t 
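    To see the renamed confusion-matrix helpers end to end, a minimal sketch assuming Orange 2.5 and a binary class, as in 'voting':

        import Orange

        voting = Orange.data.Table("voting")
        learners = [Orange.classification.bayes.NaiveLearner(name="bayes")]
        res = Orange.evaluation.testing.cross_validation(learners, voting)
        cm = Orange.evaluation.scoring.confusion_matrices(res)[0]
        print "TP: %s, FP: %s, FN: %s, TN: %s" % (cm.TP, cm.FP, cm.FN, cm.TN)
        print "sens: %5.3f, spec: %5.3f" % (
            Orange.evaluation.scoring.sens(cm),
            Orange.evaluation.scoring.spec(cm))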
  • docs/reference/rst/code/statExamplesGraphRanks.py

    r9372 r9945  
    1 import orange, orngStat 
     1import Orange 
    22 
    33names = ["first", "third", "second", "fourth" ] 
    44avranks =  [1.9, 3.2, 2.8, 3.3 ]  
    5 cd = orngStat.compute_CD(avranks, 30) #tested on 30 datasets 
    6 orngStat.graph_ranks("statExamples-graph_ranks1.png", avranks, names, \ 
     5cd = Orange.evaluation.scoring.compute_CD(avranks, 30) #tested on 30 datasets 
     6Orange.evaluation.scoring.graph_ranks("statExamples-graph_ranks1.png", avranks, names, \ 
    77    cd=cd, width=6, textspace=1.5) 
  • docs/reference/rst/code/testing-test.py

    r9894 r9946  
    2424print "\nproportionsTest that will give different results, \ 
    2525but the same each time the script is run" 
    26 myRandom = Orange.core.RandomGenerator() 
     26myRandom = Orange.misc.Random() 
    2727for i in range(3): 
    2828    res = Orange.evaluation.testing.proportion_test(learners, voting, 0.7, 
    29         randomGenerator=myRandom) 
     29        random_generator=myRandom) 
    3030    printResults(res) 
    3131# End 
     
    5959 
    6060print "\nLearning curve with pre-separated data" 
    61 indices = Orange.core.MakeRandomIndices2(voting, p0=0.7) 
     61indices = Orange.data.sample.SubsetIndices2(voting, p0=0.7) 
    6262train = voting.select(indices, 0) 
    6363test = voting.select(indices, 1) 
  • docs/reference/rst/code/transformvalue-d2c.py

    r9924 r9945  
    77e1.getValueFrom = Orange.core.ClassifierFromVar(whichVar = data.domain["e"]) 
    88e1.getValueFrom.transformer = Orange.core.Discrete2Continuous() 
    9 e1.getValueFrom.transformer.value = int(orange.Value(e, "1")) 
     9e1.getValueFrom.transformer.value = int(Orange.data.Value(e, "1")) 
  • docs/reference/rst/code/unusedValues.py

    r9869 r9946  
    22data = Orange.data.Table("unusedValues") 
    33 
    4 new_variables = [Orange.core.RemoveUnusedValues(var, data) for var in data.domain.variables] 
     4new_variables = [Orange.preprocess.RemoveUnusedValues(var, data) for var in data.domain.variables] 
    55 
    66print 
  • docs/reference/rst/code/variable-get_value_from.py

    r9897 r9946  
    1919print Orange.feature.scoring.InfoGain(e2, monks) 
    2020 
    21 dist = Orange.core.Distribution(e2, monks) 
     21dist = Orange.statistics.distribution.Distribution(e2, monks) 
    2222print dist  
    2323 
    2424# Split the data into training and testing set 
    25 indices = Orange.core.MakeRandomIndices2(monks, p0=0.7) 
     25indices = Orange.data.sample.SubsetIndices2(monks, p0=0.7) 
    2626train_data = monks.select(indices, 0) 
    2727test_data = monks.select(indices, 1) 
     
    3232 
    3333# Construct a tree and classify unmodified instances 
    34 tree = Orange.core.TreeLearner(new_train) 
     34tree = Orange.classification.tree.TreeLearner(new_train) 
    3535for ex in test_data[:10]: 
    3636    print ex.getclass(), tree(ex) 
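    A hedged sketch tying together the renamed pieces in this file, class distribution, subset indices, and the tree learner, assuming Orange 2.5 (the "monks-1" dataset name is an assumption; the hunks only show the 'monks' variable):

        import Orange

        monks = Orange.data.Table("monks-1")
        dist = Orange.statistics.distribution.Distribution(monks.domain["e"], monks)
        print dist
        indices = Orange.data.sample.SubsetIndices2(monks, p0=0.7)
        train_data = monks.select(indices, 0)
        test_data = monks.select(indices, 1)
        tree = Orange.classification.tree.TreeLearner(train_data)
        for ex in test_data[:5]:
            print ex.getclass(), tree(ex)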