Changeset 8247:41c9ae750bc1 in orange
 Timestamp:
 08/21/11 21:43:05 (3 years ago)
 Branch:
 default
 Convert:
 09674293264c563182dd236eaa34efc8363de11e
 File:

 1 edited
Legend:
 Unmodified
 Added
 Removed

orange/Orange/classification/rules.py
r8233 r8247 123 123 :excludemembers: baseRules, beamWidth, coverAndRemove, dataStopping, 124 124 ruleFinder, ruleStopping, storeInstances, targetClass, weightID, 125 argument ID125 argument_id 126 126 127 127 This class has many more undocumented methods; see the source code for … … 808 808 classifier.defaultDistribution = self.prior 809 809 810 classifier.defaultDistribution.normalize() 810 811 if result_type == Orange.classification.Classifier.GetValue: 811 812 return classifier(instance) … … 1219 1220 ae = aes[0] 1220 1221 rule = self.learn_argumented_rule(ae, dich_data, weight_id) # target class is always first class (0) 1221 if debug and rule:1222 if self.debug and rule: 1222 1223 print "learned arg rule", Orange.classification.rules.rule_to_string(rule) 1223 elif debug:1224 elif self.debug: 1224 1225 print "no rule came out of ", ae 1225 1226 if rule: … … 1230 1231 aes = aes[1:] 1231 1232 1232 if not progress :1233 if not progress and self.debug: 1233 1234 print " arguments finished ... 
" 1234 1235 … … 1287 1288 # learn a rule 1288 1289 self.rule_finder.evaluator.bestRule = None 1289 self.rule_finder.evaluator.returnBestFuture = True 1290 self.rule_finder(examples,weight_id,0,positive_args) 1290 self.rule_finder(examples,weight_id, 0, positive_args) 1291 1291 1292 1292 # return best rule … … 1299 1299 1300 1300 # prepare covering mechanism 1301 self.cover _and_remove = CovererAndRemover_Prob(examples, weight_id, 0, self.apriori)1301 self.coverAndRemove = CovererAndRemover_Prob(examples, weight_id, 0, self.apriori, self.argument_id) 1302 1302 self.rule_finder.evaluator.probVar = examples.domain.getmeta(self.cover_and_remove.probAttribute) 1303 1303 … … 1349 1349 # sort examples by their arguments quality (using first argument as it has already been sorted) 1350 1350 sorted = arg_examples.native() 1351 sorted.sort(lambda x,y: cmp(x[self.argument_id].value.positive Arguments[0].quality,1352 y[self.argument_id].value.positive Arguments[0].quality))1351 sorted.sort(lambda x,y: cmp(x[self.argument_id].value.positive_arguments[0].quality, 1352 y[self.argument_id].value.positive_arguments[0].quality)) 1353 1353 return Orange.data.Table(examples.domain, sorted) 1354 1354 else: … … 1388 1388 rule.filter = Orange.core.Filter_values(domain = examples.domain, 1389 1389 conditions = rule.filter.conditions) 1390 rule.filterAndStore(examples, weight ID, cl)1390 rule.filterAndStore(examples, weight_id, cl) 1391 1391 if hasattr(rule, "learner") and hasattr(rule.learner, "arg_example"): 1392 1392 rule.learner.arg_example = Orange.data.Instance(examples.domain, rule.learner.arg_example) … … 1443 1443 pos_args = RuleList() 1444 1444 # prepare arguments 1445 for p in ae[self.argument_id].value.positive Arguments:1445 for p in ae[self.argument_id].value.positive_arguments: 1446 1446 new_arg = Rule(filter=ArgFilter(argument_id = self.argument_id, 1447 1447 filter = self.newFilter_values(p.filter), … … 1461 1461 p.filterAndStore(examples, weight_id, 0) 1462 1462 if not 
p.learner: 1463 p.learner = DefaultLearner(default Value=ae.getclass())1463 p.learner = DefaultLearner(default_value=ae.getclass()) 1464 1464 # pruning on: we check on all conditions and take only best 1465 1465 if self.prune_arguments: 1466 1466 allowed_conditions = [c for c in p.filter.conditions] 1467 1467 pruned_conditions = self.prune_arg_conditions(ae, allowed_conditions, examples, weight_id) 1468 p.baseDist = orange.Distribution(examples.domain.classVar, examples, weight_id) 1468 1469 p.filter.conditions = pruned_conditions 1470 p.learner.setattr("arg_length", 0) 1471 1469 1472 else: # prune only unspecified conditions 1470 1473 spec_conditions = [c for c in p.filter.conditions if not c.unspecialized_condition] … … 1473 1476 p.filter.conditions = spec_conditions 1474 1477 p.filterAndStore(examples, weight_id, 0) 1475 pruned_conditions = self.prune_arg_conditions(ae, unspec_conditions, p.examples, p.weight_id) 1478 p.baseDist = p.classDistribution 1479 p.learner.setattr("arg_length", len(p.filter.conditions)) 1480 pruned_conditions = self.prune_arg_conditions(ae, unspec_conditions, p.examples, p.weightID) 1476 1481 p.filter.conditions.extend(pruned_conditions) 1477 1482 p.filter.filter.conditions.extend(pruned_conditions) … … 1492 1497 p.filterAndStore(examples,weight_id,0) 1493 1498 p.filter.domain = examples.domain 1494 p.classifier = p.learner(p.examples, p.weight _id)1499 p.classifier = p.learner(p.examples, p.weightID) 1495 1500 p.requiredConditions = len(p.filter.conditions) 1496 1501 p.learner.setattr("arg_example", ae) … … 1511 1516 1512 1517 def init_neg_args(self, ae, examples, weight_id): 1513 return ae[self.argument_id].value.negative Arguments1518 return ae[self.argument_id].value.negative_arguments 1514 1519 1515 1520 def remaining_probability(self, examples): … … 1567 1572 rule_sig=rule_sig, att_sig=att_sig, min_coverage=int(min_coverage), 1568 1573 max_rule_complexity = int(max_rule_complexity)) 1574 1575 class 
DefaultLearner(Orange.core.Learner): 1576 """ 1577 Default lerner  returns default classifier with predefined output class. 1578 """ 1579 def __init__(self,default_value = None): 1580 self.default_value = default_value 1581 def __call__(self,examples,weight_id=0): 1582 return Orange.classification.majority.ConstantClassifier(self.default_value,defaultDistribution = Orange.core.Distribution(examples.domain.class_var,examples,weight_id)) 1583 1584 class ABCN2Ordered(ABCN2): 1585 """ 1586 Rules learned by ABCN2 are ordered and used as a decision list. 1587 """ 1588 def __init__(self, argument_id=0, **kwds): 1589 ABCN2.__init__(self, argument_id=argument_id, **kwds) 1590 self.classifier.set_prefix_rules = True 1591 self.classifier.optimize_betas = False 1592 1593 class ABCN2M(ABCN2): 1594 """ 1595 Argument based rule learning with mestimate as evaluation function. 1596 """ 1597 def __init__(self, argument_id=0, **kwds): 1598 ABCN2.__init__(self, argument_id=argument_id, **kwds) 1599 self.opt_reduction = 0 1600 self.rule_finder.evaluator.optimismReduction = self.opt_reduction 1601 self.classifier = CN2UnorderedClassifier 1602 1603 class ABCN2MLRC(ABCN2): 1604 """ 1605 Argument based rule learning with mestimate as evaluation function. LRC is used as a classification method. 1606 """ 1607 def __init__(self, argument_id=0, **kwds): 1608 ABCN2.__init__(self, argument_id=argument_id, **kwds) 1609 self.opt_reduction = 0 1610 self.rule_finder.evaluator.optimismReduction = self.opt_reduction 1611 1612 class ABCN2_StandardClassification(ABCN2): 1613 """ 1614 Argument based rule learning with the original classification technique. 1615 """ 1616 def __init__(self, argument_id=0, **kwds): 1617 ABCN2.__init__(self, argument_id=argument_id, **kwds) 1618 self.classifier = CN2UnorderedClassifier 1569 1619 1570 1620 … … 1728 1778 class CovererAndRemover_Prob(RuleCovererAndRemover): 1729 1779 """ This class impements probabilistic covering. 
""" 1730 def __init__(self, probAttribute=None, sigAttribute=None): 1731 self.indices = None 1732 self.probAttribute = probAttribute 1733 self.bestRule = [] 1734 1735 def initialize(self, instances, weight_id, target_class, apriori): 1736 self.bestRule = [None]*len(instances) 1737 self.probAttribute = Orange.core.newmetaid() 1738 instances.addMetaAttribute(self.probAttribute,1.e6) 1739 instances.domain.addmeta(self.probAttribute, \ 1780 def __init__(self, examples, weight_id, target_class, apriori, argument_id): 1781 self.best_rule = [None]*len(examples) 1782 self.prob_attribute = Orange.core.newmetaid() 1783 self.apriori_prob = apriori[target_class]/apriori.abs 1784 examples.addMetaAttribute(self.prob_attribute, self.apriori_prob) 1785 examples.domain.addmeta(self.prob_attribute, 1740 1786 Orange.data.variable.Continuous("Probs")) 1741 for instance in instances: 1742 ## if target_class<0 or (instance.getclass() == target_class): 1743 instance[self.probAttribute] = apriori[target_class]/apriori.abs 1744 return instances 1745 1746 def getBestRules(self, currentRules, instances, weight_id): 1787 self.argument_id = argument_id 1788 1789 def getBestRules(self, current_rules, examples, weight_id): 1747 1790 best_rules = RuleList() 1748 for r in currentRules:1749 if hasattr(r.learner, "argumentRule") and not orngCN2.rule_in_set(r,best_rules):1750 best_rules.append(r)1751 for r_i,r in enumerate(self.bestRule):1752 if r and not rule_in_set(r,best_rules) and instances[r_i].\1753 getclass()==r.classifier.default_value:1791 for r_i,r in enumerate(self.best_rule): 1792 if r and not rule_in_set(r,best_rules) and int(examples[r_i].getclass())==int(r.classifier.default_value): 1793 if hasattr(r.learner, "arg_example"): 1794 setattr(r, "best_example", r.learner.arg_example) 1795 else: 1796 setattr(r, "best_example", examples[r_i]) 1754 1797 best_rules.append(r) 1755 1798 return best_rules 1756 1757 def remainingInstancesP(self, instances, target_class): 1758 pSum, pAll = 0.0, 0.0 
1759 for ex in instances: 1760 if ex.getclass() == target_class: 1761 pSum += ex[self.probAttribute] 1762 pAll += 1.0 1763 return pSum/pAll 1764 1765 def __call__(self, rule, instances, weights, target_class): 1766 if target_class<0: 1767 for instance_i, instance in enumerate(instances): 1768 if rule(instance) and rule.quality>instance[self.probAttribute]0.01: 1769 instance[self.probAttribute] = rule.quality+0.01 1770 self.bestRule[instance_i]=rule 1771 else: 1772 for instance_i, instance in enumerate(instances): #rule.classifier.default_val == instance.getclass() and 1773 if rule(instance) and rule.quality>instance[self.probAttribute]: 1774 instance[self.probAttribute] = rule.quality+0.001 1775 self.bestRule[instance_i]=rule 1776 ## if rule.classifier.default_val == instance.getclass(): 1777 ## print instance[self.probAttribute] 1778 # compute factor 1779 return (instances,weights) 1799 1800 def __call__(self, rule, examples, weights, target_class): 1801 """ if example has an argument, then the rule must be consistent with the argument. 
""" 1802 example = getattr(rule.learner, "arg_example", None) 1803 for ei, e in enumerate(examples): 1804 if e == example: 1805 e[self.prob_attribute] = 1.0 1806 self.best_rule[ei] = rule 1807 elif rule(e) and rule.quality>e[self.prob_attribute]: 1808 e[self.prob_attribute] = rule.quality+0.001 # 0.001 is added to avoid numerical errors 1809 self.best_rule[ei]=rule 1810 return (examples,weights) 1811 1812 def filter_covers_example(self, example, filter): 1813 filter_indices = RuleCoversArguments.filterIndices(filter) 1814 if filter(example): 1815 try: 1816 if example[self.argument_id].value and len(example[self.argument_id].value.positive_arguments)>0: # example has positive arguments 1817 # conditions should cover at least one of the positive arguments 1818 one_arg_covered = False 1819 for pA in example[self.argument_id].value.positive_arguments: 1820 arg_covered = [self.condIn(c,filter_indices) for c in pA.filter.conditions] 1821 one_arg_covered = one_arg_covered or len(arg_covered) == sum(arg_covered) #arg_covered 1822 if one_arg_covered: 1823 break 1824 if not one_arg_covered: 1825 return False 1826 if example[self.argument_id].value and len(example[self.argument_id].value.negative_arguments)>0: # example has negative arguments 1827 # condition should not cover neither of negative arguments 1828 for pN in example[self.argument_id].value.negative_arguments: 1829 arg_covered = [self.condIn(c, filter_indices) for c in pN.filter.conditions] 1830 if len(arg_covered)==sum(arg_covered): 1831 return False 1832 except: 1833 return True 1834 return True 1835 return False 1836 1837 def condIn(self, cond, filter_indices): # is condition in the filter? 
1838 condInd = RuleCoversArguments.conditionIndex(cond) 1839 if operator.or_(condInd,filter_indices[cond.position]) == filter_indices[cond.position]: 1840 return True 1841 return False 1842 1843 1844 def covered_percentage(self, examples): 1845 p = 0.0 1846 for ei, e in enumerate(examples): 1847 p += (e[self.prob_attribute]  self.apriori_prob)/(1.0self.apriori_prob) 1848 return p/len(examples) 1849 1850 1780 1851 1781 1852 … … 1898 1969 return l[int(math.floor(p*len(l)))] 1899 1970 1900 def createRandomDataSet(data): 1901 newData = Orange.data.Table(data) 1902 # shuffle data 1903 cl_num = newData.toNumeric("C") 1904 random.shuffle(cl_num[0][:,0]) 1905 clData = Orange.data.Table(Orange.data.Domain([newData.domain.class_var]),cl_num[0]) 1906 for d_i,d in enumerate(newData): 1907 d[newData.domain.class_var] = clData[d_i][newData.domain.class_var] 1908 return newData 1909 1910 # estimated fisher tippett parameters for a set of values given in vals list (+ deciles) 1911 def compParameters(vals,oldMi=0.5,oldBeta=1.1): 1912 # compute percentiles 1913 vals.sort() 1914 N = len(vals) 1915 percs = [avg(vals[int(float(N)*i/10):int(float(N)*(i+1)/10)]) for i in range(10)] 1916 if N<10: 1917 return oldMi, oldBeta, percs 1918 beta = math.sqrt(6*var(vals)/math.pow(math.pi,2)) 1919 beta = min(2.0,max(oldBeta, beta)) 1920 mi = max(oldMi, avg(vals)  0.57721*beta) 1921 return mi, beta, percs 1922 1923 def computeDists(data, weight=0, target_class=0, N=100, learner=None): 1924 """ Compute distributions of likelihood ratio statistics of extreme (best) rules.""" 1925 if not learner: 1926 learner = createLearner() 1927 1928 ######################### 1929 ## Learner preparation ## 1930 ######################### 1931 oldStopper = learner.rule_finder.rule_stoppingValidator 1932 evaluator = learner.rule_finder.evaluator 1933 learner.rule_finder.evaluator = RuleEvaluator_LRS() 1934 learner.rule_finder.evaluator.storeRules = True 1935 learner.rule_finder.rule_stoppingValidator = 
RuleValidator_LRS(alpha=1.0) 1936 learner.rule_finder.rule_stoppingValidator.max_rule_complexity = 0 1937 1938 # loop through N (sampling repetitions) 1939 maxVals = [] 1940 for d_i in range(N): 1941 # create data set (remove and randomize) 1942 tempData = createRandomDataSet(data) 1943 learner.rule_finder.evaluator.rules = RuleList() 1944 # Next, learn a rule 1945 bestRule = learner.rule_finder(tempData,weight,target_class,RuleList()) 1946 maxVals.append(bestRule.quality) 1947 extreme_dists=[compParameters(maxVals,1.0,1.0)] 1948 1949 ##################### 1950 ## Restore learner ## 1951 ##################### 1952 learner.rule_finder.evaluator = evaluator 1953 learner.rule_finder.rule_stoppingValidator = oldStopper 1954 return extreme_dists 1955 1956 def createEVDistList(evdList): 1957 l = Orange.core.EVDistList() 1958 for el in evdList: 1959 l.append(Orange.core.EVDist(mu=el[0],beta=el[1],percentiles=el[2])) 1960 return l 1961 1962 def add_sub_rules(rules, instances, weight, learner, dists): 1963 apriori = Orange.core.Distribution(instances.domain.class_var,instances,weight) 1964 new_rules = RuleList() 1965 for r in rules: 1966 new_rules.append(r) 1967 1968 # loop through rules 1969 for r in rules: 1970 tmpList = RuleList() 1971 tmpRle = r.clone() 1972 tmpRle.filter.conditions = [] 1973 tmpRle.parentRule = None 1974 tmpRle.filterAndStore(instances,weight,r.classifier.default_val) 1975 tmpList.append(tmpRle) 1976 while tmpList and len(tmpList[0].filter.conditions) <= len(r.filter.conditions): 1977 tmpList2 = RuleList() 1978 for tmpRule in tmpList: 1979 # evaluate tmpRule 1980 oldREP = learner.rule_finder.evaluator.returnExpectedProb 1981 learner.rule_finder.evaluator.returnExpectedProb = False 1982 learner.rule_finder.evaluator.evDistGetter.dists = createEVDistList(\ 1983 dists[int(r.classifier.default_val)]) 1984 tmpRule.quality = learner.rule_finder.evaluator(tmpRule, 1985 instances,weight,r.classifier.default_val,apriori) 1986 
learner.rule_finder.evaluator.returnExpectedProb = oldREP 1987 # if rule not in rules already, add it to the list 1988 if not True in [rules_equal(ri,tmpRule) for ri in new_rules] and\ 1989 len(tmpRule.filter.conditions)>0 and tmpRule.quality >\ 1990 apriori[r.classifier.default_val]/apriori.abs: 1991 new_rules.append(tmpRule) 1992 # create new tmpRules, set parent Rule, append them to tmpList2 1993 if not True in [rules_equal(ri,tmpRule) for ri in new_rules]: 1994 for c in r.filter.conditions: 1995 tmpRule2 = tmpRule.clone() 1996 tmpRule2.parentRule = tmpRule 1997 tmpRule2.filter.conditions.append(c) 1998 tmpRule2.filterAndStore(instances,weight,r.classifier.default_val) 1999 if tmpRule2.class_distribution.abs < tmprule.class_distribution.abs: 2000 tmpList2.append(tmpRule2) 2001 tmpList = tmpList2 2002 for cl in instances.domain.class_var: 2003 tmpRle = Rule() 2004 tmpRle.filter = Orange.core.Filter_values(domain = instances.domain) 2005 tmpRle.parentRule = None 2006 tmpRle.filterAndStore(instances,weight,int(cl)) 2007 tmpRle.quality = tmpRle.class_distribution[int(cl)]/tmpRle.class_distribution.abs 2008 new_rules.append(tmpRle) 2009 return new_rules 2010 2011 2012 class DefaultLearner(Orange.core.Learner): 2013 """ 2014 Default lerner  returns default classifier with predefined output class. 2015 """ 2016 def __init__(self,default_value = None): 2017 self.default_value = default_value 2018 def __call__(self,examples,weight_id=0): 2019 return Orange.classification.majority.ConstantClassifier(self.default_value,defaultDistribution = Orange.core.Distribution(examples.domain.class_var,examples,weight_id)) 2020 2021 class ABCN2Ordered(ABCN2): 2022 """ 2023 Rules learned by ABCN2 are ordered and used as a decision list. 
2024 """ 2025 def __init__(self, argument_id=0, **kwds): 2026 ABCN2.__init__(self, argument_id=argument_id, **kwds) 2027 self.classifier.set_prefix_rules = True 2028 self.classifier.optimize_betas = False 2029 2030 class ABCN2M(ABCN2): 2031 """ 2032 Argument based rule learning with mestimate as evaluation function. 2033 """ 2034 def __init__(self, argument_id=0, **kwds): 2035 ABCN2.__init__(self, argument_id=argument_id, **kwds) 2036 self.opt_reduction = 0 2037 2038 1971 class EVDFitter: 1972 """ Randomizes a dataset and fits an extreme value distribution onto it. """ 1973 1974 def __init__(self, learner, n=200, randomseed=100): 1975 self.learner = learner 1976 self.n = n 1977 self.randomseed = randomseed 1978 # initialize random seed to make experiments repeatable 1979 random.seed(self.randomseed) 1980 1981 1982 def createRandomDataSet(self, data): 1983 newData = Orange.core.ExampleTable(data) 1984 # shuffle data 1985 cl_num = newData.toNumpy("C") 1986 random.shuffle(cl_num[0][:,0]) 1987 clData = Orange.core.ExampleTable(Orange.core.Domain([newData.domain.classVar]),cl_num[0]) 1988 for d_i,d in enumerate(newData): 1989 d[newData.domain.classVar] = clData[d_i][newData.domain.classVar] 1990 return newData 1991 1992 def createEVDistList(self, evdList): 1993 l = Orange.core.EVDistList() 1994 for el in evdList: 1995 l.append(Orange.core.EVDist(mu=el[0],beta=el[1],percentiles=el[2])) 1996 return l 1997 1998 1999 # estimated fisher tippett parameters for a set of values given in vals list (+ deciles) 2000 def compParameters(self, vals, oldMi, oldBeta, oldPercs, fixedBeta=False): 2001 # compute percentiles 2002 vals.sort() 2003 N = len(vals) 2004 percs = [avg(vals[int(float(N)*i/10):int(float(N)*(i+1)/10)]) for i in range(10)] 2005 if N<10: 2006 return oldMi, oldBeta, percs 2007 if not fixedBeta: 2008 beta = min(2.0, math.sqrt(6*var(vals)/math.pow(math.pi,2)))#min(2.0, max(oldBeta, math.sqrt(6*var(vals)/math.pow(math.pi,2)))) 2009 else: 2010 beta = oldBeta 2011 mi = 
max(oldMi,percs[1]+beta*math.log(math.log(0.95))) 2012 mi = percs[1]+beta*math.log(math.log(0.95)) 2013 return max(oldMi, numpy.average(vals)beta*0.5772156649), beta, None 2014 2015 def prepare_learner(self): 2016 self.oldStopper = self.learner.ruleFinder.ruleStoppingValidator 2017 self.evaluator = self.learner.ruleFinder.evaluator 2018 self.refiner = self.learner.ruleFinder.refiner 2019 self.validator = self.learner.ruleFinder.validator 2020 self.ruleFilter = self.learner.ruleFinder.ruleFilter 2021 self.learner.ruleFinder.validator = None 2022 self.learner.ruleFinder.evaluator = Orange.core.RuleEvaluator_LRS() 2023 self.learner.ruleFinder.evaluator.storeRules = True 2024 self.learner.ruleFinder.ruleStoppingValidator = Orange.core.RuleValidator_LRS(alpha=1.0) 2025 self.learner.ruleFinder.ruleStoppingValidator.max_rule_complexity = 0 2026 self.learner.ruleFinder.refiner = Orange.core.RuleBeamRefiner_Selector() 2027 self.learner.ruleFinder.ruleFilter = Orange.core.RuleBeamFilter_Width(width = 5) 2028 2029 2030 def restore_learner(self): 2031 self.learner.ruleFinder.evaluator = self.evaluator 2032 self.learner.ruleFinder.ruleStoppingValidator = self.oldStopper 2033 self.learner.ruleFinder.refiner = self.refiner 2034 self.learner.ruleFinder.validator = self.validator 2035 self.learner.ruleFinder.ruleFilter = self.ruleFilter 2036 2037 def computeEVD(self, data, weightID=0, target_class=0, progress=None): 2038 import time 2039 # prepare learned for distribution computation 2040 self.prepare_learner() 2041 2042 # loop through N (sampling repetitions) 2043 extremeDists=[(0, 1, [])] 2044 self.learner.ruleFinder.ruleStoppingValidator.max_rule_complexity = self.oldStopper.max_rule_complexity 2045 maxVals = [[] for l in range(self.oldStopper.max_rule_complexity+1)] 2046 for d_i in range(self.n): 2047 if not progress: 2048 if self.learner.debug: 2049 print d_i, 2050 else: 2051 progress(float(d_i)/self.n, None) 2052 # create data set (remove and randomize) 2053 a = time.time() 
2054 tempData = self.createRandomDataSet(data) 2055 a = time.time() 2056 self.learner.ruleFinder.evaluator.rules = RuleList() 2057 a = time.time() 2058 for l in range(self.oldStopper.max_rule_complexity+2): 2059 self.learner.ruleFinder.evaluator.rules.append(None) 2060 a = time.time() 2061 # Next, learn a rule 2062 self.learner.ruleFinder(tempData,weightID,target_class, RuleList()) 2063 a = time.time() 2064 for l in range(self.oldStopper.max_rule_complexity+1): 2065 if self.learner.ruleFinder.evaluator.rules[l]: 2066 maxVals[l].append(self.learner.ruleFinder.evaluator.rules[l].quality) 2067 else: 2068 maxVals[l].append(0) 2069 ## qs = [r.quality for r in self.learner.ruleFinder.evaluator.rules if r.complexity == l+1] 2070 #### if qs: 2071 #### for r in self.learner.ruleFinder.evaluator.rules: 2072 #### if r.quality == max(qs) and r.classDistribution.abs == 16 and r.classDistribution[0] == 16: 2073 #### print "best rule", orngCN2.ruleToString(r), r.quality 2074 ## if qs: 2075 ## maxVals[l].append(max(qs)) 2076 ## else: 2077 ## maxVals[l].append(0) 2078 a = time.time() 2079 2080 # longer rule should always be better than shorter rule 2081 for l in range(self.oldStopper.max_rule_complexity): 2082 for i in range(len(maxVals[l])): 2083 if maxVals[l+1][i] < maxVals[l][i]: 2084 maxVals[l+1][i] = maxVals[l][i] 2085 ## print 2086 ## for mi, m in enumerate(maxVals): 2087 ## print "mi=",mi,m 2088 2089 mu, beta, perc = 1.0, 2.0, [0.0]*10 2090 for mi,m in enumerate(maxVals): 2091 ## if mi == 0: 2092 ## mu, beta, perc = self.compParameters(m, mu, beta, perc) 2093 ## else: 2094 mu, beta, perc = self.compParameters(m, mu, beta, perc, fixedBeta=True) 2095 extremeDists.append((mu, beta, perc)) 2096 extremeDists.extend([(0,1,[])]*(mi)) 2097 if self.learner.debug: 2098 print mi, mu, beta, perc 2099 2100 self.restore_learner() 2101 return self.createEVDistList(extremeDists) 2102 2039 2103 class ABBeamFilter(Orange.core.RuleBeamFilter): 2040 2104 """ … … 2060 2124 return newStar 2061 
2125 2062 def setArguments(self,domain,positive Arguments):2063 self.pArgs = positive Arguments2126 def setArguments(self,domain,positive_arguments): 2127 self.pArgs = positive_arguments 2064 2128 self.domain = domain 2065 2129 self.argTab = [0]*len(self.domain.attributes) … … 2078 2142 2079 2143 2080 class ruleCoversArguments:2144 class RuleCoversArguments: 2081 2145 """ 2082 2146 Class determines if rule covers one out of a set of arguments. … … 2088 2152 indNA = getattr(a.filter,"indices",None) 2089 2153 if not indNA: 2090 a.filter.setattr("indices", ruleCoversArguments.filterIndices(a.filter))2154 a.filter.setattr("indices", RuleCoversArguments.filterIndices(a.filter)) 2091 2155 self.indices.append(a.filter.indices) 2092 2156 … … 2095 2159 return False 2096 2160 if not getattr(rule.filter,"indices",None): 2097 rule.filter.indices = ruleCoversArguments.filterIndices(rule.filter)2161 rule.filter.indices = RuleCoversArguments.filterIndices(rule.filter) 2098 2162 for index in self.indices: 2099 2163 if map(operator.or_,rule.filter.indices,index) == rule.filter.indices: … … 2107 2171 for c in filter.conditions: 2108 2172 ind[c.position]=operator.or_(ind[c.position], 2109 ruleCoversArguments.conditionIndex(c))2173 RuleCoversArguments.conditionIndex(c)) 2110 2174 return ind 2111 2175 filterIndices = staticmethod(filterIndices) … … 2158 2222 2159 2223 def __call__(self, oldRule, data, weight_id, target_class=1): 2160 inNotAllowedSelectors = ruleCoversArguments(self.not_allowed_selectors)2224 inNotAllowedSelectors = RuleCoversArguments(self.not_allowed_selectors) 2161 2225 new_rules = Orange.core.RuleList() 2162 2226 … … 2164 2228 indices = getattr(oldRule.filter,"indices",None) 2165 2229 if not indices: 2166 indices = ruleCoversArguments.filterIndices(oldRule.filter)2230 indices = RuleCoversArguments.filterIndices(oldRule.filter) 2167 2231 oldRule.filter.setattr("indices",indices) 2168 2232 … … 2171 2235 for nA in self.not_allowed_selectors: 2172 2236 #print indices, 
nA.filter.indices 2173 at_i,type_na = ruleCoversArguments.oneSelectorToCover(indices, nA.filter.indices)2237 at_i,type_na = RuleCoversArguments.oneSelectorToCover(indices, nA.filter.indices) 2174 2238 if at_i>1: 2175 2239 negative_indices[at_i] = operator.or_(negative_indices[at_i],type_na) … … 2192 2256 acceptSpecial=0)) 2193 2257 tempRule.complexity += 1 2194 tempRule.filter.indices[i] = 1 # 1 stands for discrete attribute (see ruleCoversArguments.conditionIndex)2195 tempRule.filterAndStore(oldRule.examples, oldRule.weight _id, target_class)2258 tempRule.filter.indices[i] = 1 # 1 stands for discrete attribute (see RuleCoversArguments.conditionIndex) 2259 tempRule.filterAndStore(oldRule.examples, oldRule.weightID, target_class) 2196 2260 if len(tempRule.examples)<len(oldRule.examples): 2197 2261 new_rules.append(tempRule) … … 2228 2292 acceptSpecial=0)) 2229 2293 tempRule.complexity += 1 2230 tempRule.filter.indices[pos] = operator.or_(tempRule.filter.indices[pos],atIndex) # from ruleCoversArguments.conditionIndex2231 tempRule.filterAndStore(oldRule.examples,tempRule.weight _id,target_class)2294 tempRule.filter.indices[pos] = operator.or_(tempRule.filter.indices[pos],atIndex) # from RuleCoversArguments.conditionIndex 2295 tempRule.filterAndStore(oldRule.examples,tempRule.weightID,target_class) 2232 2296 return tempRule 2233 2297 … … 2236 2300 tempRule.filter.conditions[ci] = condition 2237 2301 tempRule.filter.conditions[ci].setattr("specialized",1) 2238 tempRule.filterAndStore(oldRule.examples,oldRule.weight _id,target_class)2302 tempRule.filterAndStore(oldRule.examples,oldRule.weightID,target_class) 2239 2303 return tempRule 2240 2304 … … 2245 2309 # I should take another look at it. 2246 2310 class ArgFilter(Orange.core.Filter): 2247 """ 2248 This class implements ABcovering principle. 2249 """ 2250 def __init__(self, argument_id=None, filter = Orange.core.Filter_values()): 2311 """ This class implements ABcovering principle. 
""" 2312 def __init__(self, argument_id=None, filter = Orange.core.Filter_values(), arg_example = None): 2251 2313 self.filter = filter 2252 2314 self.indices = getattr(filter,"indices",[]) 2253 2315 if not self.indices and len(filter.conditions)>0: 2254 self.indices = ruleCoversArguments.filterIndices(filter)2316 self.indices = RuleCoversArguments.filterIndices(filter) 2255 2317 self.argument_id = argument_id 2256 self.debug = 02257 2318 self.domain = self.filter.domain 2258 2319 self.conditions = filter.conditions 2320 self.arg_example = arg_example 2259 2321 2260 2322 def condIn(self,cond): # is condition in the filter? … … 2265 2327 2266 2328 def __call__(self,example): 2267 ## print "in", self.filter(example) , self.filter.conditions[0](example)2329 ## print "in", self.filter(example)#, self.filter.conditions[0](example) 2268 2330 ## print self.filter.conditions[1].values 2269 if self.filter(example): 2331 if self.filter(example) and example != self.arg_example: 2332 return True 2333 elif self.filter(example): 2270 2334 try: 2271 2335 if example[self.argument_id].value and len(example[self.argument_id].value.positiveArguments)>0: # example has positive arguments … … 2295 2359 self.filter.setattr(name,obj) 2296 2360 2297 def deep Copy(self):2361 def deep_copy(self): 2298 2362 newFilter = ArgFilter(argument_id=self.argument_id) 2299 2363 newFilter.filter = Orange.core.Filter_values() #self.filter.deepCopy() … … 2305 2369 newFilter.conditions = newFilter.filter.conditions 2306 2370 newFilter.indices = self.indices[:] 2307 if getattr(self,"candidateValues",None):2308 newFilter.candidateValues = self.candidateValues[:]2309 2371 return newFilter 2310 2372 … … 2330 2392 tempRule = oldRule.clone() 2331 2393 tempRule.filter.conditions.append(c) 2332 tempRule.filterAndStore(oldRule.examples, oldRule.weight _id, target_class)2394 tempRule.filterAndStore(oldRule.examples, oldRule.weightID, target_class) 2333 2395 if len(tempRule.examples)<len(oldRule.examples): 2334 2396 
new_rules.append(tempRule) … … 2349 2411 acceptSpecial=0)) 2350 2412 if tempRule(self.example): 2351 tempRule.filterAndStore(oldRule.examples, oldRule.weight _id, target_class)2413 tempRule.filterAndStore(oldRule.examples, oldRule.weightID, target_class) 2352 2414 if len(tempRule.examples)<len(oldRule.examples): 2353 2415 new_rules.append(tempRule) … … 2360 2422 return new_rules 2361 2423 2362 2363 class CovererAndRemover_Prob(Orange.core.RuleCovererAndRemover):2364 """2365 This class impements probabilistic covering.2366 """2367 def __init__(self, examples, weight_id, target_class, apriori):2368 self.bestRule = [None]*len(examples)2369 self.probAttribute = Orange.core.newmetaid()2370 self.apriori_prob = apriori[target_class]/apriori.abs2371 examples.addMetaAttribute(self.probAttribute, self.apriori_prob)2372 examples.domain.addmeta(self.probAttribute, Orange.core.FloatVariable("Probs"))2373 2374 def getBestRules(self, currentRules, examples, weight_id):2375 best_rules = Orange.core.RuleList()2376 ## for r in currentRules:2377 ## if hasattr(r.learner, "argumentRule") and not Orange.classification.rules.rule_in_set(r,best_rules):2378 ## best_rules.append(r)2379 for r_i,r in enumerate(self.bestRule):2380 if r and not Orange.classification.rules.rule_in_set(r,best_rules) and int(examples[r_i].getclass())==int(r.classifier.default_value):2381 best_rules.append(r)2382 return best_rules2383 2384 def __call__(self, rule, examples, weights, target_class):2385 if hasattr(rule, "learner") and hasattr(rule.learner, "arg_example"):2386 example = rule.learner.arg_example2387 else:2388 example = None2389 for ei, e in enumerate(examples):2390 ## if e == example:2391 ## e[self.probAttribute] = 1.02392 ## self.bestRule[ei]=rule2393 if example and not (hasattr(self.bestRule[ei], "learner") and hasattr(self.bestRule[ei].learner, "arg_example")):2394 can_be_worst = True2395 else:2396 can_be_worst = False2397 if can_be_worst and rule(e) and rule.quality>(e[self.probAttribute]0.01):2398 
e[self.probAttribute] = rule.quality+0.001 # 0.001 is added to avoid numerical errors2399 self.bestRule[ei]=rule2400 elif rule(e) and rule.quality>e[self.probAttribute]:2401 e[self.probAttribute] = rule.quality+0.001 # 0.001 is added to avoid numerical errors2402 self.bestRule[ei]=rule2403 return (examples,weights)2404 2405 def covered_percentage(self, examples):2406 p = 0.02407 for ei, e in enumerate(examples):2408 p += (e[self.probAttribute]  self.apriori_prob)/(1.0self.apriori_prob)2409 return p/len(examples)2410 2411 class EVDFitter:2412 """2413 Randomizes a dataset and fits an extreme value distribution onto it.2414 """2415 def __init__(self, learner, n=200, randomseed=100):2416 self.learner = learner2417 self.n = n2418 self.randomseed = randomseed2419 2420 def createRandomDataSet(self, data):2421 newData = Orange.core.ExampleTable(data)2422 # shuffle data2423 cl_num = newData.toNumpy("C")2424 random.shuffle(cl_num[0][:,0])2425 clData = Orange.core.ExampleTable(Orange.core.Domain([newData.domain.class_var]),cl_num[0])2426 for d_i,d in enumerate(newData):2427 d[newData.domain.class_var] = clData[d_i][newData.domain.class_var]2428 return newData2429 2430 def createEVDistList(self, evdList):2431 l = Orange.core.EVDistList()2432 for el in evdList:2433 l.append(Orange.core.EVDist(mu=el[0],beta=el[1],percentiles=el[2]))2434 return l2435 2436 # estimated fisher tippett parameters for a set of values given in vals list (+ deciles)2437 def compParameters(self, vals, oldMi=0.5,oldBeta=1.1):2438 # compute percentiles2439 vals.sort()2440 N = len(vals)2441 percs = [avg(vals[int(float(N)*i/10):int(float(N)*(i+1)/10)]) for i in range(10)]2442 if N<10:2443 return oldMi, oldBeta, percs2444 beta = min(2.0, max(oldBeta, math.sqrt(6*var(vals)/math.pow(math.pi,2))))2445 mi = max(oldMi,percs[1]+beta*math.log(math.log(0.95)))2446 return mi, beta, percs2447 2448 def prepare_learner(self):2449 self.oldStopper = self.learner.rule_finder.rule_stoppingValidator2450 self.evaluator = 
self.learner.rule_finder.evaluator2451 self.refiner = self.learner.rule_finder.refiner2452 self.validator = self.learner.rule_finder.validator2453 self.ruleFilter = self.learner.rule_finder.ruleFilter2454 self.learner.rule_finder.validator = None2455 self.learner.rule_finder.evaluator = Orange.core.RuleEvaluator_LRS()2456 self.learner.rule_finder.evaluator.storeRules = True2457 self.learner.rule_finder.rule_stoppingValidator = Orange.core.RuleValidator_LRS(alpha=1.0)2458 self.learner.rule_finder.rule_stoppingValidator.max_rule_complexity = 02459 self.learner.rule_finder.refiner = Orange.core.RuleBeamRefiner_Selector()2460 self.learner.rule_finder.ruleFilter = Orange.core.RuleBeamFilter_Width(width = 1)2461 2462 2463 def restore_learner(self):2464 self.learner.rule_finder.evaluator = self.evaluator2465 self.learner.rule_finder.rule_stoppingValidator = self.oldStopper2466 self.learner.rule_finder.refiner = self.refiner2467 self.learner.rule_finder.validator = self.validator2468 self.learner.rule_finder.ruleFilter = self.ruleFilter2469 2470 def computeEVD(self, data, weight_id=0, target_class=0, progress=None):2471 # initialize random seed to make experiments repeatable2472 random.seed(self.randomseed)2473 2474 # prepare learned for distribution computation2475 self.prepare_learner()2476 2477 # loop through N (sampling repetitions)2478 extreme_dists=[(0, 1, [])]2479 self.learner.rule_finder.rule_stoppingValidator.max_rule_complexity = self.oldStopper.max_rule_complexity2480 maxVals = [[] for l in range(self.oldStopper.max_rule_complexity)]2481 for d_i in range(self.n):2482 if not progress:2483 print d_i,2484 else:2485 progress(float(d_i)/self.n, None)2486 # create data set (remove and randomize)2487 tempData = self.createRandomDataSet(data)2488 self.learner.rule_finder.evaluator.rules = Orange.core.RuleList()2489 # Next, learn a rule2490 self.learner.rule_finder(tempData,weight_id,target_class, Orange.core.RuleList())2491 for l in 
range(self.oldStopper.max_rule_complexity):2492 qs = [r.quality for r in self.learner.rule_finder.evaluator.rules if r.complexity == l+1]2493 if qs:2494 maxVals[l].append(max(qs))2495 else:2496 maxVals[l].append(0)2497 2498 mu, beta = 1.0, 1.02499 for mi,m in enumerate(maxVals):2500 mu, beta, perc = self.compParameters(m,mu,beta)2501 extreme_dists.append((mu, beta, perc))2502 extreme_dists.extend([(0,1,[])]*(mi))2503 2504 self.restore_learner()2505 return self.createEVDistList(extreme_dists)2506 2424 2507 2425 class CrossValidation:
Note: See TracChangeset for help on using the changeset viewer.