Changeset 9270:3b6632fc8406 in orange


Ignore:
Timestamp:
11/25/11 11:54:22 (2 years ago)
Author:
anze <anze.staric@…>
Branch:
default
Convert:
638ec90037642a20cf654ecbf7df714870d469bb
Message:

Added more unittests for evaluation.testing.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • testing/unittests/tests/test_evaluation.py

    r9224 r9270  
    1 import operator, unittest 
     1import itertools, operator, unittest 
    22from collections import Counter 
    33 
    44import Orange 
    55 
     6example_no = Orange.data.variable.new_meta_id() 
     7 
    68class DummyLearner(Orange.classification.majority.MajorityLearner): 
    7     def __init__(self, *args, **kwds): 
     9    def __init__(self, id=None): 
     10        self.id=id 
    811        self.data = [] 
    912        self.classifiers = [] 
    10         super(DummyLearner, self).__init__(*args, **kwds) 
     13        self.classifier_no = 0 
     14        super(DummyLearner, self).__init__() 
    1115 
    1216    def __call__(self, dataset, weight=0): 
    1317        self.data.append(dataset) 
     18 
    1419        cl = super(DummyLearner, self).__call__(dataset, weight) 
    15         cl = DummyClassifier(cl) 
     20        cl = DummyClassifier(cl, self.id, self.classifier_no) 
     21        self.classifier_no += 1 
    1622        self.classifiers.append(cl) 
    1723        return cl 
     
    1925class DummyClassifier(object): 
    2026    name="DummyClassifier" 
    21     def __init__(self, base_class): 
     27    def __init__(self, base_class, learner_id, classifier_no): 
    2228        self.base_class = base_class 
     29        self.classifier_no = classifier_no 
     30        self.learner_id = learner_id 
    2331        self.data = [] 
     32        self.predictions = [] 
    2433 
    2534    def __call__(self, example, options=None): 
     35        value, probability = self.base_class.__call__(example, options) 
     36        p = [self.learner_id, self.classifier_no, int(example[example_no])] 
    2637        self.data.append(example) 
    27         return self.base_class.__call__(example, options) 
     38        self.predictions.append(p) 
     39 
     40        return value, p 
    2841 
    2942class DummyPreprocessor(object): 
     
    3245 
    3346    def __call__(self, *datasets): 
    34         # TODO: What is LT Preprocessor and how to use it? 
    3547        new_datasets = [] 
    3648        for dataset in datasets: 
     
    4658        return [] 
    4759 
    48 def create_examples(id): 
    49     data = Orange.data.Table("iris") 
    50     for i, inst in enumerate(data): 
    51         inst[id] = i 
    52     return data 
    53  
    54  
    5560class TestEvaluation(unittest.TestCase): 
    5661    def setUp(self): 
    57         self.meta_id = Orange.data.variable.new_meta_id() 
    58         self.examples = create_examples(self.meta_id) 
     62        self.example_no = example_no 
     63        self.learner = DummyLearner() 
     64        self.examples = Orange.data.Table("iris") 
     65        for i, inst in enumerate(self.examples): 
     66            inst[self.example_no] = i 
    5967 
    6068        self.preprocessed_with_both = Orange.data.variable.new_meta_id() 
     
    7179        self.indices = [i // examples_in_fold for i in range(len(self.examples))] 
    7280        self.callback_calls = 0 
    73  
    74         self.evaluation = Orange.evaluation.testing.Evaluation() 
     81        reload(Orange.evaluation.testing) 
     82        self.evaluation = Orange.evaluation.testing 
     83 
     84    def test_with_indices(self): 
     85        learners = [DummyLearner(id=0), DummyLearner(id=1), DummyLearner(id=2)] 
     86        self.callback_calls = 0 
     87 
     88        test_results = self.evaluation.test_with_indices(learners, (self.examples, 0), self.indices, callback=self.callback) 
     89 
     90        for l, learner in enumerate(learners): 
     91            predicted_results = [prediction for classifier in learner.classifiers for prediction in classifier.predictions] 
     92            returned_results = [r.probabilities[l] for r in test_results.results] 
     93            self.assertItemsEqual(returned_results, predicted_results) 
     94 
     95            # Each example should be used for training (n-1)x, where n is the number of folds 
     96            example_cnt = 0 
     97            for fold, data in enumerate(learner.data): 
     98                for example in data: 
     99                    # Classifier should be trained on examples where fold is different from current fold 
     100                    self.assertNotEqual(self.indices[int(example[self.example_no])], fold) 
     101                    example_cnt += 1 
     102            self.assertEqual(example_cnt, len(self.examples) * (self.folds-1)) 
     103 
     104            # Each example should be used for testing only once 
     105            example_cnt = 0 
     106            for fold, classifier in enumerate(learner.classifiers): 
     107                for example in classifier.data: 
     108                    # Classifier should perform classification on examples with same fold number 
     109                    self.assertEqual(self.indices[int(example[self.example_no])], fold) 
     110                    example_cnt += 1 
     111            self.assertEqual(example_cnt, len(self.examples)) 
     112 
     113        # Callback function should be called once for every fold. 
     114        self.assertEqual(self.callback_calls, self.folds) 
    75115 
    76116    def callback(self): 
    77117        self.callback_calls += 1 
    78118 
    79     def test_with_indices(self): 
    80         # Perform test on MajorityLearner and Iris dataset. 
    81         learner = DummyLearner() 
    82         self.callback_calls = 0 
    83         test_results = self.evaluation.test_with_indices([learner], (self.examples, 0), self.indices, callback=self.callback) 
    84         expected_results = [1]*50+[0]*100 
    85         predicted_classes = map(lambda x:x.classes[0], test_results.results) 
    86         self.assertItemsEqual(expected_results, predicted_classes) 
    87  
    88         # Each example should be used for training (n-1)x, where n is the number of folds 
    89         example_cnt = 0 
    90         for fold, data in enumerate(learner.data): 
    91             for example in data: 
    92                 # Classifier should be trained on examples where fold is different from current fold 
    93                 self.assertNotEqual(self.indices[int(example[self.meta_id])], fold) 
    94                 example_cnt += 1 
    95         self.assertEqual(example_cnt, len(self.examples) * (self.folds-1)) 
    96  
    97         # Each example should be used for testing only once 
    98         example_cnt = 0 
    99         for fold, classifier in enumerate(learner.classifiers): 
    100             for example in classifier.data: 
    101                 # Classifier should perform classification on examples with same fold number 
    102                 self.assertEqual(self.indices[int(example[self.meta_id])], fold) 
    103                 example_cnt += 1 
    104         self.assertEqual(example_cnt, len(self.examples)) 
    105  
    106         # Callback function should be called once for every fold. 
    107         self.assertEqual(self.callback_calls, self.folds) 
    108  
    109119    def test_with_indices_can_store_examples_and_classifiers(self): 
    110         learner = DummyLearner() 
     120        test_results = self.evaluation.test_with_indices([self.learner], self.examples, self.indices, 
     121                                                        store_examples=True, store_classifiers=True) 
     122        self.assertGreater(len(test_results.examples), 0) 
     123        self.assertGreater(len(test_results.classifiers), 0) 
     124        classifiers = map(operator.itemgetter(0), test_results.classifiers) 
     125        self.assertItemsEqual(self.learner.classifiers, classifiers) 
     126 
     127    def test_with_indices_uses_preprocessors(self): 
     128        self.evaluation.test_with_indices([self.learner], self.examples, self.indices, 
     129                                           preprocessors=self.preprocessors) 
     130        self.assertPreprocessedCorrectly() 
     131 
     132    def test_with_indices_handles_errors(self): 
     133        learner = DummyLearner() 
      134        # No data should raise a ValueError 
     135        with self.assertRaises(ValueError): 
     136            self.evaluation.test_with_indices([learner], [], self.indices) 
     137 
     138        # If one fold is not represented in indices, cross validation should still execute 
     139        self.evaluation.test_with_indices([learner], self.examples, [2] + [1]*(len(self.examples)-1)) 
     140 
      141        # If preprocessors is "broken" (returns no data), error should be raised 
     142        with self.assertRaises(SystemError): 
     143            self.evaluation.test_with_indices([learner], self.examples, self.indices, preprocessors=(("L", BrokenPreprocessor()),)) 
     144        with self.assertRaises(SystemError): 
     145            self.evaluation.test_with_indices([learner], self.examples, self.indices, preprocessors=(("T", BrokenPreprocessor()),)) 
     146 
     147 
     148    def test_cross_validation(self): 
     149        learners = [DummyLearner(id=0), DummyLearner(id=1), DummyLearner(id=2)] 
     150        self.callback_calls = 0 
     151        folds = 10 
     152        test_results = self.evaluation.cross_validation(learners, (self.examples, 0), folds=folds, callback=self.callback) 
     153 
     154        for l, learner in enumerate(learners): 
     155            predicted_results = [prediction for classifier in learner.classifiers for prediction in classifier.predictions] 
     156            returned_results = [r.probabilities[l] for r in test_results.results] 
     157            self.assertItemsEqual(returned_results, predicted_results) 
     158 
     159            # Each example should be used for training (n-1)x, where n is the number of folds 
     160            example_cnt = 0 
     161            for fold, data in enumerate(learner.data): 
     162                for example in data: 
     163                    example_cnt += 1 
     164            self.assertEqual(example_cnt, len(self.examples) * (folds-1)) 
     165 
     166            # Each example should be used for testing only once 
     167            example_cnt = 0 
     168            for fold, classifier in enumerate(learner.classifiers): 
     169                for example in classifier.data: 
     170                    example_cnt += 1 
     171            self.assertEqual(example_cnt, len(self.examples)) 
     172 
     173    def test_leave_one_out(self): 
     174        learners = [DummyLearner(id=0), DummyLearner(id=1), DummyLearner(id=2)] 
     175        self.callback_calls = 0 
     176         
     177        test_results = self.evaluation.leave_one_out(learners, self.examples) 
     178        for l, learner in enumerate(learners): 
     179            predicted_results = [prediction for classifier in learner.classifiers for prediction in classifier.predictions] 
     180            returned_results = [r.probabilities[l] for r in test_results.results] 
     181            self.assertItemsEqual(returned_results, predicted_results) 
     182 
     183            # Each example should be used for training (n-1)x, where n is the number of folds 
     184            example_cnt = 0 
     185            for fold, data in enumerate(learner.data): 
     186                for example in data: 
     187                    example_cnt += 1 
     188            self.assertEqual(example_cnt, len(self.examples) * (len(self.examples)-1)) 
     189 
     190            # Each example should be used for testing only once 
     191            example_cnt = 0 
     192            for fold, classifier in enumerate(learner.classifiers): 
     193                for example in classifier.data: 
     194                    example_cnt += 1 
     195            self.assertEqual(example_cnt, len(self.examples)) 
     196 
     197    def test_on_data(self): 
     198        pass 
     199 
     200    def test_on_data_can_store_examples_and_classifiers(self): 
     201        learner = DummyLearner() 
     202        classifier = learner(self.examples) 
    111203        # Passing store_examples = True should make examples accessible 
    112         test_results = self.evaluation.test_with_indices([learner], self.examples, self.indices, 
    113                                                         store_examples=True, store_classifiers=True) 
    114         self.assertGreater(len(test_results.examples), 0) 
    115         self.assertGreater(len(test_results.classifiers), 0) 
    116         classifiers = map(operator.itemgetter(0), test_results.classifiers) 
    117         self.assertItemsEqual(learner.classifiers, classifiers) 
    118  
    119     def test_with_indices_uses_preprocessors(self): 
    120         # Preprocessors should be applied to data as specified in their type 
    121         learner = DummyLearner() 
    122         self.evaluation.test_with_indices([learner], 
    123                                      self.examples, 
    124                                      self.indices, 
    125                                      preprocessors=self.preprocessors) 
    126  
     204        test_results = self.evaluation.test_on_data([classifier], self.examples, 
     205                                                    store_examples=True, store_classifiers=True) 
     206        self.assertGreater(len(test_results.examples), 0) 
     207        self.assertGreater(len(test_results.classifiers), 0) 
     208        self.assertItemsEqual(learner.classifiers, test_results.classifiers) 
     209 
     210    def test_learn_and_test_on_learn_data(self): 
     211        self.callback_calls = 0 
     212        learner = DummyLearner() 
     213 
     214        test_results = self.evaluation.learn_and_test_on_learn_data([learner], self.examples, callback=self.callback, 
     215                                                    store_examples=True, store_classifiers=True) 
     216 
     217        self.assertEqual(self.callback_calls, 1) 
     218 
     219 
     220 
     221    def test_learn_and_test_on_learn_data_with_preprocessors(self): 
     222        self.learner = DummyLearner() 
     223        test_results = self.evaluation.learn_and_test_on_learn_data([self.learner], self.examples, 
     224                                                    preprocessors=self.preprocessors, 
     225                                                    callback=self.callback, store_examples=True, store_classifiers=True) 
     226        self.assertPreprocessedCorrectly() 
     227 
     228    def test_learn_and_test_on_learn_data_can_store_examples_and_classifiers(self): 
     229        learner = DummyLearner() 
     230         
     231        test_results = self.evaluation.learn_and_test_on_learn_data([learner], self.examples, 
     232                                                    store_examples=True, store_classifiers=True) 
     233        self.assertGreater(len(test_results.examples), 0) 
     234        self.assertGreater(len(test_results.classifiers), 0) 
     235        self.assertItemsEqual(learner.classifiers, test_results.classifiers) 
     236 
     237    def test_learn_and_test_on_test_data(self): 
     238        self.callback_calls = 0 
     239        learner = DummyLearner() 
     240 
     241        test_results = self.evaluation.learn_and_test_on_test_data([learner], self.examples, self.examples, 
     242                                                    callback=self.callback, store_examples=True, store_classifiers=True) 
     243        self.assertEqual(self.callback_calls, 1) 
     244 
     245    def test_learn_and_test_on_test_data_with_preprocessors(self): 
     246        self.learner = DummyLearner() 
     247        test_results = self.evaluation.learn_and_test_on_test_data([self.learner], self.examples, self.examples, 
     248                                                    preprocessors=self.preprocessors, 
     249                                                    callback=self.callback, store_examples=True, store_classifiers=True) 
     250        self.assertPreprocessedCorrectly() 
     251 
     252    def test_learn_and_test_on_test_data_can_store_examples_and_classifiers(self): 
     253        learner = DummyLearner() 
     254 
     255        test_results = self.evaluation.learn_and_test_on_test_data([learner], self.examples, self.examples, 
     256                                                    store_examples=True, store_classifiers=True) 
     257        self.assertGreater(len(test_results.examples), 0) 
     258        self.assertGreater(len(test_results.classifiers), 0) 
     259        self.assertItemsEqual(learner.classifiers, test_results.classifiers) 
     260 
     261    def test_learning_curve_with_test_data(self): 
     262        learner = DummyLearner() 
     263        times=10 
     264        proportions=Orange.core.frange(0.1) 
     265        test_results = self.evaluation.learning_curve_with_test_data([learner], self.examples, self.examples, 
     266                                                                              times=times, proportions=proportions) 
     267        # We expect the method to return a list of test_results, one instance for each proportion. Each 
     268        # instance should have "times" folds. 
     269        self.assertEqual(len(test_results), len(proportions)) 
     270        for test_result in test_results: 
     271            self.assertEqual(test_result.numberOfIterations, times) 
     272            self.assertEqual(len(test_result.results), times*len(self.examples)) 
     273 
     274 
     275 
     276    def test_learning_curve_with_test_data_can_store_examples_and_classifiers(self): 
     277        learner = DummyLearner() 
     278 
     279        test_results = self.evaluation.learn_and_test_on_test_data([learner], self.examples, self.examples, 
     280                                                                            store_examples=True, store_classifiers=True) 
     281        self.assertGreater(len(test_results.examples), 0) 
     282        self.assertGreater(len(test_results.classifiers), 0) 
     283        self.assertItemsEqual(learner.classifiers, test_results.classifiers) 
     284 
     285 
     286    def test_proportion_test(self): 
     287        self.callback_calls = 0 
     288        times = 10 
     289        learner = DummyLearner() 
     290 
     291        test_results = self.evaluation.proportion_test([learner], self.examples, learning_proportion=.7, times=times, 
     292                                                                callback = self.callback) 
     293 
     294        self.assertEqual(self.callback_calls, times) 
     295 
     296    def test_learning_curve(self): 
     297        self.callback_calls = 0 
     298        times = 10 
     299        proportions=Orange.core.frange(0.1) 
     300        folds=10 
     301        learner = DummyLearner() 
     302 
     303        test_results = self.evaluation.learning_curve([learner], self.examples, 
     304                                                                callback = self.callback) 
     305 
     306        # Ensure that each iteration is learned on correct proportion of training examples 
     307        for proportion, data in zip((p for p in proportions for _ in range(10)), learner.data): 
     308            actual_examples = len(data) 
     309            expected_examples = len(self.examples)*proportion*(folds-1)/folds 
     310            self.assertTrue(abs(actual_examples - expected_examples) <= 1) 
     311 
     312        # Ensure results are not lost 
     313        predicted_results = [prediction for classifier in learner.classifiers for prediction in classifier.predictions] 
     314        returned_results = [r.probabilities[0] for tr in test_results for r in tr.results] 
     315        self.assertItemsEqual(returned_results, predicted_results) 
     316         
     317        self.assertEqual(self.callback_calls, folds*len(proportions)) 
     318 
     319    #TODO: LearningCurveN tests 
     320 
     321    def assertPreprocessedCorrectly(self): 
    127322        # Original examples should be left intact 
    128323        for example in self.examples: 
     
    131326            self.assertFalse(example.has_meta(self.preprocessed_with_test)) 
    132327            self.assertFalse(example.has_meta(self.preprocessed_with_learn_test)) 
    133  
    134         for fold, data in enumerate(learner.data): 
     328        for fold, data in enumerate(self.learner.data): 
    135329            for example in data: 
    136330                # Preprocessors both, learn and learntest should be applied to learn data. 
     
    140334                # Preprocessor test should not be applied to learn data. 
    141335                self.assertFalse(example.has_meta(self.preprocessed_with_test)) 
    142  
    143         for fold, classifier in enumerate(learner.classifiers): 
     336        for fold, classifier in enumerate(self.learner.classifiers): 
    144337            for example in classifier.data: 
    145338                # Preprocessors both, test and learntest should be applied to test data. 
     
    150343                self.assertFalse(example.has_meta(self.preprocessed_with_learn)) 
    151344 
    152     def test_with_indices_handles_errors(self): 
    153         learner = DummyLearner() 
    154         # No data should raise a ValueError 
    155         with self.assertRaises(ValueError): 
    156             self.evaluation.test_with_indices([learner], [], self.indices) 
    157  
    158         # If one fold is not represented in indices, cross validation should still execute 
    159         self.evaluation.test_with_indices([learner], self.examples, [2] + [1]*(len(self.examples)-1)) 
    160  
    161         # If preprocessors is "broken" (returns no data), error should be raised 
    162         with self.assertRaises(SystemError): 
    163             self.evaluation.test_with_indices([learner], self.examples, self.indices, preprocessors=(("L", BrokenPreprocessor()),)) 
    164         with self.assertRaises(SystemError): 
    165             self.evaluation.test_with_indices([learner], self.examples, self.indices, preprocessors=(("T", BrokenPreprocessor()),)) 
    166  
    167  
    168     def test_cross_validation(self): 
    169         def dummy_test_with_indices(*args, **kwargs): 
    170             return kwargs["indices"], kwargs 
    171         self.evaluation.test_with_indices = dummy_test_with_indices 
    172  
    173  
    174         _, kwargs = self.evaluation.cross_validation([], self.examples, self.folds, 
    175                                                                  preprocessors=self.preprocessors, 
    176                                                                  callback=self.callback, 
    177                                                                  store_classifiers=True, 
    178                                                                  store_examples=True) 
    179         self.assertIn("preprocessors", kwargs) 
    180         self.assertEqual(kwargs["preprocessors"], self.preprocessors) 
    181         self.assertIn("callback", kwargs) 
    182         self.assertEqual(kwargs["callback"], self.callback) 
    183         self.assertIn("store_classifiers", kwargs) 
    184         self.assertEqual(kwargs["store_classifiers"], True) 
    185         self.assertIn("store_examples", kwargs) 
    186         self.assertEqual(kwargs["store_examples"], True) 
    187  
    188         indices1, _ = self.evaluation.cross_validation([], self.examples, self.folds) 
    189         indices2, _ = self.evaluation.cross_validation([], self.examples, self.folds) 
    190  
    191         # By default, cross_validation generates the same indices every time. (multiple runs of 
    192         # cross-validation produce the same results) 
    193         self.assertEqual(indices1, indices2) 
    194  
    195         indices3, _ = self.evaluation.cross_validation([], self.examples, self.folds, random_generator=3145) 
    196         indices4, _ = self.evaluation.cross_validation([], self.examples, self.folds, random_generator=3145) 
    197         # Providing the same random seed should give us the same indices 
    198         self.assertEqual(indices3, indices4) 
    199         # But different from default ones 
    200         self.assertNotEqual(indices1, indices3) 
    201  
    202         rg = Orange.core.RandomGenerator() 
    203         indices5, _ = self.evaluation.cross_validation([], self.examples, self.folds, random_generator=rg) 
    204         rg.reset() 
    205         indices6, _ = self.evaluation.cross_validation([], self.examples, self.folds, random_generator=rg) 
    206         # Using the same random generator and resetting it before calling cross-validation should result 
    207         # in same indices 
    208         self.assertEqual(indices5, indices6) 
    209  
    210         ds = Orange.data.Table("iris") 
    211         indices1,_ = self.evaluation.cross_validation([], ds, 3, stratified=Orange.core.MakeRandomIndices.NotStratified) 
    212         indices2,_ = self.evaluation.cross_validation([], ds, 3, stratified=Orange.core.MakeRandomIndices.Stratified) 
    213  
    214         # We know that the iris dataset has 150 instances and 3 class values. First 50 examples belong to first class, 
    215         # Next 50 to the second and the rest to the third. 
    216         # When using stratification, class distributions in folds should be about the same (max difference of one 
    217         # instance per class) 
    218         freq = Counter(indices2[:50]), Counter(indices2[50:100]), Counter(indices2[100:]) #Get class value distributions 
    219         frequencies = [[freq[fold][cls] for cls in range(3)] for fold in range(3)] 
    220         for value_counts in frequencies: 
    221             self.assertTrue(max(value_counts)-min(value_counts) <= 1) 
    222  
    223         # If stratification is not enabled, class value numbers in different folds usually vary. 
    224         freq = Counter(indices1[:50]), Counter(indices1[50:100]), Counter(indices1[100:]) #Get class value distributions 
    225         frequencies = [[freq[fold][cls] for cls in range(3)] for fold in range(3)] 
    226         for value_counts in frequencies: 
    227             self.assertTrue(max(value_counts)-min(value_counts) > 1) 
    228  
    229     def test_leave_one_out(self): 
    230         def dummy_test_with_indices(*args, **kwargs): 
    231             return kwargs["indices"], kwargs 
    232         self.evaluation.test_with_indices = dummy_test_with_indices 
    233          
    234         indices, kwargs = self.evaluation.leave_one_out([], self.examples, 
    235                                                                  preprocessors=self.preprocessors, 
    236                                                                  callback=self.callback, 
    237                                                                  store_classifiers=True, 
    238                                                                  store_examples=True) 
    239         self.assertIn("preprocessors", kwargs) 
    240         self.assertEqual(kwargs["preprocessors"], self.preprocessors) 
    241         self.assertIn("callback", kwargs) 
    242         self.assertEqual(kwargs["callback"], self.callback) 
    243         self.assertIn("store_classifiers", kwargs) 
    244         self.assertEqual(kwargs["store_classifiers"], True) 
    245         self.assertIn("store_examples", kwargs) 
    246         self.assertEqual(kwargs["store_examples"], True) 
    247         self.assertItemsEqual(indices, range(len(self.examples))) 
    248  
    249     def test_rest(self): 
    250         classifiers = [DummyLearner()(self.examples)] 
    251         learners = [DummyLearner()] 
    252         Orange.evaluation.testing.test_on_data(classifiers, self.examples) 
    253         Orange.evaluation.testing.learn_and_test_on_learn_data(learners, self.examples) 
    254         Orange.evaluation.testing.learn_and_test_on_test_data(learners, self.examples, self.examples) 
    255         Orange.evaluation.testing.learning_curve(learners, self.examples) 
    256         Orange.evaluation.testing.learning_curve_n(learners, self.examples) 
    257         Orange.evaluation.testing.learning_curve_with_test_data(learners, self.examples, self.examples) 
    258         Orange.evaluation.testing.proportion_test(learners, self.examples, 0.7) 
    259  
    260  
    261345if __name__ == '__main__': 
    262346    unittest.main() 
Note: See TracChangeset for help on using the changeset viewer.