def compute_statistics(self):
    """Compute max, min, average, median and standard deviation of self.data.

    Results are cached on the instance: runs only once, then sets
    self.statistics = True so subsequent calls are no-ops.

    Sets:
        self.data_sorted -- sorted copy of self.data
        self._min, self._max -- extremes of the data
        self._median -- statistical median (mean of the two middle
            elements when the element count is even; the original code
            returned the upper-middle element in that case, which is
            not the median)
        self._mean, self._sd -- via pylib_basics helpers
    """
    if not self.statistics:
        self.data_sorted = sorted(self.data)
        n = len(self.data_sorted)
        self._min = self.data_sorted[0]
        self._max = self.data_sorted[-1]
        if n % 2 == 1:
            # Odd count: single middle element.
            self._median = self.data_sorted[n // 2]
        else:
            # Even count: average of the two middle elements.
            # 2.0 keeps float division under Python 2 as well.
            self._median = (self.data_sorted[n // 2 - 1]
                            + self.data_sorted[n // 2]) / 2.0
        self._mean = pylib_basics.mean(self.data)
        self._sd = pylib_basics.standard_deviation(self.data)
        self.statistics = True
def compute_statistics(self):
    """Compute max, min, average, median, standard deviation and number of elements.

    Idempotent: the self.statistics flag guards against recomputation.
    Stores results in self.data_sorted, self._min, self._max,
    self._median, self._mean and self._sd.  NOTE(review): for an
    even-length data set the "median" is the upper-middle sorted
    element, matching the original implementation.
    """
    if self.statistics:
        return  # already computed; keep cached values
    ordered = sorted(self.data)
    self.data_sorted = ordered
    self._min, self._max = ordered[0], ordered[-1]
    self._median = ordered[len(ordered) // 2]
    self._mean = pylib_basics.mean(self.data)
    self._sd = pylib_basics.standard_deviation(self.data)
    self.statistics = True
(succ, count) = tree.classify_set(i[1],pylib_basics.verbose()) succ_percent = float(succ)/count*100 print "Test: %4d out of %4d, %7.3f%%" % (succ, count, succ_percent) te_results.append((succ, count,succ_percent)) tr_percent = (map(lambda x:x[2],tr_results)) te_percent = (map(lambda x:x[2],te_results)) rig = (map(lambda x:x[3],tr_results)) depths = (map(lambda x:x[4],tr_results)) size = (map(lambda x:x[5],tr_results)) leaves = (map(lambda x:x[6],tr_results)) print "%s Splits %2d RIGL %5.3f RIG %5.3f+/-%5.3f (%5.2f+/-%4.2f, %7.2f+/-%4.2f, %7.2f+/-%4.2f) Train: %7.3f+/-%7.3f%% Test: %7.3f+/-%7.3f%%" %\ (feature_selection, max_split, relgain_limit, pylib_basics.mean(rig), pylib_basics.standard_deviation(rig), pylib_basics.mean(depths), pylib_basics.standard_deviation(depths), pylib_basics.mean(size), pylib_basics.standard_deviation(size), pylib_basics.mean(leaves), pylib_basics.standard_deviation(leaves), pylib_basics.mean(tr_percent), pylib_basics.standard_deviation(tr_percent), pylib_basics.mean(te_percent), pylib_basics.standard_deviation(te_percent)) else: tree = dectree_constructor(set, entropy_compare_fun,