def setup(self, argv):
    """Parse parameters and load/preprocess the data set named by argv[0].

    argv[0]  -- path of the input data file (passed to make_d.read_data)
    argv[1:] -- forwarded unchanged to self.set_parameters()

    Side effects: binds the preprocessed data to self.data and prints
    debug output to stdout. Returns None.
    """
    # Parse runtime options first (everything after the input file name).
    self.set_parameters(argv[1:])
    # Debug output: the two parameter sets and the full object state.
    print self.set1, self.set2
    print self.__dict__.items()
    # Pipeline: read -> assign class labels -> prepare; each step rebinds
    # self.data with the next stage's result.
    # NOTE(review): the file handle from open() is never closed explicitly.
    self.data = make_d.read_data(open(argv[0]))
    self.data = make_d.assign_classes(self.data)
    self.data = make_d.prepare_data(self.data)
    # Summary line: keys (presumably class labels — confirm against make_d)
    # and the number of samples stored under each key.
    print self.data.keys(), [len(v) for v in self.data.values()]
    pass
def main(argv):
    """Driver (variant 1): train/evaluate over N_RUNS random data splits.

    argv[0]  -- input data file path
    argv[1:] -- option arguments, handed to init()

    Reads module-level configuration (KERNEL_TYPE, N_RUNS, RANDOMIZE_DATA,
    TIMESTAMP, SET1, SET2) and rebinds the globals declared below.
    NOTE(review): the visible while-loop body never increments i — the
    function presumably continues beyond this excerpt; confirm in the
    full file before editing.
    """
    global C_RANGE
    global GAMMA_RANGE
    global SET1
    global SET2
    i = 0
    param_grid = {}
    results = []
    sum_acc = 0
    # init() parses the option arguments and sets the module globals.
    init(argv[1:])
    print SET1, SET2
    # return None
    fn = argv[0]
    # Same read -> assign_classes -> prepare pipeline as setup().
    dataset = make_d.read_data(open(fn))
    dataset = make_d.assign_classes(dataset)
    data = make_d.prepare_data(dataset)
    print data.keys(), [len(v) for v in data.values()]
    # Probability-estimate mode ('-b 1') for the SVM.
    param = svm.svm_parameter('-b 1')
    if KERNEL_TYPE == 'LINEAR':
        param.kernel_type = svm.LINEAR
        # A linear kernel has no gamma; collapse the search range to one step.
        GAMMA_RANGE = 1, 0, -2
    else:
        param.kernel_type = svm.RBF
    cvfunc = leave_one_out
    n_cv = None
    # Restrict to two explicit class groups only when both were configured.
    use_sets = not SET1 is None and not SET2 is None
    # Derive the log-file stem from the input file name.
    outfile = os.path.basename(fn)
    outfile = outfile.replace('.fasta', '')
    outfile = outfile.replace('.fas', '')
    if use_sets:
        # e.g. SET1=[1,2], SET2=[3] -> '12vs3'
        outfile = ''.join(map(str, map(int, SET1))) + 'vs' + ''.join(map(str, map(int, SET2)))
    log_name = '%s-%s-%i-%s.csv' % (TIMESTAMP, KERNEL_TYPE, int(RANDOMIZE_DATA), outfile)
    logfile = open(log_name, 'w')
    while i < N_RUNS:
        # Progress indicator on one line.
        sys.stdout.write('%i ' % i)
        sys.stdout.flush()
        if use_sets:
            """
            TODO: If set sizes are reasonably large, do not use the
            complete smallest set.
            """
            # Keep only the classes named in SET1/SET2, take each group
            # whole (training_fraction=1.0), then merge into a binary
            # problem labelled +1.0 / -1.0 and split 75/25.
            set1 = dict([item for item in data.items() if item[0] in SET1])
            set2 = dict([item for item in data.items() if item[0] in SET2])
            set1 = make_d.make_set(set1, training_fraction=1.0)
            set2 = make_d.make_set(set2, training_fraction=1.0)
            new_sets = {1.0: set1[1], -1.0: set2[1]}
            sets = make_d.make_set(new_sets, training_fraction=0.75)
        else:
            sets = make_d.make_set(data, training_fraction=0.75)
        train_y, train_x, test_y, test_x = sets
        if RANDOMIZE_DATA:
            # Shuffle labels only — deliberately decouples labels from
            # samples (a permutation/negative-control run, presumably).
            random.shuffle(train_y)
            random.shuffle(test_y)
            pass
        print [len(x) for x in sets]
def main(argv):
    """Driver (variant 2): like variant 1 but delegates the SET1/SET2
    class merging to make_d.merge_multiclasses and uses
    svmfun.leave_one_out for cross-validation.

    argv[0]  -- input data file path
    argv[1:] -- option arguments, handed to init()

    NOTE(review): the visible while-loop body never increments i — the
    function presumably continues beyond this excerpt; confirm in the
    full file before editing.
    """
    global C_RANGE
    global GAMMA_RANGE
    global SET1
    global SET2
    i = 0
    param_grid = {}
    results = []
    sum_acc = 0
    # init() parses the option arguments and sets the module globals.
    init(argv[1:])
    print SET1, SET2
    fn = argv[0]
    # read -> assign_classes -> prepare pipeline.
    dataset = make_d.read_data(open(fn))
    dataset = make_d.assign_classes(dataset)
    data = make_d.prepare_data(dataset)
    print data.keys(), [len(v) for v in data.values()]
    # Probability-estimate mode ('-b 1') for the SVM.
    param = svm.svm_parameter('-b 1')
    if KERNEL_TYPE == 'LINEAR':
        param.kernel_type = svm.LINEAR
        # A linear kernel has no gamma; collapse the search range to one step.
        GAMMA_RANGE = 1, 0, -2
    else:
        param.kernel_type = svm.RBF
    cvfunc = svmfun.leave_one_out
    n_cv = None
    # Restrict to two explicit class groups only when both were configured.
    limit_sets = not SET1 is None and not SET2 is None
    # Derive the log-file stem from the input file name.
    outfile = os.path.basename(fn)
    outfile = outfile.replace('.fasta', '')
    outfile = outfile.replace('.fas', '')
    if limit_sets:
        # e.g. SET1=[1,2], SET2=[3] -> '12vs3'
        outfile = ''.join(map(str, map(int, SET1))) + 'vs'
        outfile += ''.join(map(str, map(int, SET2)))
    log_name = '%s-%s-%i-%s.csv' % (TIMESTAMP, KERNEL_TYPE, int(RANDOMIZE_DATA), outfile)
    logfile = open(log_name, 'w')
    while i < N_RUNS:
        # Progress indicator on one line.
        sys.stdout.write('%i ' % i)
        sys.stdout.flush()
        if limit_sets:
            # Collapse the multi-class data into the two configured
            # groups, then split 75/25 into training/test.
            new_sets = make_d.merge_multiclasses(data, SET1, SET2)
            sets = make_d.make_set(new_sets, training_fraction=0.75)
        else:
            sets = make_d.make_set(data, training_fraction=0.75)
        train_y, train_x, test_y, test_x = sets
        if RANDOMIZE_DATA:
            # Shuffle labels only — deliberately decouples labels from
            # samples (a permutation/negative-control run, presumably).
            random.shuffle(train_y)
            random.shuffle(test_y)
            pass
        print [len(x) for x in sets]
def main(argv):
    """Driver (variant 3): train on argv[0] and predict on a separate
    test file of precursor fragments given in argv[1].

    argv[0]  -- training data file path
    argv[1]  -- test data file path (precursor fragments)
    argv[2:] -- option arguments, handed to init()

    NOTE(review): the visible while-loop body never increments i — the
    function presumably continues beyond this excerpt; confirm in the
    full file before editing.
    """
    global C_RANGE
    global GAMMA_RANGE
    global SET1
    global SET2
    i = 0
    param_grid = {}
    results = []
    sum_acc = 0
    # Options start at argv[2] here (argv[0]/argv[1] are the data files).
    init(argv[2:])
    print SET1, SET2
    # Probability-estimate mode ("-b 1") for the SVM.
    param = svm.svm_parameter("-b 1")
    if KERNEL_TYPE == "LINEAR":
        param.kernel_type = svm.LINEAR
        # A linear kernel has no gamma; collapse the search range to one step.
        GAMMA_RANGE = 1, 0, -2
    else:
        param.kernel_type = svm.RBF
    cvfunc = leave_one_out
    n_cv = None
    use_sets = not SET1 is None and not SET2 is None
    fn = argv[0]
    # Training data: read -> assign_classes, then strip the first element
    # of every sequence (d[1][1:]) before prepare_data.
    dataset = make_d.read_data(open(fn))
    data = make_d.assign_classes(dataset)
    data = [(d[0], d[1][1:]) for d in data]
    data = make_d.prepare_data(data)
    """
    Next line is just for testing.
    """
    # Restrict to the two classes of interest (labels 1.0 and 0.0).
    data = {1.0: data[1.0], 0.0: data[0.0]}
    print data.keys(), [len(v) for v in data.values()]
    # Test data: same pipeline as the training data above.
    testdata = make_d.read_data(open(argv[1]))
    testset = make_d.assign_classes(testdata)
    testset = [(d[0], d[1][1:]) for d in testset]
    testset = make_d.prepare_data(testset)
    # Map each (trimmed) sequence to the list of integer suffixes taken
    # from the record identifiers ("..._<n>") — presumably grouping
    # fragments by their precursor; confirm against the input format.
    precursor = {}
    for k, v in testdata.items():
        v = v[1:]
        precursor[v] = precursor.get(v, []) + [int(k.split("_")[-1])]
    print precursor
    # Derive the log-file stem from the training file name.
    outfile = os.path.basename(fn)
    outfile = outfile.replace(".fasta", "")
    outfile = outfile.replace(".fas", "")
    if use_sets:
        # e.g. SET1=[1,2], SET2=[3] -> "12vs3"
        outfile = "".join(map(str, map(int, SET1))) + "vs" + "".join(map(str, map(int, SET2)))
    log_name = "%s-%s-%i-%s.csv" % (TIMESTAMP, KERNEL_TYPE, int(RANDOMIZE_DATA), outfile)
    logfile = open(log_name, "w")
    """
    Prepare test set (precursor fragments).
    """
    # Relabel class 0.0 as -1.0 (SVM convention), then take the whole
    # test set unbalanced and unsplit.
    testset[-1.0] = copy.deepcopy(testset[0.0])
    del testset[0.0]
    testset = make_d.make_set(testset, balanced_set=False, training_fraction=1.0)
    """
    'Training' and 'Test' sets flipped
    """
    test_y, test_x = testset[:2]
    encoded_x = [make_d.encode(x, make_d.encode_dic) for x in test_x]
    # logfile.write(',%s\n' % ','.join(map(str, map(int, test_y))))
    """
    Train and predict
    """
    # One accumulator slot per test sample.
    row = [0.0 for x in test_x]
    while i < N_RUNS:
        # Progress indicator on one line.
        sys.stdout.write("%i " % i)
        sys.stdout.flush()
        # Build a fresh binary training set each run: class 1.0 vs 0.0,
        # relabelled +1.0 / -1.0, all samples used for training
        # (training_fraction=1.0, so the 'test' halves are dummies).
        set1 = dict([item for item in data.items() if item[0] == 1.0])
        set2 = dict([item for item in data.items() if item[0] == 0.0])
        set1 = make_d.make_set(set1, training_fraction=1.0)
        set2 = make_d.make_set(set2, training_fraction=1.0)
        new_sets = {1.0: set1[1], -1.0: set2[1]}
        sets = make_d.make_set(new_sets, training_fraction=1.0)
        train_y, train_x, dummy_y, dummy_x = sets
        print [len(x) for x in sets]