Example #1
0
    def run(self, argv):
        """Random-forest experiment loop (visible portion only; this block
        is truncated mid-loop in the excerpt).

        Repeats self.n_runs times: builds train/test splits from self.data,
        optionally shuffles the labels, and index-encodes the sequences.
        argv[0] is only used as a tag for the log file.
        """
        # Data must have been loaded before run() is called.
        assert(self.data is not None)

        # Maps each RNA nucleotide to a numeric index (wrapped in a list).
        encode_dic = {'A': [0], 'C': [1], 'G': [2], 'U': [3]}
        recog_rates = []

        # Log tag: classifier name, whether labels are randomized, input name.
        self.start_logging(['RFOREST',
                            str(int(self.randomize_data)), argv[0]])

        i = 0
        while i < self.n_runs:
            # One-line progress indicator.
            sys.stdout.write('%i ' % i)
            sys.stdout.flush()

            # Either restrict the data to the two merged class groups
            # (self.set1 / self.set2) or use all classes.
            if self.limit_sets:
                new_sets = make_d.merge_multiclasses(self.data,
                                                     self.set1, self.set2)
                sets = make_d.make_set(new_sets, training_fraction=1.0)
            else:
                sets = make_d.make_set(self.data, training_fraction=1.0)
            train_y, train_x, test_y, test_x = sets

            # Label shuffling destroys the label/feature association --
            # presumably a randomization (negative) control; confirm.
            if self.randomize_data:
                random.shuffle(train_y)
                random.shuffle(test_y)
                pass

            print [len(x) for x in sets]

            # Encode sequences with the local nucleotide dictionary.
            train_x = [make_d.encode(x, encode_dic) for x in train_x]
Example #2
0
    def run(self, argv):
        """SVM experiment loop (visible portion only; this block is
        truncated mid-loop in the excerpt).

        Repeats self.n_runs times: builds 75%/25% train/test splits from
        self.data, optionally shuffles the labels, and encodes the training
        sequences via the shared make_d.encode_dic.  argv[0] is only used
        as a tag for the log file.
        """
        assert(self.data is not None)

        param_grid = {}
        results = []
        sum_acc = 0

        # Log tag: kernel type, whether labels are randomized, input name.
        self.start_logging([self.kernel_type,
                            str(int(self.randomize_data)), argv[0]])

        i = 0
        while i < self.n_runs:
            # One-line progress indicator.
            sys.stdout.write('%i ' % i)
            sys.stdout.flush()

            # Optionally merge the multi-class data into the two groups
            # self.set1 / self.set2 before splitting.
            if self.limit_sets:
                new_sets = make_d.merge_multiclasses(self.data, self.set1, self.set2)
                sets = make_d.make_set(new_sets, training_fraction=0.75)
            else:
                sets = make_d.make_set(self.data, training_fraction=0.75)
            train_y, train_x, test_y, test_x = sets

            # Label shuffling -- presumably a randomization control; confirm.
            if self.randomize_data:
                random.shuffle(train_y)
                random.shuffle(test_y)
                pass

            print [len(x) for x in sets]

            # Uses the shared module-level encoding dict here.
            train_x = [make_d.encode(x, make_d.encode_dic) for x in train_x]
Example #3
0
def main(argv):
    """SVM experiment driver (visible portion only; truncated mid-loop).

    argv[0] is the input FASTA-style file; argv[1:] are passed to init().
    Builds train/test splits N_RUNS times, optionally restricted to the
    two class groups SET1 vs SET2, and logs to a timestamped CSV file.
    """
    global C_RANGE
    global GAMMA_RANGE
    global SET1
    global SET2

    i = 0
    param_grid = {}
    results = []
    sum_acc = 0

    init(argv[1:])
    print SET1, SET2
    # return None

    # Read and class-label the data set.
    fn = argv[0]
    dataset = make_d.read_data(open(fn))
    dataset = make_d.assign_classes(dataset)

    data = make_d.prepare_data(dataset)
    print data.keys(), [len(v) for v in data.values()]

    # '-b 1' enables probability estimates; for a linear kernel the gamma
    # grid is collapsed to a single value since gamma is unused there.
    param = svm.svm_parameter('-b 1')
    if KERNEL_TYPE == 'LINEAR':
        param.kernel_type = svm.LINEAR
        GAMMA_RANGE = 1, 0, -2
    else:
        param.kernel_type = svm.RBF

    cvfunc = leave_one_out
    n_cv = None

    # Two-group mode only when both class groups are configured.
    use_sets = not SET1 is None and not SET2 is None

    # Log file name: input stem without FASTA extension, or '<set1>vs<set2>'.
    outfile = os.path.basename(fn)
    outfile = outfile.replace('.fasta', '')
    outfile = outfile.replace('.fas', '')
    if use_sets:
        outfile = ''.join(map(str, map(int, SET1))) + 'vs' + ''.join(map(str, map(int, SET2)))

    log_name = '%s-%s-%i-%s.csv' % (TIMESTAMP,
                                    KERNEL_TYPE,
                                    int(RANDOMIZE_DATA),
                                    outfile)
    logfile = open(log_name, 'w')

    while i < N_RUNS:
        # One-line progress indicator.
        sys.stdout.write('%i ' % i)
        sys.stdout.flush()

        if use_sets:
            """ TODO: If set sizes are reasonably large, do not use the complete smallest set. """
            # Pool all items of each class group, relabel the pools as
            # +1 / -1, then split 75/25 for this run.
            set1 = dict([item for item in data.items()
                         if item[0] in SET1])
            set2 = dict([item for item in data.items()
                         if item[0] in SET2])
            set1 = make_d.make_set(set1, training_fraction=1.0)
            set2 = make_d.make_set(set2, training_fraction=1.0)
            new_sets = {1.0: set1[1], -1.0: set2[1]}
            sets = make_d.make_set(new_sets, training_fraction=0.75)
        else:
            sets = make_d.make_set(data, training_fraction=0.75)
        train_y, train_x, test_y, test_x = sets

        # Label shuffling -- presumably a randomization control; confirm.
        if RANDOMIZE_DATA:
            random.shuffle(train_y)
            random.shuffle(test_y)
            pass

        print [len(x) for x in sets]
Example #4
0
def main(argv):
    """SVM experiment driver (visible portion only; truncated mid-loop).

    Same overall flow as the other drivers in this file, but class-group
    merging is delegated to make_d.merge_multiclasses.  argv[0] is the
    input file; argv[1:] are passed to init().
    """
    global C_RANGE
    global GAMMA_RANGE
    global SET1
    global SET2

    i = 0
    param_grid = {}
    results = []
    sum_acc = 0

    init(argv[1:])
    print SET1, SET2

    # Read, class-label and group the data set.
    fn = argv[0]
    dataset = make_d.read_data(open(fn))
    dataset = make_d.assign_classes(dataset)
    data = make_d.prepare_data(dataset)
    print data.keys(), [len(v) for v in data.values()]

    # '-b 1' enables probability estimates; for a linear kernel the gamma
    # grid is collapsed to a single value since gamma is unused there.
    param = svm.svm_parameter('-b 1')
    if KERNEL_TYPE == 'LINEAR':
        param.kernel_type = svm.LINEAR
        GAMMA_RANGE = 1, 0, -2
    else:
        param.kernel_type = svm.RBF

    cvfunc = svmfun.leave_one_out
    n_cv = None

    # Two-group mode only when both class groups are configured.
    limit_sets = not SET1 is None and not SET2 is None

    # Log file name: input stem without FASTA extension, or '<set1>vs<set2>'.
    outfile = os.path.basename(fn)
    outfile = outfile.replace('.fasta', '')
    outfile = outfile.replace('.fas', '')
    if limit_sets:
        outfile = ''.join(map(str, map(int, SET1))) + 'vs'
        outfile += ''.join(map(str, map(int, SET2)))

    log_name = '%s-%s-%i-%s.csv' % (TIMESTAMP,
                                    KERNEL_TYPE,
                                    int(RANDOMIZE_DATA),
                                    outfile)
    logfile = open(log_name, 'w')

    while i < N_RUNS:
        # One-line progress indicator.
        sys.stdout.write('%i ' % i)
        sys.stdout.flush()

        # 75/25 split, optionally after merging classes into two groups.
        if limit_sets:
            new_sets = make_d.merge_multiclasses(data, SET1, SET2)
            sets = make_d.make_set(new_sets, training_fraction=0.75)
        else:
            sets = make_d.make_set(data, training_fraction=0.75)
        train_y, train_x, test_y, test_x = sets

        # Label shuffling -- presumably a randomization control; confirm.
        if RANDOMIZE_DATA:
            random.shuffle(train_y)
            random.shuffle(test_y)
            pass

        print [len(x) for x in sets]
Example #5
0
def main(argv):
    """Train on argv[0] and predict precursor fragments from argv[1]
    (visible portion only; truncated mid-loop in the excerpt).

    argv[2:] are passed to init().  The test file's class 0.0 records are
    relabeled -1.0, and the whole test file is scored each run by a model
    trained on the full training file.
    """
    global C_RANGE
    global GAMMA_RANGE
    global SET1
    global SET2

    i = 0
    param_grid = {}
    results = []
    sum_acc = 0

    init(argv[2:])
    print SET1, SET2

    # '-b 1' enables probability estimates; for a linear kernel the gamma
    # grid is collapsed to a single value since gamma is unused there.
    param = svm.svm_parameter("-b 1")
    if KERNEL_TYPE == "LINEAR":
        param.kernel_type = svm.LINEAR
        GAMMA_RANGE = 1, 0, -2
    else:
        param.kernel_type = svm.RBF

    cvfunc = leave_one_out
    n_cv = None

    use_sets = not SET1 is None and not SET2 is None

    # Training data; the leading element of every record's sequence is
    # dropped (d[1][1:]) -- presumably a marker/header column; confirm.
    fn = argv[0]
    dataset = make_d.read_data(open(fn))
    data = make_d.assign_classes(dataset)

    data = [(d[0], d[1][1:]) for d in data]

    data = make_d.prepare_data(data)
    """ Next line is just for testing. """
    data = {1.0: data[1.0], 0.0: data[0.0]}
    print data.keys(), [len(v) for v in data.values()]

    # Test data gets the same first-element trim.
    testdata = make_d.read_data(open(argv[1]))
    testset = make_d.assign_classes(testdata)

    testset = [(d[0], d[1][1:]) for d in testset]

    testset = make_d.prepare_data(testset)

    # Map each trimmed test sequence to the list of fragment numbers
    # parsed from the trailing '_<n>' of its record id.
    precursor = {}
    for k, v in testdata.items():
        v = v[1:]
        precursor[v] = precursor.get(v, []) + [int(k.split("_")[-1])]
    print precursor

    # Log file name: input stem without FASTA extension, or '<set1>vs<set2>'.
    outfile = os.path.basename(fn)
    outfile = outfile.replace(".fasta", "")
    outfile = outfile.replace(".fas", "")
    if use_sets:
        outfile = "".join(map(str, map(int, SET1))) + "vs" + "".join(map(str, map(int, SET2)))

    log_name = "%s-%s-%i-%s.csv" % (TIMESTAMP, KERNEL_TYPE, int(RANDOMIZE_DATA), outfile)
    logfile = open(log_name, "w")

    """ Prepare test set (precursor fragments). """
    # Relabel class 0.0 as -1.0 so the test labels match the binary model.
    testset[-1.0] = copy.deepcopy(testset[0.0])
    del testset[0.0]
    testset = make_d.make_set(testset, balanced_set=False, training_fraction=1.0)
    """ 'Training' and 'Test' sets flipped """
    test_y, test_x = testset[:2]
    encoded_x = [make_d.encode(x, make_d.encode_dic) for x in test_x]

    # logfile.write(',%s\n' % ','.join(map(str, map(int, test_y))))

    """ Train and predict """
    row = [0.0 for x in test_x]
    while i < N_RUNS:
        # One-line progress indicator.
        sys.stdout.write("%i " % i)
        sys.stdout.flush()

        # Rebuild the full training pool each run: all class-1.0 items
        # as +1 and all class-0.0 items as -1 (no held-out split here).
        set1 = dict([item for item in data.items() if item[0] == 1.0])
        set2 = dict([item for item in data.items() if item[0] == 0.0])
        set1 = make_d.make_set(set1, training_fraction=1.0)
        set2 = make_d.make_set(set2, training_fraction=1.0)
        new_sets = {1.0: set1[1], -1.0: set2[1]}
        sets = make_d.make_set(new_sets, training_fraction=1.0)
        train_y, train_x, dummy_y, dummy_x = sets

        print [len(x) for x in sets]
Example #6
0
def main(argv):

    global C_RANGE
    global GAMMA_RANGE

    init(argv[2:])    

    fn = argv[0]
    dataset = make_d.read_data(open(fn))
    items = dataset.items()
    keys = [float(x[0].split('_')[0][3:]) for x in items]
    dataset = zip(keys, [v[1] for v in items])

    data = make_d.prepare_data(dataset)
    print data.keys(), [len(v) for v in data.values()]

    param = svm.svm_parameter('-b 1')
    if KERNEL_TYPE == 'LINEAR':
        param.kernel_type = svm.LINEAR
        GAMMA_RANGE = 1, 0, -2
    else:
        param.kernel_type = svm.RBF

    fn_test = argv[1]
    testdata = make_d.read_data(open(fn_test))
    testitems = testdata.items()
    testkeys = [float(x[0].split('_')[0][3:]) for x in testitems]
    testdataset = zip(testkeys, [v[1] for v in testitems])
    testdata = make_d.prepare_data(testdataset)
    

    cvfunc = leave_one_out
    n_cv = None

    outfile = os.path.basename(fn)
    outfile = outfile.replace('.fasta', '')
    outfile = outfile.replace('.fas', '')

    log_name = '%s-%s-%i-%s.csv' % (TIMESTAMP, 
                                    KERNEL_TYPE,
                                    int(RANDOMIZE_DATA),
                                    outfile)
    logfile = open(log_name, 'w')
                                    

    i = 0
    param_grid = {}
    results = []
    sum_acc = 0

    sets = make_d.make_set(data, balanced_set=False, training_fraction=1.0)
    train_y, train_x, test_y, test_x = sets
    train_x = [make_d.encode(x, make_d.encode_dic) for x in train_x]
    
    testsets = make_d.make_set(testdata, balanced_set=False, 
                               training_fraction=0.0)
    dummy0, dummy1, test_y, test_x = testsets
    test_x  = [make_d.encode(x, make_d.encode_dic) for x in test_x]
        
    param_grid = {}
    param_grid = grid_search(train_y, train_x, param, param_grid,
                             leave_one_out, n_cv, C_RANGE, GAMMA_RANGE)
    ranking = []
    for k, v in param_grid.items():
        recognized = [v_i[0][0] == v_i[3] for v_i in v]
        recog_rate = sum(map(int, recognized))/float(len(recognized))
        ranking.append((recog_rate, k))
    ranking.sort()
    
    param.C, param.gamma = map(lambda x: 2**x, ranking[-1][1])
    problem = svm.svm_problem(train_y, train_x)
    model = svmutil.svm_train(problem, param, '-q')
    result = svmutil.svm_predict(test_y, test_x, model, '-b 1')
    print result
    
    """
    cur_result = zip(result[0], test_y)
    cur_acc = compute_accuracy(cur_result)
    
    results.extend(cur_result)
    total_acc = compute_accuracy(results)

    sum_acc += cur_acc
    mean_acc = sum_acc/(i+1)
    # print cur_acc, mean_acc, total_acc
    
    logfile.write('%f,%f,%f\n' % (cur_acc, mean_acc, total_acc))

    
    print 'ACC', compute_accuracy(results)
    """
    logfile.close()
    return None