Example 1
def part_a(org_train, org_train_labels):
    """
    This function implements part A
    :return: best learning rate
    """
    print "part a - start"
    learning_rate_lst = np.array(list(range(1, 99,
                                            1))).astype("float32") / 100.0

    validating_acc_lst = []
    for lr in learning_rate_lst:
        mean_acc = 0
        for i in range(10):
            svm = SVM(org_train.shape[1])
            svm.train(org_train, org_train_labels, lr, T=1000)
            mean_acc += svm.test(org_validation, org_validation_labels)
        validating_acc_lst.append(mean_acc / (i + 1))
    plt.figure()
    plot_graph(validating_acc_lst, learning_rate_lst, "q3_part_a", "",
               "Accuracy vs Learning Rate for SVM", "Accuracy",
               "Learning Rate")
    best_acc_indx = validating_acc_lst.index(max(validating_acc_lst))
    best_lr = learning_rate_lst[best_acc_indx]
    print "The best learning rate is {} for accuracy: {}".format(
        best_lr, max(validating_acc_lst))
    print "part a - done"
    return best_lr
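
A compact variant of the sweep above, as a hedged sketch; it assumes the same SVM class and the module-level org_validation / org_validation_labels that part_a already relies on.

import numpy as np

def sweep_learning_rate(org_train, org_train_labels, n_runs=10, T=1000):
    # Candidate rates 0.01 .. 0.98, matching the grid in Example 1
    rates = np.arange(1, 99, dtype=np.float32) / 100.0
    mean_accs = np.empty_like(rates)
    for k, lr in enumerate(rates):
        runs = []
        for _ in range(n_runs):
            svm = SVM(org_train.shape[1])  # SVM class assumed from the example
            svm.train(org_train, org_train_labels, lr, T=T)
            runs.append(svm.test(org_validation, org_validation_labels))
        mean_accs[k] = np.mean(runs)
    best = int(np.argmax(mean_accs))       # argmax instead of list.index(max(...))
    return rates[best], mean_accs[best]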
Example 2
def train_and_test(train_x, test_x, training_class, testing_class, kernel):
    classes = Counter(training_class)
    classes = classes.keys()
    total_accuracy = []
    for label in classes:
        train_y = []
        for t in training_class:
            if t == label:
                train_y.append(1.0)
            else:
                train_y.append(-1.0)
        train_y = np.array(train_y)

        test_y = []
        for t in testing_class:
            if t == label:
                test_y.append(1.0)
            else:
                test_y.append(-1.0)
        test_y = np.array(test_y)

        classifier = SVM(kernel=kernel, C=0.1)
        classifier.train(train_x, train_y)
        y_predict = classifier.test(test_x)
        correct = np.sum(y_predict == test_y)
        print("%d out of %d predictions correct" % (correct, len(y_predict)))
        accuracy = correct / len(y_predict)
        print("accuracy is {}".format(accuracy))
        total_accuracy.append(accuracy)
    mean_accuracy = np.mean(np.array(total_accuracy))
    print('mean accuracy is {}'.format(mean_accuracy))

    return mean_accuracy
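
The per-class relabeling loops in Example 2 can be vectorized; a minimal sketch, assuming the labels are plain sequences that numpy can wrap:

import numpy as np

def binarize_labels(labels, positive_label):
    # +1 for the current one-vs-rest class, -1 for everything else
    labels = np.asarray(labels)
    return np.where(labels == positive_label, 1.0, -1.0)

# e.g. train_y = binarize_labels(training_class, label)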
Example 3
def svmTest(feature_len, all_lines, all_features, all_labels):
    counts = {}
    for i in range(10):
        rate = 0
        print("Test %d:" % (i + 1))
        train_features = all_features[0:int(0.8 * len(all_features))]
        train_labels = all_labels[0:int(0.8 * len(all_features))]
        test_features = all_features[int(0.8 * len(all_features)):]
        test_labels = all_labels[int(0.8 * len(all_features)):]
        length = len(test_labels)
        for C in range(50, 61, 1):
            rate = 0
            new_svm = SVM(train_features,
                          train_labels,
                          C=C,
                          function='RBF',
                          d=0.53)
            # print("Train:")
            new_svm.train()
            # print("\nPredict:", end = "\n")
            for j in range(0, length):
                res = new_svm.predict(test_features[j])
                if res == test_labels[j]:
                    rate += 1
            print("C = %f: " % C, end=" ")
            print(rate / length)
            if C not in counts:
                counts[C] = rate / length
            else:
                counts[C] += rate / length
        all_features, all_labels = now_provider.getFeatureAndLabel(
            all_lines, feature_len)
    for x, y in counts.items():  # bug fix: iterating a dict yields keys only
        print(x, y)
Example 4
def part_b(org_train, org_train_labels, best_lr):
    """
    This function implements part B
    :return: best C
    """
    print "part b - start"
    c_lst = np.array(list(range(1, 999, 10))).astype("float32") / 1000.0

    validating_acc_lst = []
    for c in c_lst:
        mean_acc = 0
        for i in range(10):
            svm = SVM(org_train.shape[1])
            svm.train(org_train, org_train_labels, best_lr, C=c, T=1000)
            mean_acc += svm.test(org_validation, org_validation_labels)
        validating_acc_lst.append(mean_acc / (i + 1))
    plt.figure()
    plot_graph(validating_acc_lst, c_lst, "q3_part_b", "",
               "Accuracy vs C for SVM", "Accuracy", "C")
    best_acc_indx = validating_acc_lst.index(max(validating_acc_lst))
    best_c = c_lst[best_acc_indx]
    print "The best C is {} for accuracy: {}".format(best_c,
                                                     max(validating_acc_lst))
    print "part b - done"
    return best_c
Example 5
def part_c(org_train, org_train_labels, best_c, best_lr):
    """
    This function implements part C
    :param best_c: Best C
    :param best_lr: Best Learning rate
    :return: the SVM
    """
    print "part c - start"
    svm = SVM(org_train.shape[1])
    svm.train(org_train, org_train_labels, best_lr, C=best_c, T=20000)

    # Save the weights image
    plt.figure()
    plt.imshow(np.reshape(svm.get_weights(), (28, 28)),
               interpolation='nearest')
    print "part c - done"
    return svm
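
The comment in part_c says the weights image is saved, but the snippet only renders it; if writing to disk is intended, one extra line would do it (file name hypothetical):

plt.savefig('q3_part_c_weights.png', bbox_inches='tight')  # hypothetical file name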
Example 6
File: GA.py Project: ma853529615/12
def SVMResult(vardim, x, bound, dataset):
    X = dataset.loc[dataset['split'] == 'train'].iloc[:, 0:-2].values
    y = dataset.loc[dataset['split'] == 'train'].iloc[:, -2].values
    val_X = dataset.loc[dataset['split'] == 'val'].iloc[:, 0:-2].values
    val_y = dataset.loc[dataset['split'] == 'val'].iloc[:, -2].values
    c = abs(x[0])
    g = abs(x[1])
    # f = x[2]  # four-parameter variant
    svm = SVM(C=c, gamma=g)
    predictor = svm.train(X, y)
    y_bar = predictor.predict_vec(val_X)

    return score(y_bar, val_y)
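
A hedged usage sketch: SVMResult evaluates one GA candidate x = [C, gamma] against a dataframe that carries a 'split' column, as the slicing above expects. The dataset variable and the vardim/bound arguments below are placeholders, not taken from GA.py.

fitness = SVMResult(vardim=2, x=[1.0, 0.1], bound=None, dataset=dataset)  # dataset is a placeholder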
Example 7
def main():
    dim = 2
    N = 100
    np.random.seed(1000)
    data1 = np.random.multivariate_normal([-5, 10], [[2, 0], [0, 2]],
                                          int(N / 2))
    draw(data1[:, 0], data1[:, 1], "red")
    data2 = np.random.multivariate_normal([2, 2], [[2, 0], [0, 2]], int(N / 2))
    draw(data2[:, 0], data2[:, 1], "blue")
    # plt.show()
    X = np.empty(shape=[0, dim])
    X = np.append(X, data1, axis=0)
    X = np.append(X, data2, axis=0)
    iter = 100
    svm = SVM(X, iter)
    svm.train()
    print("train over")
    X, Y = svm.decision_boundary(0)
    drawline(X, Y, "black", '--')
    X, Y = svm.decision_boundary(+1)
    drawline(X, Y, "red", '-')
    X, Y = svm.decision_boundary(-1)
    drawline(X, Y, "blue", '-')
    # X,Y = svm.support_vector()
    # draw(X,Y,"black","x")
    plt.show()

    # history_margin = svm.history_margin
    # print(history_margin)
    # drawline(np.arange(0,iter,1), history_margin, "blue", '-')
    # plt.show()

    history_loss = svm.history_loss
    print(history_loss)
    drawline(np.arange(0, iter, 1), history_loss, "blue", '-')
    plt.show()
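
The three curves drawn above are the separating hyperplane (f(x) = 0) and the two margins (f(x) = +1 and f(x) = -1). If the trained model exposes its weight vector, the margin width follows directly; the attribute name below is an assumption, since the example never shows it:

margin_width = 2.0 / np.linalg.norm(svm.w)  # svm.w is assumed, not shown above
print("geometric margin:", margin_width)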
Example 8
def transfer_learning(print_output=True):
    path = 'datasets/'
    data_loader = DataLoader(path)
    names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}
    transformed_data_sets = []

    path = 'datasets/'

    files = [f for f in listdir(path) if isfile(join(path,f))]
    files.pop(0)
    data_loader = DataLoader(path)
    domains = data_loader.csv_files
    all_domains = copy.deepcopy(domains)
    training_domains = data_loader.csv_files
    all_domains_svm_wda_metrics_list = []
    all_domains_svm_metrics_list = []
    all_domains_svm_bow_mlp_list = []
    all_domains_mlp_fold_scores = []

    for i, held_out_domain in enumerate(domains):
        training_domains.pop(i)
        names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}
        svm_wda_metrics_list = []
        svm_metrics_list = []
        svm_bow_mlp_list = []

        folder_name = '/' + files[i]
        domain_name = files[i].__str__()
        domain_name = domain_name.split('.')[0]
        folder_name = 'output' + '/' + domain_name

        output = "Dataset: {}".format(files[i])
        if print_output:
            print(output)

        #shuffle(data_loader.csv_files)
        data_loader.csv_files = training_domains
        data_sets = data_loader.csv_files
        domains = data_loader.get_feature_matrix(names)

        #Take one file out of the csv files in the data loader and use it as the held-out domain

        #Get the feature representation of the held-out data
        held_out_x, held_out_y = data_loader.get_feature_matrix(names, held_out_domain)
        #Create the folds for the held-out data (default: 5 folds)
        folds = data_loader.cross_fold_valdation(held_out_x, held_out_y)
        #Get the total number of domains i.e., the number of files with documents
        n_source_domains = len(data_sets)
        os.makedirs(folder_name)

        #Must convert the data type of the matrix for theano
        feature_engineer = Feature_Engineer()

        #Start the 5 fold cross validation
        for n_fold, fold in enumerate(folds):
            output = "Fold {}: \n".format(n_fold)
            if print_output:
                print(output)
            output = '{}/{}/fold_{}.csv'.format(os.getcwd(), folder_name, (n_fold + 1))
            file = open(output, 'w')
            csv_writer = csv.writer(file)

            #Each sample is a list that contains the x and y for the classifier.
            #Typically fold[0] would be the train sample, but the split is swapped
            #here to test the effectiveness of the domain adaptation.
            train_sample = fold[1]
            test_sample = fold[0]

            #These are the original copies to be copied over the augmented feature matrix
            #Each sample contains the text and y labels from the data before it is put into the sklearn count vectorizer
            train_x, train_y = train_sample
            test_x, test_y = test_sample

            train_y[train_y == 0] = 2
            train_y[train_y == 1] = 3
            test_y[test_y == 0] = 2
            test_y[test_y == 1] = 3


            #Get the bag of words representation of the small 20% target source data and transform the other 80%
            #of the data.
            train_x = data_loader.get_transformed_features(train_x, True, False, True)
            test_x = data_loader.transform(test_x, True, True)

            transformed_domains = []

            #Transform the domains with respect to the training data
            for domain in domains:
                domain_x, domain_y = domain
                transformed_domain_x = data_loader.transform(domain_x, True, True)
                transformed_domain_x, domain_y = data_loader.underSample(transformed_domain_x, domain_y)
                transformed_domains.append([transformed_domain_x, domain_y])

            augmented_feature_matrix_train, augmented_y_train = feature_engineer.augmented_feature_matrix(transformed_domains,
                                                                                              [train_x, train_y])
            augmented_feature_matrix_test, augmented_y_test = feature_engineer.augmented_feature_matrix(held_out_domain=[test_x, test_y],
                                                                                                        train_or_test=False,
                                                                                                        n_source_domains=len(transformed_domains))
            augmented_y_test[augmented_y_test == 2] = 0
            augmented_y_test[augmented_y_test == 3] = 1
            #SVM with the augmented feature matrix for domain adaptation
            svm_wda = SVM()
            svm_wda.train(augmented_feature_matrix_train, augmented_y_train)
            svm_wda.test(augmented_feature_matrix_test, augmented_y_test)
            output = "\nSVM with domain adaptation metrics:"
            csv_writer.writerow([output])
            if print_output:
                print(output)
                print(svm_wda)
                print("\n")
            svm_wda_metrics_list.append(svm_wda.metrics)

            classifier = NeuralNet(n_hidden_units=[250], output_size=4, batch_size=20, n_epochs=200, dropout=True,
                                   activation_function='relu', learning_rate=.3, momentum=True, momentum_term=.5)
            write_to_csv(svm_wda.metrics, csv_writer)


            y_for_mlp = []
            #Set up the x and y data for the MLP
            for p, domain in enumerate(transformed_domains):
                domain_x, domain_y = domain
                domain_x = domain_x.todense()
                y_for_mlp.append(domain_y)

                if p == 0:
                    neural_net_x_train = domain_x
                    neural_net_y_train = domain_y
                else:
                    neural_net_x_train = numpy.vstack((neural_net_x_train, domain_x))
                    neural_net_y_train = numpy.hstack((neural_net_y_train, domain_y))

            neural_net_x_train = numpy.float_(neural_net_x_train)


            classifier.train(neural_net_x_train, neural_net_y_train)

            test_y[test_y == 2] = 0
            test_y[test_y == 3] = 1
            svm_y_train = neural_net_y_train
            svm_y_train[svm_y_train == 2] = 0
            svm_y_train[svm_y_train == 3] = 1

            #SVM without the domain adaptation
            svm = SVM()
            svm.train(sparse.coo_matrix(neural_net_x_train), svm_y_train)
            svm.test(test_x, test_y)
            output = "\nSVM without domain adaptation"
            if print_output:
                print(output)
                print(svm)
                print("\n")
            csv_writer.writerow([output])
            svm_metrics_list.append(svm.metrics)
            write_to_csv(svm.metrics, csv_writer)


            #Transform the feature vectors of the held out data to the learned hidden layer features of the previous
            #MLP trained with all n-1 datasets

            perceptron_train_x = theano.shared(neural_net_x_train)
            perceptron_test_x = theano.shared(test_x.todense())

            transformed_perceptron_train_x = classifier.transfer_learned_weights(perceptron_train_x)
            transformed_perceptron_test_x = classifier.transfer_learned_weights(perceptron_test_x)

            modified_transformed_perceptron_train_x = numpy.hstack((transformed_perceptron_train_x,
                                                                    neural_net_x_train))
            modified_transformed_perceptron_test_x = numpy.hstack((transformed_perceptron_test_x,
                                                                   test_x.todense()))

            output = "\nSVM with BoW and transformed features"
            csv_writer.writerow([output])
            if print_output:
                print(output)
            svm_mlp_bow = SVM()
            svm_mlp_bow.train(sparse.coo_matrix(modified_transformed_perceptron_train_x), svm_y_train)
            svm_mlp_bow.test(sparse.coo_matrix(modified_transformed_perceptron_test_x), test_y)
            write_to_csv(svm_mlp_bow.metrics, csv_writer)
            if print_output:
                print(svm_mlp_bow)
            svm_bow_mlp_list.append(svm_mlp_bow.metrics)


            output = "*********** End of fold {} ***********".format(n_fold)

            if print_output:
                print(output)


        training_domains = copy.deepcopy(all_domains)
        file_name = '{}/{}/fold_averages.csv'.format(os.getcwd(), folder_name)
        file = open(file_name, 'w+')
        csv_writer = csv.writer(file)

        if print_output:
            output = "----------------------------------------------------------------------------------------" \
                     "\nFold Scores\n " \
                     "SVM with domain adaptation"
            print_write_output(output, svm_wda_metrics_list, all_domains_svm_wda_metrics_list, csv_writer)

            output = "\nSVM without domain adaptation"
            print_write_output(output, svm_metrics_list, all_domains_svm_metrics_list, csv_writer)

            output = "SVM with BoW and transformed features"
            print_write_output(output, svm_bow_mlp_list, all_domains_svm_bow_mlp_list, csv_writer)



    file_name = '{}/output/all_fold_averages.csv'.format(os.getcwd())
    file = open(file_name, 'w+')
    csv_writer = csv.writer(file)
    if print_output:
        output = "*******************************************************************************************" \
                 "\nAll domain macro metric scores\n " \
                 "SVM with domain adaptation"
        print_macro_scores("SVM with domain adaptation", all_domains_svm_wda_metrics_list, csv_writer)

        output = "\nSVM without domain adaptation"
        print_macro_scores(output, all_domains_svm_metrics_list, csv_writer)

        output = "SVM with BoW and transformed features"
        print_macro_scores(output, all_domains_svm_bow_mlp_list, csv_writer)
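
The snippet never shows what Feature_Engineer.augmented_feature_matrix builds. One plausible reading, and the sketch below is only that, is the "frustratingly easy" domain-adaptation layout: one shared copy of the features plus one block per source domain.

import numpy

def augment(x, domain_idx, n_domains):
    # Assumed layout: [shared block | domain 0 block | ... | domain n-1 block]
    d = x.shape[1]
    out = numpy.zeros((x.shape[0], d * (n_domains + 1)))
    out[:, :d] = x                          # shared block, filled for every row
    start = d * (domain_idx + 1)
    out[:, start:start + d] = x             # block reserved for this domain
    return out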
Example 9
def main():
    dm_model = Doc2Vec.load('400_pvdm_doc2vec.d2v')
    dbow_model = Doc2Vec.load('400_pvdbow_doc2vec.d2v')

    #Load datasets for classifying
    path = 'datasets/'
    doc2vec_vector_size = 400
    files = [f for f in listdir(path) if isfile(join(path,f))]
    files.pop(0)

    data_loader = DataLoader(path)

    domains = data_loader.csv_files


    names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}

    domain_features = data_loader.get_feature_matrix(names)
    domain = domain_features.pop(0)
    x, y = domain
    #get size
    n_total_documents = 0

    for domain in domain_features:
        n_total_documents+=len(domain[0])
        x = numpy.hstack((x, domain[0]))
        y = numpy.hstack((y, domain[1]))
    x, y = data_loader.create_random_samples(x, y, train_p=.8, test_p=.2)
    train_x, test_x = x
    train_y, test_y = y
    transformed_train_x = data_loader.get_transformed_features(train_x, sparse=True, tfidf=True, add_index_vector=False)
    transformed_test_x = data_loader.get_transformed_features(test_x, sparse=True, tfidf=True)
    all_features = numpy.zeros(shape=(n_total_documents, 800))
    all_labels = numpy.asarray([])

    i = 0

    dbow_dm_train_x = numpy.zeros((train_x.shape[0], 2*doc2vec_vector_size))
    dbow_dm_test_x = numpy.zeros((test_x.shape[0], 2*doc2vec_vector_size))

    """
        Set up the feature for the SVM by iterating through all the word vectors.
        Pre process each vector and then feed into doc2vec model, both the distributed memory
        and distributed bag of words. Concatenate the vectors for better classification results
        as per paragraph to vector paper by Mikolv.
    """
    for feature_vector in train_x:
        preprocessed_line = list(Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_train_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_train_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i+=1

    """
        Do the same as above but for the test set.
    """

    i = 0

    for feature_vector in test_x:  # bug fix: iterate the test features, not the labels
        preprocessed_line = list(Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_test_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_test_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i+=1

    print("Training doc2vec SVM")
    #Train SVM on classic bow
    svm = SVM()
    svm.train(dbow_dm_train_x, train_y)
    svm.test(dbow_dm_test_x, test_y)
    print("end of training doc2vec bow SVM\n")


    print("Training classic bow SVM")
    #Train SVM on classic bow
    svm = SVM()
    svm.train(transformed_train_x, train_y)
    svm.test(transformed_test_x, test_y)
    print("end of training classic bow SVM\n")
Example 10
data = genfromtxt('train.txt',delimiter=',',dtype=str)
X = data[:,CONTINOUS]
y = data[:,TAG]
y[y==" <=50K"] = -1.0
y[y==" >50K"]  = 1.0
X = X.astype(float)
X = X - np.mean(X, axis=0)
X /= np.std(X, axis=0)
data = np.hstack((X,y.reshape(y.shape[0],1))).astype(float)
np.random.shuffle(data)
train_size  = math.ceil(data.shape[0]*0.9)
train       = data[:train_size,:]
validation  = data[train_size:,:]

classifier = SVM(dims=6, reg=0.0001)
classifier.train(data=train, seasons=50)
classifier.plot_all(reg="0.0001")
classifier.evaluate(validation)

classifier = SVM(dims=6, reg=0.001)
classifier.train(data=train, seasons=50)
classifier.plot_all(reg="0.001")
classifier.evaluate(validation)

classifier = SVM(dims=6, reg=0.01)
classifier.train(data=train, seasons=50)
classifier.plot_all(reg="0.01")
classifier.evaluate(validation)
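
The three copy-pasted blocks above collapse into a loop; same SVM interface assumed:

for reg in (0.0001, 0.001, 0.01):
    classifier = SVM(dims=6, reg=reg)
    classifier.train(data=train, seasons=50)
    classifier.plot_all(reg=str(reg))
    classifier.evaluate(validation)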

TestData = genfromtxt('test.txt',delimiter=',',dtype=str)
X = TestData[:,CONTINOUS]
Example 11
def train_and_test(C, tolerance):
    svm = SVM(C=C, tolerance=tolerance)
    svm.train()
    acc = svm.test()
    # print(os.getpid(), acc)
    return [acc, svm]
Example 12
@Email   : [email protected]
@File    : main.py
'''

import cv2
from SVM import SVM
from prepare import prepare_data

cell_class = {11:'EOSINOPHIL',
              22:'LYMPHOCYTE',
              33:'MONOCYTE',
              44:'NEUTROPHIL'}

types = ['hog', 'gray', 'rgb', 'hsv']
feature_type = types[3]

img_path = './data/test_data/LYMPHOCYTE/_0_1050.jpeg'  #adjust the test image
img=cv2.imread(img_path)
cv2.putText(img,'LYMPHOCYTE',(23,45),cv2.FONT_HERSHEY_COMPLEX,0.6,(0,0,255),1)
cv2.imshow('Result',img)
cv2.waitKey()

svm=SVM()
data=prepare_data(feature_type)
print('data:',data)

svm.train(data)
img=cv2.imread(img_path)
ID_num=svm.predict(img,feature_type)
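
The script computes ID_num but never uses it; a hypothetical completion that maps the predicted id back to a class name and overlays it, assuming predict returns one of the keys of cell_class:

label = cell_class.get(ID_num, 'UNKNOWN')   # assumes ID_num is a cell_class key
img = cv2.imread(img_path)
cv2.putText(img, label, (23, 45), cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 0, 255), 1)
cv2.imshow('Prediction', img)
cv2.waitKey()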

Example 13
def transfer_learning(print_output=True):
    path = 'datasets/'
    data_loader = DataLoader(path)
    names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}
    transformed_data_sets = []

    path = 'datasets/'

    files = [f for f in listdir(path) if isfile(join(path, f))]
    files.pop(0)
    data_loader = DataLoader(path)
    domains = data_loader.csv_files
    all_domains = copy.deepcopy(domains)
    training_domains = data_loader.csv_files
    all_domains_svm_wda_metrics_list = []
    all_domains_svm_metrics_list = []
    all_domains_svm_bow_mlp_list = []
    all_domains_mlp_fold_scores = []

    for i, held_out_domain in enumerate(domains):
        training_domains.pop(i)
        names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}
        svm_wda_metrics_list = []
        svm_metrics_list = []
        svm_bow_mlp_list = []

        folder_name = '/' + files[i]
        domain_name = files[i].__str__()
        domain_name = domain_name.split('.')[0]
        folder_name = 'output' + '/' + domain_name

        output = "Dataset: {}".format(files[i])
        if print_output:
            print(output)

        #shuffle(data_loader.csv_files)
        data_loader.csv_files = training_domains
        data_sets = data_loader.csv_files
        domains = data_loader.get_feature_matrix(names)

        #Take one file out of the csv files in the data loader and use it as the held-out domain

        #Get the feature representation of the held-out data
        held_out_x, held_out_y = data_loader.get_feature_matrix(
            names, held_out_domain)
        #Create the folds for the held-out data (default: 5 folds)
        folds = data_loader.cross_fold_valdation(held_out_x, held_out_y)
        #Get the total number of domains i.e., the number of files with documents
        n_source_domains = len(data_sets)
        os.makedirs(folder_name)

        #Must convert the data type of the matrix for theano
        feature_engineer = Feature_Engineer()

        #Start the 5 fold cross validation
        for n_fold, fold in enumerate(folds):
            output = "Fold {}: \n".format(n_fold)
            if print_output:
                print(output)
            output = '{}/{}/fold_{}.csv'.format(os.getcwd(), folder_name,
                                                (n_fold + 1))
            file = open(output, 'w')
            csv_writer = csv.writer(file)

            #Each sample is a list that contains the x and y for the classifier.
            #Typically fold[0] would be the train sample, but the split is swapped
            #here to test the effectiveness of the domain adaptation.
            train_sample = fold[1]
            test_sample = fold[0]

            #These are the original copies to be copied over the augmented feature matrix
            #Each sample contains the text and y labels from the data before it is put into the sklearn count vectorizer
            train_x, train_y = train_sample
            test_x, test_y = test_sample

            train_y[train_y == 0] = 2
            train_y[train_y == 1] = 3
            test_y[test_y == 0] = 2
            test_y[test_y == 1] = 3

            #Get the bag of words representation of the small 20% target source data and transform the other 80%
            #of the data.
            train_x = data_loader.get_transformed_features(
                train_x, True, False, True)
            test_x = data_loader.transform(test_x, True, True)

            transformed_domains = []

            #Transform the domains with respect to the training data
            for domain in domains:
                domain_x, domain_y = domain
                transformed_domain_x = data_loader.transform(
                    domain_x, True, True)
                transformed_domain_x, domain_y = data_loader.underSample(
                    transformed_domain_x, domain_y)
                transformed_domains.append([transformed_domain_x, domain_y])

            augmented_feature_matrix_train, augmented_y_train = feature_engineer.augmented_feature_matrix(
                transformed_domains, [train_x, train_y])
            augmented_feature_matrix_test, augmented_y_test = feature_engineer.augmented_feature_matrix(
                held_out_domain=[test_x, test_y],
                train_or_test=False,
                n_source_domains=len(transformed_domains))
            augmented_y_test[augmented_y_test == 2] = 0
            augmented_y_test[augmented_y_test == 3] = 1
            #SVM with the augmented feature matrix for domain adaptation
            svm_wda = SVM()
            svm_wda.train(augmented_feature_matrix_train, augmented_y_train)
            svm_wda.test(augmented_feature_matrix_test, augmented_y_test)
            output = "\nSVM with domain adaptation metrics:"
            csv_writer.writerow([output])
            if print_output:
                print(output)
                print(svm_wda)
                print("\n")
            svm_wda_metrics_list.append(svm_wda.metrics)

            classifier = NeuralNet(n_hidden_units=[250],
                                   output_size=4,
                                   batch_size=20,
                                   n_epochs=200,
                                   dropout=True,
                                   activation_function='relu',
                                   learning_rate=.3,
                                   momentum=True,
                                   momentum_term=.5)
            write_to_csv(svm_wda.metrics, csv_writer)

            y_for_mlp = []
            #Set up the x and y data for the MLP
            for p, domain in enumerate(transformed_domains):
                domain_x, domain_y = domain
                domain_x = domain_x.todense()
                y_for_mlp.append(domain_y)

                if p == 0:
                    neural_net_x_train = domain_x
                    neural_net_y_train = domain_y
                else:
                    neural_net_x_train = numpy.vstack(
                        (neural_net_x_train, domain_x))
                    neural_net_y_train = numpy.hstack(
                        (neural_net_y_train, domain_y))

            neural_net_x_train = numpy.float_(neural_net_x_train)

            classifier.train(neural_net_x_train, neural_net_y_train)

            test_y[test_y == 2] = 0
            test_y[test_y == 3] = 1
            svm_y_train = neural_net_y_train
            svm_y_train[svm_y_train == 2] = 0
            svm_y_train[svm_y_train == 3] = 1

            #SVM without the domain adaptation
            svm = SVM()
            svm.train(sparse.coo_matrix(neural_net_x_train), svm_y_train)
            svm.test(test_x, test_y)
            output = "\nSVM without domain adaptation"
            if print_output:
                print(output)
                print(svm)
                print("\n")
            csv_writer.writerow([output])
            svm_metrics_list.append(svm.metrics)
            write_to_csv(svm.metrics, csv_writer)

            #Transform the feature vectors of the held out data to the learned hidden layer features of the previous
            #MLP trained with all n-1 datasets

            perceptron_train_x = theano.shared(neural_net_x_train)
            perceptron_test_x = theano.shared(test_x.todense())

            transformed_perceptron_train_x = classifier.transfer_learned_weights(
                perceptron_train_x)
            transformed_perceptron_test_x = classifier.transfer_learned_weights(
                perceptron_test_x)

            modified_transformed_perceptron_train_x = numpy.hstack(
                (transformed_perceptron_train_x, neural_net_x_train))
            modified_transformed_perceptron_test_x = numpy.hstack(
                (transformed_perceptron_test_x, test_x.todense()))

            output = "\nSVM with BoW and transformed features"
            csv_writer.writerow([output])
            if print_output:
                print(output)
            svm_mlp_bow = SVM()
            svm_mlp_bow.train(
                sparse.coo_matrix(modified_transformed_perceptron_train_x),
                svm_y_train)
            svm_mlp_bow.test(
                sparse.coo_matrix(modified_transformed_perceptron_test_x),
                test_y)
            write_to_csv(svm_mlp_bow.metrics, csv_writer)
            if print_output:
                print(svm_mlp_bow)
            svm_bow_mlp_list.append(svm_mlp_bow.metrics)

            output = "*********** End of fold {} ***********".format(n_fold)

            if print_output:
                print(output)

        training_domains = copy.deepcopy(all_domains)
        file_name = '{}/{}/fold_averages.csv'.format(os.getcwd(), folder_name)
        file = open(file_name, 'w+')
        csv_writer = csv.writer(file)

        if print_output:
            output = "----------------------------------------------------------------------------------------" \
                     "\nFold Scores\n " \
                     "SVM with domain adaptation"
            print_write_output(output, svm_wda_metrics_list,
                               all_domains_svm_wda_metrics_list, csv_writer)

            output = "\nSVM without domain adaptation"
            print_write_output(output, svm_metrics_list,
                               all_domains_svm_metrics_list, csv_writer)

            output = "SVM with BoW and transformed features"
            print_write_output(output, svm_bow_mlp_list,
                               all_domains_svm_bow_mlp_list, csv_writer)

    file_name = '{}/output/all_fold_averages.csv'.format(os.getcwd())
    file = open(file_name, 'w+')
    csv_writer = csv.writer(file)
    if print_output:
        output = "*******************************************************************************************" \
                 "\nAll domain macro metric scores\n " \
                 "SVM with domain adaptation"
        print_macro_scores("SVM with domain adaptation",
                           all_domains_svm_wda_metrics_list, csv_writer)

        output = "\nSVM without domain adaptation"
        print_macro_scores(output, all_domains_svm_metrics_list, csv_writer)

        output = "SVM with BoW and transformed features"
        print_macro_scores(output, all_domains_svm_bow_mlp_list, csv_writer)
Example 14
def compareTest(feature_len, all_lines, all_features, all_labels):
    count = {}
    for i in range(10):
        print("\nTest %d" % (i + 1))
        train_features = all_features[0:int(0.8 * len(all_features))]
        train_labels = all_labels[0:int(0.8 * len(all_features))]
        test_features = all_features[int(0.8 * len(all_features)):]
        test_labels = all_labels[int(0.8 * len(all_features)):]
        length = len(test_labels)

        rate = 0
        print("NaiveBayes : ", end="")
        new_bayes = NaiveBayes(train_features, train_labels, feature_len)
        new_bayes.train()
        for j in range(0, length):
            res = new_bayes.predict(test_features[j])
            if res == test_labels[j]:
                rate += 1
        print(rate / length)
        if "NaiveBayes" not in count:
            count["NaiveBayes"] = rate / length
        else:
            count["NaiveBayes"] += rate / length

        rate = 0
        print("KNN : ", end="")
        for j in range(0, length):
            res = Knn(train_features, train_labels, test_features[j], 3)
            if res == test_labels[j]:
                rate += 1
        print(rate / length)
        if "KNN" not in count:
            count["KNN"] = rate / length
        else:
            count["KNN"] += rate / length

        rate = 0
        print("Logistic : ", end="")
        new_logistic = Logistic(train_features,
                                train_labels,
                                feature_len,
                                alpha=5,
                                tol=0.000001)
        new_logistic.train()
        for j in range(0, length):
            res = new_logistic.predict(test_features[j])
            if res == test_labels[j]:
                rate += 1
        print(rate / length)
        if "Logistic" not in count:
            count["Logistic"] = rate / length
        else:
            count["Logistic"] += rate / length

        rate = 0
        print("NeuralNetwork : ", end="")
        new_NN = NeuralNetwork(train_features,
                               train_labels,
                               feature_len,
                               hidden_num=32,
                               learn_rate=100)
        new_NN.train()
        for j in range(0, length):
            res = new_NN.predict(test_features[j])
            if res == test_labels[j]:
                rate += 1
        print(rate / length)
        if "NeuralNetwork" not in count:
            count["NeuralNetwork"] = rate / length
        else:
            count["NeuralNetwork"] += rate / length

        rate = 0
        print("Tree : ", end="")
        new_tree = Tree(train_features, train_labels, len(train_features[0]),
                        3, 8)
        new_tree.train()
        for j in range(0, length):
            res = new_tree.predictTree(test_features[j])
            if res == test_labels[j]:
                rate += 1
        print(rate / length)
        if "Tree" not in count:
            count["Tree"] = rate / length
        else:
            count["Tree"] += rate / length

        rate = 0
        print("AdaBoost : ", end="")
        new_boost = AdaBoost(train_features,
                             train_labels,
                             len(train_features[0]),
                             28,
                             mode=2)
        new_boost.train()
        for j in range(0, length):
            res = new_boost.predict(test_features[j])
            if res == test_labels[j]:
                rate += 1
        print(rate / length)
        if "AdaBoost" not in count:
            count["AdaBoost"] = rate / length
        else:
            count["AdaBoost"] += rate / length

        rate = 0
        print("RandomForest : ", end="")
        new_forest = RandomForest(30)
        new_forest.buildTrees(train_features, train_labels,
                              len(train_features[0]), 3, 6)
        for j in range(0, length):
            res = new_forest.predictForest(test_features[j])
            if res == test_labels[j]:
                rate += 1
        print(rate / length)
        if "RandomForest" not in count:
            count["RandomForest"] = rate / length
        else:
            count["RandomForest"] += rate / length

        rate = 0
        print("SVM : ", end="")
        new_svm = SVM(train_features,
                      train_labels,
                      C=43,
                      function='RBF',
                      d=0.53)
        new_svm.train()
        for j in range(0, length):
            res = new_svm.predict(test_features[j])
            if res == test_labels[j]:
                rate += 1
        print(rate / length)
        if "SVM" not in count:
            count["SVM"] = rate / length
        else:
            count["SVM"] += rate / length

        all_features, all_labels = now_provider.getFeatureAndLabel(
            all_lines, feature_len)

    print("\nAverage:")
    for x in count:
        print(x, end=": ")
        print(count[x] / 10)
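
Every classifier block above repeats the same scoring loop; a hedged helper, assuming each model exposes a predict(feature) method as in the snippet:

def accuracy(model, features, labels):
    # Fraction of test items whose prediction matches the label
    hits = sum(1 for f, l in zip(features, labels) if model.predict(f) == l)
    return hits / len(labels)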
Example 15
name = 'stdev2'
print '======Training======'
# load data from csv files
train = loadtxt('newData-2/data_' + name + '_train.csv')
#train = loadtxt('data/data_'+name+'_train.csv')
# use deep copy here to make cvxopt happy
X = train[:, 0:2].copy()
Y = train[:, 2:3].copy()

#X = np.array([[1.0,2.0],[2.0,2.0],[0.0,0.0],[-2.0,3.0]])
#Y = np.array([[1.0],[1.0],[-1.0],[-1.0]])

# Carry out training, primal and/or dual
C = 1
svm = SVM(X, Y, C)
svm.train()

#model = svm.train_gold()


# Define the predictSVM(x) function, which uses trained parameters
def predictSVM(x):
    return svm.test(x)
    #return svm.test_gold(x,model)


# plot training results
plotDecisionBoundary(X, Y, predictSVM, [-1, 0, 1], title='SVM Train')

print '======Validation======'
# load data from csv files
Example 16
    learning_rate = [1e-5, 5e-5, 1e-6, 7e-6, 1e-7, 3e-7, 1e-8]
    regularization_strength = [1e1, 1e2, 1e3, 1e4, 5e4, 1e5, 3e5, 1e6, 1e7]

    max_acc = -1.0
    for cs in cellsize:
        #Extract HOG features for the training and validation sets
        hog_train = hog_extraction(x_train, size=cs)
        hog_val = hog_extraction(x_val, size=cs)

        for lr in learning_rate:
            for rs in regularization_strength:
                svm = SVM()
                #Train
                history_loss = svm.train(hog_train,
                                         y_train,
                                         reg=rs,
                                         learning_rate=lr,
                                         num_iters=2000)
                #Predict classes on the validation set
                y_pre = svm.predict(hog_val)
                #Compute validation accuracy
                acc = np.mean(y_pre == y_val)

                #Keep the model with the highest validation accuracy
                if (acc > max_acc):
                    max_acc = acc
                    best_learning_rate = lr
                    best_regularization_strength = rs
                    best_cellsize = cs
                    best_svm = svm
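
A hedged follow-up at the end of the same scope, reporting the winning grid-search combination; the variable names are taken from the snippet above:

    print('best cellsize:', best_cellsize,
          '| best learning rate:', best_learning_rate,
          '| best regularization:', best_regularization_strength,
          '| validation accuracy:', max_acc)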
Example 17

(x_train, y_train), (x_test, y_test) = read_MNIST()

#results on the raw data
index, _ = choose_k(x_train, y_train, x_test, y_test, 5, 8)
print(index)
knn = KNN(index)
knn.train(x_train, y_train)
acc, pred = knn.accuracy(x_test, y_test)
print("KNN acc on the original data", acc)
plot_confusion_matrix(y_test, pred, title='KNN Confusion matrix original data')
plt.show()
print("knn report", metrics.classification_report(y_test, pred))
s = SVM()
s.train(x_train, y_train, x_test, y_test)
acc, _, pred = s.predict(x_test, y_test)
print("svm acc on original data", acc)
plot_confusion_matrix(y_test, pred, title='svm Confusion matrix')
plt.show()
print("svm report", metrics.classification_report(y_test, pred))

#Dimension reduction using PCA
start = timeit.default_timer()
pca = PCA(rid_dim)
(x_train_trans, x_test_trans) = pca.fit_transform(x_train, x_test)
stop = timeit.default_timer()
print('Time: ', stop - start)
#KNN
index, _ = choose_k(x_train_trans, y_train, x_test_trans, y_test, 5, 8)
print(index)
Example 18
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1]]
X=np.array(X).transpose()
print X.shape


y=np.array(y).flatten(1)
y[y==0]=-1
print y.shape

svms=SVM(X,y)
svms.train()
print len(svms.supportVector)
for i in range(len(svms.supportVector)):
	t=svms.supportVector[i]
	print svms.x[:,t]
svms.prints_test_linear()
Example 19
def main():
    dm_model = Doc2Vec.load('400_pvdm_doc2vec.d2v')
    dbow_model = Doc2Vec.load('400_pvdbow_doc2vec.d2v')

    #Load datasets for classifying
    path = 'datasets/'
    doc2vec_vector_size = 400
    files = [f for f in listdir(path) if isfile(join(path, f))]
    files.pop(0)

    data_loader = DataLoader(path)

    domains = data_loader.csv_files

    names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}

    domain_features = data_loader.get_feature_matrix(names)
    domain = domain_features.pop(0)
    x, y = domain
    #get size
    n_total_documents = 0

    for domain in domain_features:
        n_total_documents += len(domain[0])
        x = numpy.hstack((x, domain[0]))
        y = numpy.hstack((y, domain[1]))
    x, y = data_loader.create_random_samples(x, y, train_p=.8, test_p=.2)
    train_x, test_x = x
    train_y, test_y = y
    transformed_train_x = data_loader.get_transformed_features(
        train_x, sparse=True, tfidf=True, add_index_vector=False)
    transformed_test_x = data_loader.get_transformed_features(test_x,
                                                              sparse=True,
                                                              tfidf=True)
    all_features = numpy.zeros(shape=(n_total_documents, 800))
    all_labels = numpy.asarray([])

    i = 0

    dbow_dm_train_x = numpy.zeros((train_x.shape[0], 2 * doc2vec_vector_size))
    dbow_dm_test_x = numpy.zeros((test_x.shape[0], 2 * doc2vec_vector_size))
    """
        Set up the feature for the SVM by iterating through all the word vectors.
        Pre process each vector and then feed into doc2vec model, both the distributed memory
        and distributed bag of words. Concatenate the vectors for better classification results
        as per paragraph to vector paper by Mikolv.
    """
    for feature_vector in train_x:
        preprocessed_line = list(
            Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_train_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_train_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i += 1
    """
        Do the same as above but for the test set.
    """

    i = 0

    for feature_vector in test_x:  # bug fix: iterate the test features, not the labels
        preprocessed_line = list(
            Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_test_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_test_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i += 1

    print("Training doc2vec SVM")
    #Train SVM on classic bow
    svm = SVM()
    svm.train(dbow_dm_train_x, train_y)
    svm.test(dbow_dm_test_x, test_y)
    print("end of training doc2vec bow SVM\n")

    print("Training classic bow SVM")
    #Train SVM on classic bow
    svm = SVM()
    svm.train(transformed_train_x, train_y)
    svm.test(transformed_test_x, test_y)
    print("end of training classic bow SVM\n")
Example 20
def get_svm_model(parameter, X, y):
    svm = SVM()
    loss_history = svm.train(X, y, parameter[1], 1, parameter[0], 200, 1500, True)
    Visualizeloss(loss_history)
    input('Enter any key to predict...')
    return svm
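
A hypothetical call; the snippet does not document what each positional train() argument means, so the two-element parameter tuple and the X_train / y_train names below are placeholders:

model = get_svm_model(parameter=(0.5, 1e-3), X=X_train, y=y_train)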
Example 21
def main():
    print("Loading data...")
    X_list, y_list = get_data()

    print("Loaded data...")
    print('\n')
    dataset_names = DataLoader.get_all_files('Data')
    dataset_names = [name.split('/')[1].split('.')[0] for name in dataset_names]
    undersample = True

    for i, (X, y) in enumerate(zip(X_list, y_list)):
        print("Dataset: {}".format(dataset_names[i]))

        X = np.array(X)
        y = np.array(y)

        n = len(X)

        kf = KFold(n, random_state=1337, shuffle=True, n_folds=5)

        fold_accuracies = []
        fold_recalls = []
        fold_precisions =[]
        fold_aucs = []
        fold_f1s = []

        for fold_idx, (train, test) in enumerate(kf):
            X_train, X_test = X[train], X[test]
            y_train, y_test = y[train], y[test]

            if undersample:
                # Get all the targets that are not relevant, i.e., y = -1
                idx_undersample = np.where(y_train == -1)[0]

                # Get all the targets that are relevant, i.e., y = 1
                idx_positive = np.where(y_train == 1)[0]
                # Now sample from the non-relevant targets
                random_negative_sample = np.random.choice(idx_undersample, idx_positive.shape[0])

                X_train_positive = X_train[idx_positive]

                X_train_negative = X_train[random_negative_sample]

                X_train_undersample = np.hstack((X_train_positive, X_train_negative))

                y_train_positive = y_train[idx_positive]
                y_train_negative = y_train[random_negative_sample]
                y_train_undersample = np.hstack((y_train_positive, y_train_negative))

            count_vec = CountVectorizer(ngram_range=(1, 3), max_features=50000)

            count_vec.fit(X_train)

            if undersample:
                X_train = X_train_undersample
                y_train = y_train_undersample

            X_train_undersample = count_vec.transform(X_train)
            X_test = count_vec.transform(X_test)

            svm = SVM()
            svm.train(X_train_undersample, y_train)
            svm.test(X_test, y_test)

            f1_score = svm.metrics["F1"]
            precision = svm.metrics["Precision"]
            recall = svm.metrics["Recall"]
            auc = svm.metrics["AUC"]
            accuracy = svm.metrics["Accuracy"]

            fold_accuracies.append(accuracy)
            fold_precisions.append(precision)
            fold_recalls.append(recall)
            fold_aucs.append(auc)
            fold_f1s.append(f1_score)

        average_accuracy = np.mean(fold_accuracies)
        average_precision = np.mean(fold_precisions)
        average_recall = np.mean(fold_recalls)
        average_auc = np.mean(fold_aucs)
        average_f1 = np.mean(fold_f1s)

        print("Fold Average Accuracy: {}".format(average_accuracy))
        print("Fold Average F1: {}".format(average_f1))
        print("Fold Average Precision: {}".format(average_precision))
        print("Fold Average AUC: {}".format(average_auc))
        print("Fold Average Recall: {}".format(average_recall))
        print('\n')
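
A minimal sketch of the 1:1 undersampling used above, assuming labels in {-1, +1}; unlike np.random.choice's default in the snippet, this samples without replacement:

import numpy as np

def undersample(X, y, seed=1337):
    rng = np.random.RandomState(seed)
    pos = np.where(y == 1)[0]
    neg = rng.choice(np.where(y == -1)[0], pos.shape[0], replace=False)
    idx = np.concatenate((pos, neg))        # balanced index set
    return X[idx], y[idx]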
Example 22
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', ['n_feature_maps=', 'epochs=', 'max_words=', 'dropout_p=',
                                                      'undersample=', 'criterion=', 'optimizer=', 'layers=',
                                                      'hyperopt=', 'experiment_name=', 'w2v_path=', 'tacc=',
                                                      'use_all_data=', 'pretrain=', 'undersample_all=',
                                                      'save_model=', 'transfer_learning=', 'verbose=',
                                                      'use_embedding=', 'word_vector_size=', 'patience='])
    except getopt.GetoptError as error:
        print(error)
        sys.exit(2)

    w2v_path = '/Users/ericrincon/PycharmProjects/Deep-PICO/wikipedia-pubmed-and-PMC-w2v.bin'
    epochs = 50
    criterion = 'categorical_crossentropy'
    optimizer = 'adam'
    experiment_name = 'abstractCNN'
    w2v_size = 200
    activation = 'relu'
    dense_sizes = [400, 400]
    max_words = {'text': 270, 'mesh': 50, 'title': 17}

    filter_sizes = {'text': [2, 3, 4, 5],
                    'mesh': [2, 3, 4, 5],
                    'title': [2, 3, 4, 5]}
    n_feature_maps = {'text': 100, 'mesh': 50, 'title': 50}
    word_vector_size = 200
    using_tacc = False
    undersample = False
    use_embedding = False
    embedding = None
    use_all_data = False
    patience = 50
    p = .5
    verbose = 0
    pretrain = True
    filter_small_data = True
    save_model = False
    load_data_from_scratch = False
    print_output = True
    transfer_learning = False

    for opt, arg in opts:
        if opt == '--save_model':
            if int(arg) == 0:
                save_model = False
            elif int(arg) ==  1:
                save_model = True
        elif opt == '--transfer_learning':
            if int(arg) == 1:
                transfer_learning = True
            elif int(arg) == 0:
                transfer_learning = False
        elif opt == '--undersample_all':
            if int(arg) == 0:
                undersample_all = False
            elif int(arg) == 1:
                undersample_all = True
        elif opt == '--pretrain':
            if int(arg) == 0:
                pretrain = False
            elif int(arg) == 1:
                pretrain = True
            else:
                print("Invalid input")

        elif opt == '--verbose':
            verbose = int(arg)
        elif opt == '--use_embedding':
            if int(arg) == 0:
                use_embedding = False
        elif opt == '--dropout_p':
            p = float(arg)
        elif opt == '--epochs':
            epochs = int(arg)
        elif opt == '--layers':
            layer_sizes = arg.split(',')
        elif opt == '--n_feature_maps':
            n_feature_maps = int(arg)
        elif opt == '--criterion':
            criterion = arg
        elif opt == '--optimizer':
            optimizer = arg
        elif opt == '--tacc':
            if int(arg) == 1:
                using_tacc = True
        elif opt == '--hyperopt':
            if int(arg) == 1:
                hyperopt = True
        elif opt == '--experiment_name':
            experiment_name = arg
        elif opt == '--max_words':
            max_words = int(arg)
        elif opt == '--w2v_path':
            w2v_path = arg
        elif opt == '--word_vector_size':
            word_vector_size = int(arg)
        elif opt == '--use_all_data':
            if int(arg) == 1:
                use_all_data = True
        elif opt == '--patience':
            patience = int(arg)

        elif opt == '--undersample':
            if int(arg) == 0:
                undersample = False
            elif int(arg) == 1:
                undersample = True

        else:
            print("Option {} is not valid!".format(opt))


    if using_tacc:
        nltk.data.path.append('/work/03186/ericr/nltk_data/')
    print('Loading data...')

    if load_data_from_scratch:

        print('Loading Word2Vec...')
        w2v = Word2Vec.load_word2vec_format(w2v_path, binary=True)
        print('Loaded Word2Vec...')
        X_list = []
        y_list = []

        if use_embedding:

            X_list, y_list, embedding_list = DataLoader.get_data_as_seq(w2v, w2v_size, max_words)

        else:
            X_list, y_list = DataLoader.get_data_separately(max_words, word_vector_size,
                                                            w2v, use_abstract_cnn=True,
                                                            preprocess_text=False,
                                                            filter_small_data=filter_small_data)
    else:
        X_list, y_list = DataLoader.load_datasets_from_h5py('DataProcessed', True)


    print('Loaded data...')
    dataset_names = DataLoader.get_all_files('DataProcessed')
    dataset_names = [x.split('/')[-1].split('.')[0] for x in dataset_names]

    results_file = open(experiment_name + "_results.txt", "w+")

    for dataset_i, (X, y) in enumerate(zip(X_list, y_list)):
        if use_embedding:
            embedding = embedding_list[dataset_i]

        model_name = dataset_names[dataset_i]

        print("Dataset: {}".format(model_name))

        results_file.write(model_name)
        results_file.write("Dataset: {}".format(model_name))

        X_abstract, X_title, X_mesh = X['text'], X['title'], X['mesh']
        n = X_abstract.shape[0]
        kf = KFold(n, random_state=1337, shuffle=True, n_folds=5)

        if pretrain:
            pretrain_fold_accuracies = []
            pretrain_fold_recalls = []
            pretrain_fold_precisions =[]
            pretrain_fold_aucs = []
            pretrain_fold_f1s = []

        if transfer_learning:
            svm_fold_accuracies = []
            svm_fold_recalls = []
            svm_fold_precisions =[]
            svm_fold_aucs = []
            svm_fold_f1s = []

        fold_accuracies = []
        fold_recalls = []
        fold_precisions =[]
        fold_aucs = []
        fold_f1s = []

        for fold_idx, (train, test) in enumerate(kf):
            temp_model_name = experiment_name + '_' + model_name + '_fold_{}'.format(fold_idx + 1)


            cnn = AbstractCNN(n_classes=2,  max_words=max_words, w2v_size=w2v_size, vocab_size=1000, use_embedding=use_embedding,
                              filter_sizes=filter_sizes, n_feature_maps=n_feature_maps, dense_layer_sizes=dense_sizes.copy(),
                              name=temp_model_name, activation_function=activation, dropout_p=p, embedding=embedding)

            if pretrain:
                X_abstract_train = X_abstract[train, :, :]
                X_title_train = X_title[train, :, :]
                X_mesh_train = X_mesh[train, :, :]

                y_train = y[train, :]

                X_abstract_test = X_abstract[test, :, :]
                X_title_test = X_title[test, :, :]
                X_mesh_test = X_mesh[test, :, :]

                y_test = y[test, :]

                for i, (_x, _y) in enumerate(zip(X_list, y_list)):
                    if not i == dataset_i:
                        X_abstract_train = np.vstack((X_abstract_train, _x['text'][()]))
                        X_title_train = np.vstack((X_title_train, _x['title'][()]))
                        X_mesh_train = np.vstack((X_mesh_train, _x['mesh'][()]))
                        y_train = np.vstack((y_train, _y[()]))
                print(X_abstract_train.shape)

                cnn.train(X_abstract_train, X_title_train, X_mesh_train, y_train, n_epochs=epochs,
                          optim_algo=optimizer, criterion=criterion, verbose=verbose, patience=patience,
                          save_model=save_model)


                accuracy, f1_score, precision, auc, recall = cnn.test(X_abstract_test, X_title_test, X_mesh_test, y_test,
                                                                      print_output=True)

                print("Results from training on all data only")

                print("Accuracy: {}".format(accuracy))
                print("F1: {}".format(f1_score))
                print("Precision: {}".format(precision))
                print("AUC: {}".format(auc))
                print("Recall: {}".format(recall))
                print("\n")

                pretrain_fold_accuracies.append(accuracy)
                pretrain_fold_precisions.append(precision)
                pretrain_fold_recalls.append(recall)
                pretrain_fold_aucs.append(auc)
                pretrain_fold_f1s.append(f1_score)

            if not use_embedding:
                X_abstract_train = X_abstract[train, :, :]
                X_title_train = X_title[train, :, :]
                X_mesh_train = X_mesh[train, :, :]
                y_train = y[train, :]

                X_abstract_test = X_abstract[test, :, :]
                X_titles_test = X_title[test, :, :]
                X_mesh_test = X_mesh[test, :, :]
                y_test = y[test, :]

            elif use_embedding:
                X_abstract_train = X_abstract[train]
                X_title_train = X_title[train]
                X_mesh_train = X_mesh[train]
                y_train = y[train, :]

                X_abstract_test = X_abstract[test]
                X_titles_test = X_title[test]
                X_mesh_test = X_mesh[test]
                y_test = y[test, :]

                if undersample:
                    X_abstract_train, X_title_train, X_mesh_train, y_train = \
                        DataLoader.undersample_seq(X_abstract_train, X_title_train, X_mesh_train, y_train)

            cnn.train(X_abstract_train, X_title_train, X_mesh_train, y_train, n_epochs=epochs, optim_algo=optimizer,
                      criterion=criterion, verbose=verbose, patience=patience,
                      save_model=save_model)
            accuracy, f1_score, precision, auc, recall = cnn.test(X_abstract_test, X_titles_test, X_mesh_test, y_test,
                                                                  print_output)

            if transfer_learning:
                svm = SVM()

                # Transfer weights
                X_transfer_train = cnn.output_learned_features([X_abstract_train, X_title_train, X_mesh_train])
                X_transfer_test = cnn.output_learned_features([X_abstract_test, X_titles_test, X_mesh_test])

                svm.train(X_transfer_train, DataLoader.onehot2list(y_train))
                svm.test(X_transfer_test, DataLoader.onehot2list(y_test))

                print("\nSVM results")
                print(svm)
                print('\n')

                svm_fold_accuracies.append(svm.metrics['Accuracy'])
                svm_fold_precisions.append(svm.metrics['Precision'])
                svm_fold_aucs.append(svm.metrics['AUC'])
                svm_fold_recalls.append(svm.metrics['Recall'])
                svm_fold_f1s.append(svm.metrics['F1'])

            print('CNN results')
            print("Accuracy: {}".format(accuracy))
            print("F1: {}".format(f1_score))
            print("Precision: {}".format(precision))
            print("AUC: {}".format(auc))
            print("Recall: {}".format(recall))

            fold_accuracies.append(accuracy)
            fold_precisions.append(precision)
            fold_recalls.append(recall)
            fold_aucs.append(auc)
            fold_f1s.append(f1_score)



        if pretrain:
            pretrain_average_accuracy = np.mean(pretrain_fold_accuracies)
            pretrain_average_precision = np.mean(pretrain_fold_precisions)
            pretrain_average_recall = np.mean(pretrain_fold_recalls)
            pretrain_average_auc = np.mean(pretrain_fold_aucs)
            pretrain_average_f1 = np.mean(pretrain_fold_f1s)

            print("\nAverage results from using all data")
            print("Fold Average Accuracy: {}".format(pretrain_average_accuracy))
            print("Fold Average F1: {}".format(pretrain_average_f1))
            print("Fold Average Precision: {}".format(pretrain_average_precision))
            print("Fold Average AUC: {}".format(pretrain_average_auc))
            print("Fold Average Recall: {}".format(pretrain_average_recall))
            print('\n')



        average_accuracy = np.mean(fold_accuracies)
        average_precision = np.mean(fold_precisions)
        average_recall = np.mean(fold_recalls)
        average_auc = np.mean(fold_aucs)
        average_f1 = np.mean(fold_f1s)


        print('CNN Results')
        print("Fold Average Accuracy: {}".format(average_accuracy))
        print("Fold Average F1: {}".format(average_f1))
        print("Fold Average Precision: {}".format(average_precision))
        print("Fold Average AUC: {}".format(average_auc))
        print("Fold Average Recall: {}".format(average_recall))
        print('\n')

        results_file.write("CNN results\n")
        results_file.write("Fold Average Accuracy: {}\n".format(average_accuracy))
        results_file.write("Fold Average F1: {}\n".format(average_f1))
        results_file.write("Fold Average Precision: {}\n".format(average_precision))
        results_file.write("Fold Average AUC: {}\n".format(average_auc))
        results_file.write("Fold Average Recall: {}\n".format(average_recall))
        results_file.write('\n')

        if transfer_learning:
            average_accuracy = np.mean(svm_fold_accuracies)
            average_precision = np.mean(svm_fold_precisions)
            average_recall = np.mean(svm_fold_recalls)
            average_auc = np.mean(svm_fold_aucs)
            average_f1 = np.mean(svm_fold_f1s)

            print("SVM with cnn features")
            print("Fold Average Accuracy: {}".format(average_accuracy))
            print("Fold Average F1: {}".format(average_f1))
            print("Fold Average Precision: {}".format(average_precision))
            print("Fold Average AUC: {}".format(average_auc))
            print("Fold Average Recall: {}".format(average_recall))
            print('\n')

            results_file.write("SVM with cnn features\n")
            results_file.write("Fold Average Accuracy: {}\n".format(average_accuracy))
            results_file.write("Fold Average F1: {}\n".format(average_f1))
            results_file.write("Fold Average Precision: {}\n".format(average_precision))
            results_file.write("Fold Average AUC: {}\n".format(average_auc))
            results_file.write("Fold Average Recall: {}\n".format(average_recall))
            results_file.write('\n')
Example 23
name = 'stdev2'
print '======Training======'
# load data from csv files
train = loadtxt('newData-2/data_'+name+'_train.csv')
#train = loadtxt('data/data_'+name+'_train.csv')
# use deep copy here to make cvxopt happy
X = train[:, 0:2].copy()
Y = train[:, 2:3].copy()

#X = np.array([[1.0,2.0],[2.0,2.0],[0.0,0.0],[-2.0,3.0]])
#Y = np.array([[1.0],[1.0],[-1.0],[-1.0]])

# Carry out training, primal and/or dual
C = 1
svm = SVM(X,Y,C)
svm.train()
#model = svm.train_gold()

# Define the predictSVM(x) function, which uses trained parameters
def predictSVM(x):
	return svm.test(x)
	#return svm.test_gold(x,model)


# plot training results
plotDecisionBoundary(X, Y, predictSVM, [-1, 0, 1], title = 'SVM Train')



print '======Validation======'
# load data from csv files