Example 1
def part_b(org_train, org_train_labels, best_lr):
    """
    This function implements part B
    :return: best learning rate
    """
    print "part b - start"
    c_lst = np.arange(1, 999, 10).astype("float32") / 1000.0

    validating_acc_lst = []
    for c in c_lst:
        mean_acc = 0
        for i in range(10):
            svm = SVM(org_train.shape[1])
            svm.train(org_train, org_train_labels, best_lr, C=c, T=1000)
            # org_validation / org_validation_labels are module-level globals
            mean_acc += svm.test(org_validation, org_validation_labels)
        validating_acc_lst.append(mean_acc / 10.0)  # average over the 10 runs
    plt.figure()
    plot_graph(validating_acc_lst, c_lst, "q3_part_b", "",
               "Accuracy vs C for SVM", "Accuracy", "C")
    best_acc_indx = validating_acc_lst.index(max(validating_acc_lst))
    best_c = c_lst[best_acc_indx]
    print "The best C is {} for accuracy: {}".format(best_c,
                                                     max(validating_acc_lst))
    print "part b - done"
    return best_c
Example 2
def part_a(org_train, org_train_labels):
    """
    This function calculates part A
    :return: best learning rate
    """
    print "part a - start"
    learning_rate_lst = np.arange(1, 99).astype("float32") / 100.0

    validating_acc_lst = []
    for lr in learning_rate_lst:
        mean_acc = 0
        for i in range(10):
            svm = SVM(org_train.shape[1])
            svm.train(org_train, org_train_labels, lr, T=1000)
            mean_acc += svm.test(org_validation, org_validation_labels)
        validating_acc_lst.append(mean_acc / 10.0)  # average over the 10 runs
    plt.figure()
    plot_graph(validating_acc_lst, learning_rate_lst, "q3_part_a", "",
               "Accuracy vs Learning Rate for SVM", "Accuracy",
               "Learning Rate")
    best_acc_indx = validating_acc_lst.index(max(validating_acc_lst))
    best_lr = learning_rate_lst[best_acc_indx]
    print "The best learning rate is {} for accuracy: {}".format(
        best_lr, max(validating_acc_lst))
    print "part a - done"
    return best_lr
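
Parts A and B both rely on module-level org_validation / org_validation_labels arrays and on a plot_graph helper that is not shown here. A minimal sketch of such a helper, assuming matplotlib and the argument order used at the call sites above (y-values, x-values, file name, an unused label, title, y-axis label, x-axis label):

import matplotlib.pyplot as plt

def plot_graph(y_values, x_values, file_name, _label, title, y_label, x_label):
    # Hypothetical helper matching the call sites above; saves the figure to disk.
    plt.plot(x_values, y_values)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.savefig(file_name + '.png')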
Example 3
def train_and_test(train_x, test_x, training_class, testing_class, kernel):
    classes = list(Counter(training_class).keys())  # unique class labels
    total_accuracy = []
    for label in classes:
        # One-vs-rest labels: +1 for the current class, -1 for all others
        train_y = np.array([1.0 if t == label else -1.0 for t in training_class])
        test_y = np.array([1.0 if t == label else -1.0 for t in testing_class])

        classifier = SVM(kernel=kernel, C=0.1)
        classifier.train(train_x, train_y)
        y_predict = classifier.test(test_x)
        correct = np.sum(y_predict == test_y)
        print("%d out of %d predictions correct" % (correct, len(y_predict)))
        accuracy = correct / len(y_predict)
        print("accuracy is {}".format(accuracy))
        total_accuracy.append(accuracy)
    mean_accuracy = np.mean(np.array(total_accuracy))
    print('mean accuracy is {}'.format(mean_accuracy))

    return mean_accuracy
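
A usage sketch for train_and_test above, assuming the SVM(kernel=..., C=...) wrapper accepts a callable kernel; the linear kernel and the toy data are illustrative only:

import numpy as np

def linear_kernel(x1, x2):
    # Illustrative kernel; the exact signature the SVM wrapper expects is assumed.
    return np.dot(x1, x2)

train_x = np.random.randn(40, 5)
training_class = np.array([0] * 20 + [1] * 20)
test_x = np.random.randn(10, 5)
testing_class = np.array([0] * 5 + [1] * 5)

mean_acc = train_and_test(train_x, test_x, training_class, testing_class, linear_kernel)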
Example 4
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', ['n_feature_maps=', 'epochs=', 'max_words=', 'dropout_p=',
                                                      'undersample=', 'criterion=', 'optimizer=', 'layers=',
                                                      'hyperopt=', 'experiment_name=', 'w2v_path=', 'tacc=',
                                                      'use_all_data=', 'pretrain=', 'undersample_all=',
                                                      'save_model=', 'transfer_learning=', 'verbose=',
                                                      'use_embedding=', 'word_vector_size=', 'patience='])
    except getopt.GetoptError as error:
        print(error)
        sys.exit(2)

    w2v_path = '/Users/ericrincon/PycharmProjects/Deep-PICO/wikipedia-pubmed-and-PMC-w2v.bin'
    epochs = 50
    criterion = 'categorical_crossentropy'
    optimizer = 'adam'
    experiment_name = 'abstractCNN'
    w2v_size = 200
    activation = 'relu'
    dense_sizes = [400, 400]
    max_words = {'text': 270, 'mesh': 50, 'title': 17}

    filter_sizes = {'text': [2, 3, 4, 5],
                    'mesh': [2, 3, 4, 5],
                    'title': [2, 3, 4, 5]}
    n_feature_maps = {'text': 100, 'mesh': 50, 'title': 50}
    word_vector_size = 200
    using_tacc = False
    undersample = False
    use_embedding = False
    embedding = None
    use_all_data = False
    patience = 50
    p = .5
    verbose = 0
    pretrain = True
    filter_small_data = True
    save_model = False
    load_data_from_scratch = False
    print_output = True
    transfer_learning = False

    for opt, arg in opts:
        if opt == '--save_model':
            if int(arg) == 0:
                save_model = False
            elif int(arg) == 1:
                save_model = True
        elif opt == '--transfer_learning':
            if int(arg) == 1:
                transfer_learning = True
            elif int(arg) == 0:
                transfer_learning = False
        elif opt == '--undersample_all':
            if int(arg) == 0:
                undersample_all = False
            elif int(arg) == 1:
                undersample_all = True
        elif opt == '--pretrain':
            if int(arg) == 0:
                pretrain = False
            elif int(arg) == 1:
                pretrain = True
            else:
                print("Invalid input")

        elif opt == '--verbose':
            verbose = int(arg)
        elif opt == '--use_embedding':
            if int(arg) == 0:
                use_embedding = False
        elif opt == '--dropout_p':
            p = float(arg)
        elif opt == '--epochs':
            epochs = int(arg)
        elif opt == '--layers':
            layer_sizes = arg.split(',')
        elif opt == '--n_feature_maps':
            n_feature_maps = int(arg)
        elif opt == '--criterion':
            criterion = arg
        elif opt == '--optimizer':
            optimizer = arg
        elif opt == '--tacc':
            if int(arg) == 1:
                using_tacc = True
        elif opt == '--hyperopt':
            if int(arg) == 1:
                hyperopt = True
        elif opt == '--experiment_name':
            experiment_name = arg
        elif opt == '--max_words':
            max_words = int(arg)
        elif opt == '--w2v_path':
            w2v_path = arg
        elif opt == '--word_vector_size':
            word_vector_size = int(arg)
        elif opt == '--use_all_data':
            if int(arg) == 1:
                use_all_data = True
        elif opt == '--patience':
            patience = int(arg)

        elif opt == '--undersample':
            if int(arg) == 0:
                undersample = False
            elif int(arg) == 1:
                undersample = True

        else:
            print("Option {} is not valid!".format(opt))


    if using_tacc:
        nltk.data.path.append('/work/03186/ericr/nltk_data/')
    print('Loading data...')

    if load_data_from_scratch:

        print('Loading Word2Vec...')
        w2v = Word2Vec.load_word2vec_format(w2v_path, binary=True)
        print('Loaded Word2Vec...')
        X_list = []
        y_list = []

        if use_embedding:

            X_list, y_list, embedding_list = DataLoader.get_data_as_seq(w2v, w2v_size, max_words)

        else:
            X_list, y_list = DataLoader.get_data_separately(max_words, word_vector_size,
                                                            w2v, use_abstract_cnn=True,
                                                            preprocess_text=False,
                                                            filter_small_data=filter_small_data)
    else:
        X_list, y_list = DataLoader.load_datasets_from_h5py('DataProcessed', True)


    print('Loaded data...')
    dataset_names = DataLoader.get_all_files('DataProcessed')
    dataset_names = [x.split('/')[-1].split('.')[0] for x in dataset_names]

    results_file = open(experiment_name + "_results.txt", "w+")

    for dataset_i, (X, y) in enumerate(zip(X_list, y_list)):
        if use_embedding:
            embedding = embedding_list[dataset_i]

        model_name = dataset_names[dataset_i]

        print("Dataset: {}".format(model_name))

        results_file.write("Dataset: {}\n".format(model_name))

        X_abstract, X_title, X_mesh = X['text'], X['title'], X['mesh']
        n = X_abstract.shape[0]
        kf = KFold(n, random_state=1337, shuffle=True, n_folds=5)

        if pretrain:
            pretrain_fold_accuracies = []
            pretrain_fold_recalls = []
            pretrain_fold_precisions = []
            pretrain_fold_aucs = []
            pretrain_fold_f1s = []

        if transfer_learning:
            svm_fold_accuracies = []
            svm_fold_recalls = []
            svm_fold_precisions = []
            svm_fold_aucs = []
            svm_fold_f1s = []

        fold_accuracies = []
        fold_recalls = []
        fold_precisions = []
        fold_aucs = []
        fold_f1s = []

        for fold_idx, (train, test) in enumerate(kf):
            temp_model_name = experiment_name + '_' + model_name + '_fold_{}'.format(fold_idx + 1)


            cnn = AbstractCNN(n_classes=2,  max_words=max_words, w2v_size=w2v_size, vocab_size=1000, use_embedding=use_embedding,
                              filter_sizes=filter_sizes, n_feature_maps=n_feature_maps, dense_layer_sizes=dense_sizes.copy(),
                              name=temp_model_name, activation_function=activation, dropout_p=p, embedding=embedding)

            if pretrain:
                X_abstract_train = X_abstract[train, :, :]
                X_title_train = X_title[train, :, :]
                X_mesh_train = X_mesh[train, :, :]

                y_train = y[train, :]

                X_abstract_test = X_abstract[test, :, :]
                X_title_test = X_title[test, :, :]
                X_mesh_test = X_mesh[test, :, :]

                y_test = y[test, :]

                for i, (_x, _y) in enumerate(zip(X_list, y_list)):
                    if i != dataset_i:
                        X_abstract_train = np.vstack((X_abstract_train, _x['text'][()]))
                        X_title_train = np.vstack((X_title_train, _x['title'][()]))
                        X_mesh_train = np.vstack((X_mesh_train, _x['mesh'][()]))
                        y_train = np.vstack((y_train, _y[()]))
                print(X_abstract_train.shape)

                cnn.train(X_abstract_train, X_title_train, X_mesh_train, y_train, n_epochs=epochs,
                          optim_algo=optimizer, criterion=criterion, verbose=verbose, patience=patience,
                          save_model=save_model)


                accuracy, f1_score, precision, auc, recall = cnn.test(X_abstract_test, X_title_test, X_mesh_test, y_test,
                                                                      print_output=True)

                print("Results from training on all data only")

                print("Accuracy: {}".format(accuracy))
                print("F1: {}".format(f1_score))
                print("Precision: {}".format(precision))
                print("AUC: {}".format(auc))
                print("Recall: {}".format(recall))
                print("\n")

                pretrain_fold_accuracies.append(accuracy)
                pretrain_fold_precisions.append(precision)
                pretrain_fold_recalls.append(recall)
                pretrain_fold_aucs.append(auc)
                pretrain_fold_f1s.append(f1_score)

            if not use_embedding:
                X_abstract_train = X_abstract[train, :, :]
                X_title_train = X_title[train, :, :]
                X_mesh_train = X_mesh[train, :, :]
                y_train = y[train, :]

                X_abstract_test = X_abstract[test, :, :]
                X_titles_test = X_title[test, :, :]
                X_mesh_test = X_mesh[test, :, :]
                y_test = y[test, :]

            elif use_embedding:
                X_abstract_train = X_abstract[train]
                X_title_train = X_title[train]
                X_mesh_train = X_mesh[train]
                y_train = y[train, :]

                X_abstract_test = X_abstract[test]
                X_titles_test = X_title[test]
                X_mesh_test = X_mesh[test]
                y_test = y[test, :]

                if undersample:
                    X_abstract_train, X_title_train, X_mesh_train, y_train = \
                        DataLoader.undersample_seq(X_abstract_train, X_title_train, X_mesh_train, y_train)

            cnn.train(X_abstract_train, X_title_train, X_mesh_train, y_train, n_epochs=epochs, optim_algo=optimizer,
                      criterion=criterion, verbose=verbose, patience=patience,
                      save_model=save_model)
            accuracy, f1_score, precision, auc, recall = cnn.test(X_abstract_test, X_titles_test, X_mesh_test, y_test,
                                                                  print_output)

            if transfer_learning:
                svm = SVM()

                # Transfer weights
                X_transfer_train = cnn.output_learned_features([X_abstract_train, X_title_train, X_mesh_train])
                X_transfer_test = cnn.output_learned_features([X_abstract_test, X_titles_test, X_mesh_test])

                svm.train(X_transfer_train, DataLoader.onehot2list(y_train))
                svm.test(X_transfer_test, DataLoader.onehot2list(y_test))

                print("\nSVM results")
                print(svm)
                print('\n')

                svm_fold_accuracies.append(svm.metrics['Accuracy'])
                svm_fold_precisions.append(svm.metrics['Precision'])
                svm_fold_aucs.append(svm.metrics['AUC'])
                svm_fold_recalls.append(svm.metrics['Recall'])
                svm_fold_f1s.append(svm.metrics['F1'])

            print('CNN results')
            print("Accuracy: {}".format(accuracy))
            print("F1: {}".format(f1_score))
            print("Precision: {}".format(precision))
            print("AUC: {}".format(auc))
            print("Recall: {}".format(recall))

            fold_accuracies.append(accuracy)
            fold_precisions.append(precision)
            fold_recalls.append(recall)
            fold_aucs.append(auc)
            fold_f1s.append(f1_score)



        if pretrain:
            pretrain_average_accuracy = np.mean(pretrain_fold_accuracies)
            pretrain_average_precision = np.mean(pretrain_fold_precisions)
            pretrain_average_recall = np.mean(pretrain_fold_recalls)
            pretrain_average_auc = np.mean(pretrain_fold_aucs)
            pretrain_average_f1 = np.mean(pretrain_fold_f1s)

            print("\nAverage results from using all data")
            print("Fold Average Accuracy: {}".format(pretrain_average_accuracy))
            print("Fold Average F1: {}".format(pretrain_average_f1))
            print("Fold Average Precision: {}".format(pretrain_average_precision))
            print("Fold Average AUC: {}".format(pretrain_average_auc))
            print("Fold Average Recall: {}".format(pretrain_average_recall))
            print('\n')



        average_accuracy = np.mean(fold_accuracies)
        average_precision = np.mean(fold_precisions)
        average_recall = np.mean(fold_recalls)
        average_auc = np.mean(fold_aucs)
        average_f1 = np.mean(fold_f1s)


        print('CNN Results')
        print("Fold Average Accuracy: {}".format(average_accuracy))
        print("Fold Average F1: {}".format(average_f1))
        print("Fold Average Precision: {}".format(average_precision))
        print("Fold Average AUC: {}".format(average_auc))
        print("Fold Average Recall: {}".format(average_recall))
        print('\n')

        results_file.write("CNN results\n")
        results_file.write("Fold Average Accuracy: {}\n".format(average_accuracy))
        results_file.write("Fold Average F1: {}\n".format(average_f1))
        results_file.write("Fold Average Precision: {}\n".format(average_precision))
        results_file.write("Fold Average AUC: {}\n".format(average_auc))
        results_file.write("Fold Average Recall: {}\n".format(average_recall))
        results_file.write('\n')

        if transfer_learning:
            average_accuracy = np.mean(svm_fold_accuracies)
            average_precision = np.mean(svm_fold_precisions)
            average_recall = np.mean(svm_fold_recalls)
            average_auc = np.mean(svm_fold_aucs)
            average_f1 = np.mean(svm_fold_f1s)

            print("SVM with cnn features")
            print("Fold Average Accuracy: {}".format(average_accuracy))
            print("Fold Average F1: {}".format(average_f1))
            print("Fold Average Precision: {}".format(average_precision))
            print("Fold Average AUC: {}".format(average_auc))
            print("Fold Average Recall: {}".format(average_recall))
            print('\n')

            results_file.write("SVM with cnn features\n")
            results_file.write("Fold Average Accuracy: {}\n".format(average_accuracy))
            results_file.write("Fold Average F1: {}\n".format(average_f1))
            results_file.write("Fold Average Precision: {}\n".format(average_precision))
            results_file.write("Fold Average AUC: {}\n".format(average_auc))
            results_file.write("Fold Average Recall: {}\n".format(average_recall))
            results_file.write('\n')
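
For reference, a minimal sketch of how getopt consumes the long-option list above (hypothetical flag values; getopt returns the (option, value) pairs that the dispatch loop above inspects):

import getopt

argv = ['--epochs=20', '--undersample=1', '--experiment_name=demo']
opts, args = getopt.getopt(argv, '', ['epochs=', 'undersample=', 'experiment_name='])
print(opts)  # [('--epochs', '20'), ('--undersample', '1'), ('--experiment_name', 'demo')]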
Example 5
def init():

    def plot(arg):
        plt.xlabel('Iterations')
        plt.ylabel(arg)
        plt.legend()
        plt.show()
        plt.clf()

    # Argument parser structure
    parser = argparse.ArgumentParser(description='Classification models: Logistic Regression, SVM, Naive Bayes Training.')
    parser.add_argument('-p', '--preprocess', action='store_true', help='perform preprocessing of emails')
    parser.add_argument('-f', '--figure', action='store_true', help='plot training figures (performs validation)')

    # Parse arguments
    parsed = parser.parse_args()
    preprocess = parsed.preprocess
    figure = parsed.figure
    
    dataHandler = DataHandler()

    if preprocess:
        print('Extracting Features ............. ', end='', flush=True)
        start = time.time()
        dataHandler.saveFeatures()
        print('done -- ' + str(round(time.time()-start, 3)) + 's')

    print('Loading Data .................... ')
    start = time.time()
    x_train, y_train, x_test, y_test = dataHandler.loadTrainingData()
    x_val = deepcopy(x_test)
    y_val = deepcopy(y_test)
    
    # Logistic Regression
    logistic = LogisticRegression(lr=0.2, num_iter=1000, val=figure)
    start = time.time()
    train_history, val_history = logistic.fit(x_train, y_train, x_val, y_val)
    plt.plot(range(len(train_history)), train_history, label='Training Loss')
    plt.plot(range(len(val_history)), val_history, label='Validation Loss')
    
    accuracy = logistic.test(x_test, y_test)
    print('Test Accuracy: {}%'.format(round(100*accuracy, 2)))

    # Plot
    if figure: plot("Loss")
    
    # SVM training
    svm = SVM(lr=0.1, num_iter=420, val=figure)
    start = time.time()
    train_history, val_history = svm.fit(x_train, y_train, x_val, y_val)
    plt.plot(range(len(train_history)), train_history, label='Training Misclassification Ratio')
    plt.plot(range(len(val_history)), val_history, label='Validation Misclassification Ratio')
    accuracy = svm.test(x_test, y_test)
    print('Test Accuracy: {}%'.format(round(100*accuracy, 2)))
    
    # Plot
    if figure: plot("Missclassification Ratio")

    # Naive Bayes
    bayes = NaiveBayes()
    start = time.time()
    bayes.fit(x_train, y_train)
    accuracy = bayes.test(x_test, y_test)
    print('Test Accuracy: {}%'.format(round(100*accuracy, 2)))
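
The snippet stores start = time.time() before each fit but only reports elapsed time for the preprocessing step; a small context manager, sketched here, would factor out that timing pattern in the same output style:

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # Print elapsed wall-clock time in the same style as the preprocessing step above.
    start = time.time()
    yield
    print(label + ' -- ' + str(round(time.time() - start, 3)) + 's')

# Hypothetical usage:
#     with timed('Logistic Regression fit'):
#         logistic.fit(x_train, y_train, x_val, y_val)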
Example 6
        predictlabel = knn.training(trainset=dataset.trainset,
                                    testset=dataset.testset,
                                    trainlabel=dataset.trainlabel,
                                    K=9)
        print(predictlabel)
        evaluation(dataset.testlabel, predictlabel)

    elif method == 'svm':
        print("---------running SVM------------")
        svm = SVM()
        dataset.preprocessing(matfile, 0.7, usingG=1, isSVM=1)
        svm.training(trainset=dataset.trainset,
                     trainlabel=dataset.trainlabel,
                     sigma=5,
                     C=100)
        predictlabel = svm.test(testset=dataset.testset)
        print(predictlabel)
        evaluation(dataset.testlabel, predictlabel)

    elif method == 'other':
        print("---------running ------------")
        lr = LogisticRegression()
        dataset.preprocessing(porfile, 0.7, usingG=0, isSVM=0)
        lr.training(trainset=dataset.trainset,
                    trainlabel=dataset.trainlabel,
                    alpha=0.05,
                    iteration=500)
        predictlabel = lr.test(testset=dataset.testset)
        print(predictlabel)
        evaluation(dataset.testlabel, predictlabel)
    else:
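
This snippet ends mid-branch; the evaluation helper it calls is defined elsewhere. A plausible minimal version, assuming it simply compares predicted labels with the ground truth and reports accuracy:

import numpy as np

def evaluation(testlabel, predictlabel):
    # Hypothetical implementation: report plain accuracy.
    testlabel = np.asarray(testlabel)
    predictlabel = np.asarray(predictlabel)
    accuracy = np.mean(testlabel == predictlabel)
    print('accuracy: {:.4f}'.format(accuracy))
    return accuracy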
Example 7
def transfer_learning(print_output=True):
    path = 'datasets/'
    names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}
    transformed_data_sets = []

    files = [f for f in listdir(path) if isfile(join(path, f))]
    files.pop(0)
    data_loader = DataLoader(path)
    domains = data_loader.csv_files
    all_domains = copy.deepcopy(domains)
    training_domains = data_loader.csv_files
    all_domains_svm_wda_metrics_list = []
    all_domains_svm_metrics_list = []
    all_domains_svm_bow_mlp_list = []
    all_domains_mlp_fold_scores = []

    for i, held_out_domain in enumerate(domains):
        training_domains.pop(i)
        svm_wda_metrics_list = []
        svm_metrics_list = []
        svm_bow_mlp_list = []

        domain_name = files[i].split('.')[0]
        folder_name = 'output' + '/' + domain_name

        output = "Dataset: {}".format(files[i])
        if print_output:
            print(output)

        #shuffle(data_loader.csv_files)
        data_loader.csv_files = training_domains
        data_sets = data_loader.csv_files
        domains = data_loader.get_feature_matrix(names)

        # Hold one file out of the csv files in the data loader and use it as the held-out domain

        # Get the feature representation of the held-out data
        held_out_x, held_out_y = data_loader.get_feature_matrix(names, held_out_domain)
        # Create the folds for the held-out data (default: 5)
        folds = data_loader.cross_fold_valdation(held_out_x, held_out_y)
        # Get the total number of domains, i.e. the number of files with documents
        n_source_domains = len(data_sets)
        os.makedirs(folder_name)

        #Must convert the data type of the matrix for theano
        feature_engineer = Feature_Engineer()

        #Start the 5 fold cross validation
        for n_fold, fold in enumerate(folds):
            output = "Fold {}: \n".format(n_fold)
            if print_output:
                print(output)
            output = '{}/{}/fold_{}.csv'.format(os.getcwd(), folder_name, (n_fold + 1))
            file = open(output, 'w')
            csv_writer = csv.writer(file)

            # Each sample is a list that contains the x and y for the classifier.
            # Typically fold[0] would be the train sample, but the folds are
            # swapped here to test the effectiveness of the domain adaptation.
            train_sample = fold[1]
            test_sample = fold[0]

            # These are the original copies to be merged into the augmented feature matrix.
            # Each sample contains the text and y labels before they go into the sklearn count vectorizer.
            train_x, train_y = train_sample
            test_x, test_y = test_sample

            train_y[train_y == 0] = 2
            train_y[train_y == 1] = 3
            test_y[test_y == 0] = 2
            test_y[test_y == 1] = 3


            #Get the bag of words representation of the small 20% target source data and transform the other 80%
            #of the data.
            train_x = data_loader.get_transformed_features(train_x, True, False, True)
            test_x = data_loader.transform(test_x, True, True)

            transformed_domains = []

            #Transform the domains with respect to the training data
            for domain in domains:
                domain_x, domain_y = domain
                transformed_domain_x = data_loader.transform(domain_x, True, True)
                transformed_domain_x, domain_y = data_loader.underSample(transformed_domain_x, domain_y)
                transformed_domains.append([transformed_domain_x, domain_y])

            augmented_feature_matrix_train, augmented_y_train = feature_engineer.augmented_feature_matrix(transformed_domains,
                                                                                              [train_x, train_y])
            augmented_feature_matrix_test, augmented_y_test = feature_engineer.augmented_feature_matrix(held_out_domain=[test_x, test_y],
                                                                                                        train_or_test=False,
                                                                                                        n_source_domains=len(transformed_domains))
            augmented_y_test[augmented_y_test == 2] = 0
            augmented_y_test[augmented_y_test == 3] = 1
            #SVM with the augmented feature matrix for domain adaptation
            svm_wda = SVM()
            svm_wda.train(augmented_feature_matrix_train, augmented_y_train)
            svm_wda.test(augmented_feature_matrix_test, augmented_y_test)
            output = "\nSVM with domain adaptation metrics:"
            csv_writer.writerow([output])
            if print_output:
                print(output)
                print(svm_wda)
                print("\n")
            svm_wda_metrics_list.append(svm_wda.metrics)

            classifier = NeuralNet(n_hidden_units=[250], output_size=4, batch_size=20, n_epochs=200, dropout=True,
                                   activation_function='relu', learning_rate=.3, momentum=True, momentum_term=.5)
            write_to_csv(svm_wda.metrics, csv_writer)


            y_for_mlp = []
            #Set up the x and y data for the MLP
            for p, domain in enumerate(transformed_domains):
                domain_x, domain_y = domain
                domain_x = domain_x.todense()
                y_for_mlp.append(domain_y)

                if p == 0:
                    neural_net_x_train = domain_x
                    neural_net_y_train = domain_y
                else:
                    neural_net_x_train = numpy.vstack((neural_net_x_train, domain_x))
                    neural_net_y_train = numpy.hstack((neural_net_y_train, domain_y))

            neural_net_x_train = numpy.float_(neural_net_x_train)


            classifier.train(neural_net_x_train, neural_net_y_train)

            test_y[test_y == 2] = 0
            test_y[test_y == 3] = 1
            svm_y_train = neural_net_y_train
            svm_y_train[svm_y_train == 2] = 0
            svm_y_train[svm_y_train == 3] = 1

            #SVM without the domain adaptation
            svm = SVM()
            svm.train(sparse.coo_matrix(neural_net_x_train), svm_y_train)
            svm.test(test_x, test_y)
            output = "\nSVM without domain adaptation"
            if print_output:
                print(output)
                print(svm)
                print("\n")
            csv_writer.writerow([output])
            svm_metrics_list.append(svm.metrics)
            write_to_csv(svm.metrics, csv_writer)


            #Transform the feature vectors of the held out data to the learned hidden layer features of the previous
            #MLP trained with all n-1 datasets

            perceptron_train_x = theano.shared(neural_net_x_train)
            perceptron_test_x = theano.shared(test_x.todense())

            transformed_perceptron_train_x = classifier.transfer_learned_weights(perceptron_train_x)
            transformed_perceptron_test_x = classifier.transfer_learned_weights(perceptron_test_x)

            modified_transformed_perceptron_train_x = numpy.hstack((transformed_perceptron_train_x,
                                                                    neural_net_x_train))
            modified_transformed_perceptron_test_x = numpy.hstack((transformed_perceptron_test_x,
                                                                   test_x.todense()))

            output = "\nSVM with BoW and transformed features"
            csv_writer.writerow([output])
            if print_output:
                print(output)
            svm_mlp_bow = SVM()
            svm_mlp_bow.train(sparse.coo_matrix(modified_transformed_perceptron_train_x), svm_y_train)
            svm_mlp_bow.test(sparse.coo_matrix(modified_transformed_perceptron_test_x), test_y)
            write_to_csv(svm_mlp_bow.metrics, csv_writer)
            if print_output:
                print(svm_mlp_bow)
            svm_bow_mlp_list.append(svm_mlp_bow.metrics)


            output = "*********** End of fold {} ***********".format(n_fold)

            if print_output:
                print(output)


        training_domains = copy.deepcopy(all_domains)
        file_name = '{}/{}/fold_averages.csv'.format(os.getcwd(), folder_name)
        file = open(file_name, 'w+')
        csv_writer = csv.writer(file)

        if print_output:
            output = "----------------------------------------------------------------------------------------" \
                     "\nFold Scores\n " \
                     "SVM with domain adaptation"
            print_write_output(output, svm_wda_metrics_list, all_domains_svm_wda_metrics_list, csv_writer)

            output = "\nSVM without domain adaptation"
            print_write_output(output, svm_metrics_list, all_domains_svm_metrics_list, csv_writer)

            output = "SVM with BoW and transformed features"
            print_write_output(output, svm_bow_mlp_list, all_domains_svm_bow_mlp_list, csv_writer)



    file_name = '{}/output/all_fold_averages.csv'.format(os.getcwd())
    file = open(file_name, 'w+')
    csv_writer = csv.writer(file)
    if print_output:
        output = "*******************************************************************************************" \
                 "\nAll domain macro metric scores\n " \
                 "SVM with domain adaptation"
        print_macro_scores("SVM with domain adaptation", all_domains_svm_wda_metrics_list, csv_writer)

        output = "\nSVM without domain adaptation"
        print_macro_scores(output, all_domains_svm_metrics_list, csv_writer)

        output = "SVM with BoW and transformed features"
        print_macro_scores(output, all_domains_svm_bow_mlp_list, csv_writer)
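
write_to_csv, print_write_output, and print_macro_scores are helpers defined outside this snippet. As an illustration only, write_to_csv presumably emits one row per metric:

def write_to_csv(metrics, csv_writer):
    # Hypothetical helper: one CSV row per metric name/value pair.
    for metric_name, metric_value in metrics.items():
        csv_writer.writerow([metric_name, metric_value])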
Example 8
def main():
    print("Loading data...")
    X_list, y_list = get_data()

    print("Loaded data...")
    print('\n')
    dataset_names = DataLoader.get_all_files('Data')
    dataset_names = [name.split('/')[1].split('.')[0] for name in dataset_names]
    undersample = True

    for i, (X, y) in enumerate(zip(X_list, y_list)):
        print("Dataset: {}".format(dataset_names[i]))

        X = np.array(X)
        y = np.array(y)

        n = len(X)

        kf = KFold(n, random_state=1337, shuffle=True, n_folds=5)

        fold_accuracies = []
        fold_recalls = []
        fold_precisions = []
        fold_aucs = []
        fold_f1s = []

        for fold_idx, (train, test) in enumerate(kf):
            X_train, X_test = X[train], X[test]
            y_train, y_test = y[train], y[test]

            if undersample:
                # Get all the targets that are not relevant, i.e. y = -1
                idx_undersample = np.where(y_train == -1)[0]

                # Get all the targets that are relevant, i.e. y = 1
                idx_positive = np.where(y_train == 1)[0]
                # Sample from the non-relevant targets (np.random.choice samples with replacement by default)
                random_negative_sample = np.random.choice(idx_undersample, idx_positive.shape[0])

                X_train_positive = X_train[idx_positive]

                X_train_negative = X_train[random_negative_sample]

                X_train_undersample = np.hstack((X_train_positive, X_train_negative))

                y_train_positive = y_train[idx_positive]
                y_train_negative = y_train[random_negative_sample]
                y_train_undersample = np.hstack((y_train_positive, y_train_negative))

            count_vec = CountVectorizer(ngram_range=(1, 3), max_features=50000)

            count_vec.fit(X_train)

            if undersample:
                X_train = X_train_undersample
                y_train = y_train_undersample

            X_train_undersample = count_vec.transform(X_train)
            X_test = count_vec.transform(X_test)

            svm = SVM()
            svm.train(X_train_undersample, y_train)
            svm.test(X_test, y_test)

            f1_score = svm.metrics["F1"]
            precision = svm.metrics["Precision"]
            recall = svm.metrics["Recall"]
            auc = svm.metrics["AUC"]
            accuracy = svm.metrics["Accuracy"]

            fold_accuracies.append(accuracy)
            fold_precisions.append(precision)
            fold_recalls.append(recall)
            fold_aucs.append(auc)
            fold_f1s.append(f1_score)

        average_accuracy = np.mean(fold_accuracies)
        average_precision = np.mean(fold_precisions)
        average_recall = np.mean(fold_recalls)
        average_auc = np.mean(fold_aucs)
        average_f1 = np.mean(fold_f1s)

        print("Fold Average Accuracy: {}".format(average_accuracy))
        print("Fold Average F1: {}".format(average_f1))
        print("Fold Average Precision: {}".format(average_precision))
        print("Fold Average AUC: {}".format(average_auc))
        print("Fold Average Recall: {}".format(average_recall))
        print('\n')
Example 9
def main():
    dm_model = Doc2Vec.load('400_pvdm_doc2vec.d2v')
    dbow_model = Doc2Vec.load('400_pvdbow_doc2vec.d2v')

    # Load datasets for classifying
    path = 'datasets/'
    doc2vec_vector_size = 400
    files = [f for f in listdir(path) if isfile(join(path, f))]
    files.pop(0)

    data_loader = DataLoader(path)

    domains = data_loader.csv_files

    names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}

    domain_features = data_loader.get_feature_matrix(names)
    domain = domain_features.pop(0)
    x, y = domain
    # get total corpus size
    n_total_documents = 0

    for domain in domain_features:
        n_total_documents += len(domain[0])
        x = numpy.hstack((x, domain[0]))
        y = numpy.hstack((y, domain[1]))
    x, y = data_loader.create_random_samples(x, y, train_p=.8, test_p=.2)
    train_x, test_x = x
    train_y, test_y = y
    transformed_train_x = data_loader.get_transformed_features(
        train_x, sparse=True, tfidf=True, add_index_vector=False)
    transformed_test_x = data_loader.get_transformed_features(test_x,
                                                              sparse=True,
                                                              tfidf=True)
    all_features = numpy.zeros(shape=(n_total_documents, 800))
    all_labels = numpy.asarray([])

    i = 0

    dbow_dm_train_x = numpy.zeros((train_x.shape[0], 2 * doc2vec_vector_size))
    dbow_dm_test_x = numpy.zeros((test_x.shape[0], 2 * doc2vec_vector_size))
    """
        Set up the feature for the SVM by iterating through all the word vectors.
        Pre process each vector and then feed into doc2vec model, both the distributed memory
        and distributed bag of words. Concatenate the vectors for better classification results
        as per paragraph to vector paper by Mikolv.
    """
    for feature_vector in train_x:
        preprocessed_line = list(
            Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_train_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_train_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i += 1
    """
        Do the same as above but for the test set.
    """

    i = 0

    for feature_vector in test_x:
        preprocessed_line = list(
            Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_test_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_test_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i += 1

    print("Training doc2vec SVM")
    # Train SVM on concatenated doc2vec (DM + DBOW) features
    svm = SVM()
    svm.train(dbow_dm_train_x, train_y)
    svm.test(dbow_dm_test_x, test_y)
    print("end of training doc2vec bow SVM\n")

    print("Training classic bow SVM")
    #Train SVM on classic bow
    svm = SVM()
    svm.train(transformed_train_x, train_y)
    svm.test(transformed_test_x, test_y)
    print("end of training classic bow SVM\n")
Example 10
def train_and_test(C, tolerance):
    svm = SVM(C=C, tolerance=tolerance)
    svm.train()
    acc = svm.test()
    # print(os.getpid(), acc)
    return [acc, svm]
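
The commented-out os.getpid() print suggests this helper is meant to run in worker processes; a sketch of a parallel grid search over (C, tolerance), assuming the SVM loads its data internally as the argument-free train()/test() calls imply:

from itertools import product
from multiprocessing import Pool

if __name__ == '__main__':
    grid = list(product([0.1, 1.0, 10.0], [1e-3, 1e-4]))  # illustrative values
    with Pool() as pool:
        results = pool.starmap(train_and_test, grid)
    best_acc, best_svm = max(results, key=lambda r: r[0])
    print('best accuracy:', best_acc)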