Example No. 1
def crossValidation(X, Y, k, kernel=np.dot):
  # Compute the accuracy of result against the ground-truth answer
  def accuracyRate(result, answer):
    return 1.0 - sum(abs(result - answer) / 2) / float(len(result))

  n = len(X)   # number of data points
  l = n // k   # number of data points per fold (integer division for slicing)
  ac = 0.0     # accumulated accuracy

  # Run k-fold cross-validation and accumulate the accuracy
  for i in range(k):
    # l test vectors and their classes
    testVectors = X[l*i:l*(i+1)]
    classForTestVectors = Y[l*i:l*(i+1)]
    # n-l training vectors and their classes
    learningVectors = np.vstack((X[:l*i], X[l*(i+1):]))
    classForlearningVectors = np.hstack((Y[:l*i], Y[l*(i+1):]))

    # Compute the support vectors from the training vectors
    svm = SVM(learningVectors, classForlearningVectors, kernel)
    # Classify the test vectors with the learned decision function
    result = [svm.discriminate(t) for t in testVectors]
    # Accumulate the accuracy on this fold
    ac += accuracyRate(result, classForTestVectors)

  # Return the average accuracy
  return ac / k
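
A minimal usage sketch for crossValidation (hypothetical data; it assumes, as the snippet suggests, an SVM class constructed as SVM(X, Y, kernel) with a discriminate() method and +1/-1 labels):

import numpy as np

X = np.random.randn(100, 2)                 # toy 2-D points
Y = np.where(X.sum(axis=1) > 0, 1.0, -1.0)  # +1/-1 labels, linearly separable
print(crossValidation(X, Y, k=5))           # average accuracy over 5 folds
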
    def run_test(self, X, y, kernel):
        # Train on the first 80% of the points and score on the remaining 20%
        n = int(X.shape[0] * 0.8)
        K = self.gram_matrix(X, kernel)

        svm = SVM(kernel, 1.0, K)
        svm.fit(np.arange(n), y[:n])
        score = svm.score(np.arange(n, X.shape[0]), y[n:])

        return score
    def test_simple(self):
        # Fit on the first four points and check that the four held-out
        # points (rows 4-7) are classified like their nearby duplicates
        X = np.array([[1.0, 1.0],[2.0, 2.0],[3.0, 3.0],[4.0, 4.0],
                      [1.0, 1.0],[2.4, 2.4],[2.6, 2.6],[4.0, 4.0]])
        y = np.array([0.0, 0.0, 1.0, 1.0])
        K = self.gram_matrix(X, kernels.linear)
        svm = SVM(kernels.linear, 1.0, K)
        svm.fit(np.arange(4), y)

        result = svm.predict(np.arange(4, 8))
        np.testing.assert_allclose(result, [0, 0, 1, 1])
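
Both run_test and test_simple rely on a gram_matrix helper that is not shown; a minimal sketch of what such a method could look like (an assumption, not the original code):

import numpy as np

def gram_matrix(self, X, kernel):
    # Precompute all pairwise kernel evaluations K[i, j] = kernel(x_i, x_j),
    # so the SVM can later be fit and scored on index arrays alone
    n = X.shape[0]
    K = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            K[i, j] = kernel(X[i], X[j])
    return K
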
Example No. 4
    def fit(self, X_idx, y):
        self.classes = np.unique(y)
        logging.debug('Fitting %s data points with %s different classes '\
                      'with multiclass svm', X_idx.shape[0], len(self.classes))

        self.svms = []
        for class_a, class_b in itertools.combinations(self.classes, 2):
            filtered_X_idx, filtered_y = self.filter_data(X_idx, y, class_a, class_b)

            svm = SVM(self.kernel, self.C, self.K)
            svm.fit(filtered_X_idx, filtered_y)
            self.svms.append((class_a, class_b, svm))
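
The fit above trains one binary SVM per unordered pair of classes (one-vs-one). A predict counterpart by majority voting might look like this (a sketch; the sign convention of the pairwise SVMs is an assumption):

import numpy as np

def predict(self, X_idx):
    # One-vs-one majority vote: each pairwise SVM votes for one of its two
    # classes, and the class with the most votes wins.
    # (Assumption: svm.predict returns positive values for class_a and
    # non-positive values for class_b.)
    votes = np.zeros((X_idx.shape[0], len(self.classes)))
    index_of = {c: i for i, c in enumerate(self.classes)}
    for class_a, class_b, svm in self.svms:
        pred = np.asarray(svm.predict(X_idx))
        votes[pred > 0, index_of[class_a]] += 1
        votes[pred <= 0, index_of[class_b]] += 1
    return self.classes[np.argmax(votes, axis=1)]
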
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', ['n_feature_maps=', 'epochs=', 'max_words=', 'dropout_p=',
                                                      'undersample=', 'criterion=', 'optimizer=', 'layers=',
                                                      'hyperopt=', 'experiment_name=', 'w2v_path=', 'tacc=',
                                                      'use_all_data=', 'pretrain=', 'undersample_all=',
                                                      'save_model=', 'transfer_learning=', 'verbose=',
                                                      'use_embedding=', 'word_vector_size=', 'patience='])
    except getopt.GetoptError as error:
        print(error)
        sys.exit(2)

    w2v_path = '/Users/ericrincon/PycharmProjects/Deep-PICO/wikipedia-pubmed-and-PMC-w2v.bin'
    epochs = 50
    criterion = 'categorical_crossentropy'
    optimizer = 'adam'
    experiment_name = 'abstractCNN'
    w2v_size = 200
    activation = 'relu'
    dense_sizes = [400, 400]
    max_words = {'text': 270, 'mesh': 50, 'title': 17}

    filter_sizes = {'text': [2, 3, 4, 5],
                    'mesh': [2, 3, 4, 5],
                    'title': [2, 3, 4, 5]}
    n_feature_maps = {'text': 100, 'mesh': 50, 'title': 50}
    word_vector_size = 200
    using_tacc = False
    undersample = False
    use_embedding = False
    embedding = None
    use_all_data = False
    patience = 50
    p = .5
    verbose = 0
    pretrain = True
    filter_small_data = True
    save_model = False
    load_data_from_scratch = False
    print_output = True
    transfer_learning = False

    for opt, arg in opts:
        if opt == '--save_model':
            if int(arg) == 0:
                save_model = False
            elif int(arg) == 1:
                save_model = True
        elif opt == '--transfer_learning':
            if int(arg) == 1:
                transfer_learning = True
            elif int(arg) == 0:
                transfer_learning = False
        elif opt == '--undersample_all':
            if int(arg) == 0:
                undersample_all = False
            elif int(arg) == 1:
                undersample_all = True
        elif opt == '--pretrain':
            if int(arg) == 0:
                pretrain = False
            elif int(arg) == 1:
                pretrain = True
            else:
                print("Invalid input")

        elif opt == '--verbose':
            verbose = int(arg)
        elif opt == '--use_embedding':
            if int(arg) == 0:
                use_embedding = False
        elif opt == '--dropout_p':
            p = float(arg)
        elif opt == '--epochs':
            epochs = int(arg)
        elif opt == '--layers':
            layer_sizes = arg.split(',')
        elif opt == '--n_feature_maps':
            n_feature_maps = int(arg)
        elif opt == '--criterion':
            criterion = arg
        elif opt == '--optimizer':
            optimizer = arg
        elif opt == '--tacc':
            if int(arg) == 1:
                using_tacc = True
        elif opt == '--hyperopt':
            if int(arg) == 1:
                hyperopt = True
        elif opt == '--experiment_name':
            experiment_name = arg
        elif opt == '--max_words':
            max_words = int(arg)
        elif opt == '--w2v_path':
            w2v_path = arg
        elif opt == '--word_vector_size':
            word_vector_size = int(arg)
        elif opt == '--use_all_data':
            if int(arg) == 1:
                use_all_data = True
        elif opt == '--patience':
            patience = int(arg)

        elif opt == '--undersample':
            if int(arg) == 0:
                undersample = False
            elif int(arg) == 1:
                undersample = True

        else:
            print("Option {} is not valid!".format(opt))


    if using_tacc:
        nltk.data.path.append('/work/03186/ericr/nltk_data/')
    print('Loading data...')

    if load_data_from_scratch:

        print('Loading Word2Vec...')
        w2v = Word2Vec.load_word2vec_format(w2v_path, binary=True)
        print('Loaded Word2Vec...')
        X_list = []
        y_list = []

        if use_embedding:

            X_list, y_list, embedding_list = DataLoader.get_data_as_seq(w2v, w2v_size, max_words)

        else:
            X_list, y_list = DataLoader.get_data_separately(max_words, word_vector_size,
                                                            w2v, use_abstract_cnn=True,
                                                            preprocess_text=False,
                                                            filter_small_data=filter_small_data)
    else:
        X_list, y_list = DataLoader.load_datasets_from_h5py('DataProcessed', True)


    print('Loaded data...')
    dataset_names = DataLoader.get_all_files('DataProcessed')
    dataset_names = [x.split('/')[-1].split('.')[0] for x in dataset_names]

    results_file = open(experiment_name + "_results.txt", "w+")

    for dataset_i, (X, y) in enumerate(zip(X_list, y_list)):
        if use_embedding:
            embedding = embedding_list[dataset_i]

        model_name = dataset_names[dataset_i]

        print("Dataset: {}".format(model_name))

        results_file.write("Dataset: {}\n".format(model_name))

        X_abstract, X_title, X_mesh = X['text'], X['title'], X['mesh']
        n = X_abstract.shape[0]
        kf = KFold(n, random_state=1337, shuffle=True, n_folds=5)

        if pretrain:
            pretrain_fold_accuracies = []
            pretrain_fold_recalls = []
            pretrain_fold_precisions = []
            pretrain_fold_aucs = []
            pretrain_fold_f1s = []

        if transfer_learning:
            svm_fold_accuracies = []
            svm_fold_recalls = []
            svm_fold_precisions = []
            svm_fold_aucs = []
            svm_fold_f1s = []

        fold_accuracies = []
        fold_recalls = []
        fold_precisions = []
        fold_aucs = []
        fold_f1s = []

        for fold_idx, (train, test) in enumerate(kf):
            temp_model_name = experiment_name + '_' + model_name + '_fold_{}'.format(fold_idx + 1)


            cnn = AbstractCNN(n_classes=2,  max_words=max_words, w2v_size=w2v_size, vocab_size=1000, use_embedding=use_embedding,
                              filter_sizes=filter_sizes, n_feature_maps=n_feature_maps, dense_layer_sizes=dense_sizes.copy(),
                              name=temp_model_name, activation_function=activation, dropout_p=p, embedding=embedding)

            if pretrain:
                X_abstract_train = X_abstract[train, :, :]
                X_title_train = X_title[train, :, :]
                X_mesh_train = X_mesh[train, :, :]

                y_train = y[train, :]

                X_abstract_test = X_abstract[test, :, :]
                X_title_test = X_title[test, :, :]
                X_mesh_test = X_mesh[test, :, :]

                y_test = y[test, :]

                for i, (_x, _y) in enumerate(zip(X_list, y_list)):
                    if i != dataset_i:
                        X_abstract_train = np.vstack((X_abstract_train, _x['text'][()]))
                        X_title_train = np.vstack((X_title_train, _x['title'][()]))
                        X_mesh_train = np.vstack((X_mesh_train, _x['mesh'][()]))
                        y_train = np.vstack((y_train, _y[()]))
                print(X_abstract_train.shape)

                cnn.train(X_abstract_train, X_title_train, X_mesh_train, y_train, n_epochs=epochs,
                          optim_algo=optimizer, criterion=criterion, verbose=verbose, patience=patience,
                          save_model=save_model)


                accuracy, f1_score, precision, auc, recall = cnn.test(X_abstract_test, X_title_test, X_mesh_test, y_test,
                                                                      print_output=True)

                print("Results from training on all data only")

                print("Accuracy: {}".format(accuracy))
                print("F1: {}".format(f1_score))
                print("Precision: {}".format(precision))
                print("AUC: {}".format(auc))
                print("Recall: {}".format(recall))
                print("\n")

                pretrain_fold_accuracies.append(accuracy)
                pretrain_fold_precisions.append(precision)
                pretrain_fold_recalls.append(recall)
                pretrain_fold_aucs.append(auc)
                pretrain_fold_f1s.append(f1_score)

            if not use_embedding:
                X_abstract_train = X_abstract[train, :, :]
                X_title_train = X_title[train, :, :]
                X_mesh_train = X_mesh[train, :, :]
                y_train = y[train, :]

                X_abstract_test = X_abstract[test, :, :]
                X_titles_test = X_title[test, :, :]
                X_mesh_test = X_mesh[test, :, :]
                y_test = y[test, :]

            else:
                X_abstract_train = X_abstract[train]
                X_title_train = X_title[train]
                X_mesh_train = X_mesh[train]
                y_train = y[train, :]

                X_abstract_test = X_abstract[test]
                X_titles_test = X_title[test]
                X_mesh_test = X_mesh[test]
                y_test = y[test, :]

                if undersample:
                    X_abstract_train, X_title_train, X_mesh_train, y_train = \
                        DataLoader.undersample_seq(X_abstract_train, X_title_train, X_mesh_train, y_train)

            cnn.train(X_abstract_train, X_title_train, X_mesh_train, y_train, n_epochs=epochs, optim_algo=optimizer,
                      criterion=criterion, verbose=verbose, patience=patience,
                      save_model=save_model)
            accuracy, f1_score, precision, auc, recall = cnn.test(X_abstract_test, X_titles_test, X_mesh_test, y_test,
                                                                  print_output)

            if transfer_learning:
                svm = SVM()

                # Transfer weights
                X_transfer_train = cnn.output_learned_features([X_abstract_train, X_title_train, X_mesh_train])
                X_transfer_test = cnn.output_learned_features([X_abstract_test, X_titles_test, X_mesh_test])

                svm.train(X_transfer_train, DataLoader.onehot2list(y_train))
                svm.test(X_transfer_test, DataLoader.onehot2list(y_test))

                print("\nSVM results")
                print(svm)
                print('\n')

                svm_fold_accuracies.append(svm.metrics['Accuracy'])
                svm_fold_precisions.append(svm.metrics['Precision'])
                svm_fold_aucs.append(svm.metrics['AUC'])
                svm_fold_recalls.append(svm.metrics['Recall'])
                svm_fold_f1s.append(svm.metrics['F1'])

            print('CNN results')
            print("Accuracy: {}".format(accuracy))
            print("F1: {}".format(f1_score))
            print("Precision: {}".format(precision))
            print("AUC: {}".format(auc))
            print("Recall: {}".format(recall))

            fold_accuracies.append(accuracy)
            fold_precisions.append(precision)
            fold_recalls.append(recall)
            fold_aucs.append(auc)
            fold_f1s.append(f1_score)



        if pretrain:
            pretrain_average_accuracy = np.mean(pretrain_fold_accuracies)
            pretrain_average_precision = np.mean(pretrain_fold_precisions)
            pretrain_average_recall = np.mean(pretrain_fold_recalls)
            pretrain_average_auc = np.mean(pretrain_fold_aucs)
            pretrain_average_f1 = np.mean(pretrain_fold_f1s)

            print("\nAverage results from using all data")
            print("Fold Average Accuracy: {}".format(pretrain_average_accuracy))
            print("Fold Average F1: {}".format(pretrain_average_f1))
            print("Fold Average Precision: {}".format(pretrain_average_precision))
            print("Fold Average AUC: {}".format(pretrain_average_auc))
            print("Fold Average Recall: {}".format(pretrain_average_recall))
            print('\n')



        average_accuracy = np.mean(fold_accuracies)
        average_precision = np.mean(fold_precisions)
        average_recall = np.mean(fold_recalls)
        average_auc = np.mean(fold_aucs)
        average_f1 = np.mean(fold_f1s)


        print('CNN Results')
        print("Fold Average Accuracy: {}".format(average_accuracy))
        print("Fold Average F1: {}".format(average_f1))
        print("Fold Average Precision: {}".format(average_precision))
        print("Fold Average AUC: {}".format(average_auc))
        print("Fold Average Recall: {}".format(average_recall))
        print('\n')

        results_file.write("CNN results\n")
        results_file.write("Fold Average Accuracy: {}\n".format(average_accuracy))
        results_file.write("Fold Average F1: {}\n".format(average_f1))
        results_file.write("Fold Average Precision: {}\n".format(average_precision))
        results_file.write("Fold Average AUC: {}\n".format(average_auc))
        results_file.write("Fold Average Recall: {}\n".format(average_recall))
        results_file.write('\n')

        if transfer_learning:
            average_accuracy = np.mean(svm_fold_accuracies)
            average_precision = np.mean(svm_fold_precisions)
            average_recall = np.mean(svm_fold_recalls)
            average_auc = np.mean(svm_fold_aucs)
            average_f1 = np.mean(svm_fold_f1s)

            print("SVM with cnn features")
            print("Fold Average Accuracy: {}".format(average_accuracy))
            print("Fold Average F1: {}".format(average_f1))
            print("Fold Average Precision: {}".format(average_precision))
            print("Fold Average AUC: {}".format(average_auc))
            print("Fold Average Recall: {}".format(average_recall))
            print('\n')

            results_file.write("SVM with cnn features\n")
            results_file.write("Fold Average Accuracy: {}\n".format(average_accuracy))
            results_file.write("Fold Average F1: {}\n".format(average_f1))
            results_file.write("Fold Average Precision: {}\n".format(average_precision))
            results_file.write("Fold Average AUC: {}\n".format(average_auc))
            results_file.write("Fold Average Recall: {}\n".format(average_recall))
            results_file.write('\n')
Example No. 6
import itertools

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm


def plot(predictor, X, y, grid_size):
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, grid_size), np.linspace(y_min, y_max, grid_size), indexing="ij")
    flatten = lambda m: np.array(m).reshape(-1)

    result = []
    for (i, j) in itertools.product(range(grid_size), range(grid_size)):
        point = np.array([xx[i, j], yy[i, j]]).reshape(1, 2)
        result.append(predictor.predict(point))

    Z = np.array(result).reshape(xx.shape)

    plt.contourf(xx, yy, Z, cmap=cm.Paired, levels=[-0.001, 0.001], extend="both", alpha=0.8)
    plt.scatter(flatten(X[:, 0]), flatten(X[:, 1]), c=flatten(y), cmap=cm.Paired)
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.show()


num_samples = 500
num_features = 2
grid_size = 20
samples = np.matrix(np.random.normal(size=num_samples * num_features).reshape(num_samples, num_features))
labels = 2 * (samples.sum(axis=1) > 0) - 1.0
model = SVM(1.0, Kernel.linear())
print(samples[0])
model.fit(samples, labels)
plot(model, samples, labels, grid_size)
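
Example No. 6 uses a Kernel.linear() factory that is not shown; a minimal sketch of what it might provide (an assumption, not the original class):

import numpy as np

class Kernel(object):
    @staticmethod
    def linear():
        # k(x, y) = <x, y>
        return lambda x, y: np.inner(x, y)

    @staticmethod
    def gaussian(sigma):
        # RBF kernel with bandwidth sigma
        return lambda x, y: np.exp(-np.sum(np.square(np.subtract(x, y))) / (2 * sigma ** 2))
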
Example No. 7
def transfer_learning(print_output=True):
    path = 'datasets/'

    files = [f for f in listdir(path) if isfile(join(path, f))]
    files.pop(0)
    data_loader = DataLoader(path)
    domains = data_loader.csv_files
    all_domains = copy.deepcopy(domains)
    training_domains = data_loader.csv_files
    all_domains_svm_wda_metrics_list = []
    all_domains_svm_metrics_list = []
    all_domains_svm_bow_mlp_list = []
    all_domains_mlp_fold_scores = []
    all_domains_svm_wda_metrics_list = []
    all_domains_svm_metrics_list = []
    all_domains_svm_bow_mlp_list = []
    all_domains_mlp_fold_scores = []

    for i, held_out_domain in enumerate(domains):
        training_domains.pop(i)
        names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}
        svm_wda_metrics_list = []
        svm_metrics_list = []
        svm_bow_mlp_list = []

        domain_name = str(files[i]).split('.')[0]
        folder_name = 'output/' + domain_name

        output = "Dataset: {}".format(files[i])
        if print_output:
            print(output)

        #shuffle(data_loader.csv_files)
        data_loader.csv_files = training_domains
        data_sets = data_loader.csv_files
        domains = data_loader.get_feature_matrix(names)

        #Take one file out of the csv files in the data loader and use it as the held-out domain

        #Get the feature representation of the held-out data
        held_out_x, held_out_y = data_loader.get_feature_matrix(names, held_out_domain)
        #Create the folds for the held-out data, in this case the default 5
        folds = data_loader.cross_fold_valdation(held_out_x, held_out_y)
        #Get the total number of domains, i.e. the number of files with documents
        n_source_domains = len(data_sets)
        os.makedirs(folder_name)

        #Must convert the data type of the matrix for theano
        feature_engineer = Feature_Engineer()

        #Start the 5 fold cross validation
        for n_fold, fold in enumerate(folds):
            output = "Fold {}: \n".format(n_fold)
            if print_output:
                print(output)
            output = '{}/{}/fold_{}.csv'.format(os.getcwd(), folder_name, (n_fold + 1))
            file = open(output, 'w')
            csv_writer = csv.writer(file)

            #Each sample is a list that contains the x and y for the classifier.
            #fold[0] would normally be the train sample, but train and test are
            #swapped here to test the effectiveness of the domain adaptation.
            train_sample = fold[1]
            test_sample = fold[0]

            #These are the original copies to be copied over to the augmented feature matrix.
            #Each sample contains the text and y labels from the data before it goes into
            #the sklearn count vectorizer.
            train_x, train_y = train_sample
            test_x, test_y = test_sample

            train_y[train_y == 0] = 2
            train_y[train_y == 1] = 3
            test_y[test_y == 0] = 2
            test_y[test_y == 1] = 3


            #Get the bag of words representation of the small 20% target source data and transform the other 80%
            #of the data.
            train_x = data_loader.get_transformed_features(train_x, True, False, True)
            test_x = data_loader.transform(test_x, True, True)

            transformed_domains = []

            #Transform the domains with respect to the training data
            for domain in domains:
                domain_x, domain_y = domain
                transformed_domain_x = data_loader.transform(domain_x, True, True)
                transformed_domain_x, domain_y = data_loader.underSample(transformed_domain_x, domain_y)
                transformed_domains.append([transformed_domain_x, domain_y])

            augmented_feature_matrix_train, augmented_y_train = feature_engineer.augmented_feature_matrix(transformed_domains,
                                                                                              [train_x, train_y])
            augmented_feature_matrix_test, augmented_y_test = feature_engineer.augmented_feature_matrix(held_out_domain=[test_x, test_y],
                                                                                                        train_or_test=False,
                                                                                                        n_source_domains=len(transformed_domains))
            augmented_y_test[augmented_y_test == 2] = 0
            augmented_y_test[augmented_y_test == 3] = 1
            #SVM with the augmented feature matrix for domain adaptation
            svm_wda = SVM()
            svm_wda.train(augmented_feature_matrix_train, augmented_y_train)
            svm_wda.test(augmented_feature_matrix_test, augmented_y_test)
            output = "\nSVM with domain adaptation metrics:"
            csv_writer.writerow([output])
            if print_output:
                print(output)
                print(svm_wda)
                print("\n")
            svm_wda_metrics_list.append(svm_wda.metrics)

            classifier = NeuralNet(n_hidden_units=[250], output_size=4, batch_size=20, n_epochs=200, dropout=True,
                                   activation_function='relu', learning_rate=.3, momentum=True, momentum_term=.5)
            write_to_csv(svm_wda.metrics, csv_writer)


            y_for_mlp = []
            #Set up the x and y data for the MLP
            for p, domain in enumerate(transformed_domains):
                domain_x, domain_y = domain
                domain_x = domain_x.todense()
                y_for_mlp.append(domain_y)

                if p == 0:
                    neural_net_x_train = domain_x
                    neural_net_y_train = domain_y
                else:
                    neural_net_x_train = numpy.vstack((neural_net_x_train, domain_x))
                    neural_net_y_train = numpy.hstack((neural_net_y_train, domain_y))

            neural_net_x_train = numpy.float_(neural_net_x_train)


            classifier.train(neural_net_x_train, neural_net_y_train)

            test_y[test_y == 2] = 0
            test_y[test_y == 3] = 1
            svm_y_train = neural_net_y_train
            svm_y_train[svm_y_train == 2] = 0
            svm_y_train[svm_y_train == 3] = 1

            #SVM without the domain adaptation
            svm = SVM()
            svm.train(sparse.coo_matrix(neural_net_x_train), svm_y_train)
            svm.test(test_x, test_y)
            output = "\nSVM without domain adaptation"
            if print_output:
                print(output)
                print(svm)
                print("\n")
            csv_writer.writerow([output])
            svm_metrics_list.append(svm.metrics)
            write_to_csv(svm.metrics, csv_writer)


            #Transform the feature vectors of the held out data to the learned hidden layer features of the previous
            #MLP trained with all n-1 datasets

            perceptron_train_x = theano.shared(neural_net_x_train)
            perceptron_test_x = theano.shared(test_x.todense())

            transformed_perceptron_train_x = classifier.transfer_learned_weights(perceptron_train_x)
            transformed_perceptron_test_x = classifier.transfer_learned_weights(perceptron_test_x)

            modified_transformed_perceptron_train_x = numpy.hstack((transformed_perceptron_train_x,
                                                                    neural_net_x_train))
            modified_transformed_perceptron_test_x = numpy.hstack((transformed_perceptron_test_x,
                                                                   test_x.todense()))

            output = "\nSVM with BoW and transformed features"
            csv_writer.writerow([output])
            if print_output:
                print(output)
            svm_mlp_bow = SVM()
            svm_mlp_bow.train(sparse.coo_matrix(modified_transformed_perceptron_train_x), svm_y_train)
            svm_mlp_bow.test(sparse.coo_matrix(modified_transformed_perceptron_test_x), test_y)
            write_to_csv(svm_mlp_bow.metrics, csv_writer)
            if print_output:
                print(svm_mlp_bow)
            svm_bow_mlp_list.append(svm_mlp_bow.metrics)


            output = "*********** End of fold {} ***********".format(n_fold)

            if print_output:
                print(output)


        training_domains = copy.deepcopy(all_domains)
        file_name = '{}/{}/fold_averages.csv'.format(os.getcwd(), folder_name)
        file = open(file_name, 'w+')
        csv_writer = csv.writer(file)

        if print_output:
            output = "----------------------------------------------------------------------------------------" \
                     "\nFold Scores\n " \
                     "SVM with domain adaptation"
            print_write_output(output, svm_wda_metrics_list, all_domains_svm_wda_metrics_list, csv_writer)

            output = "\nSVM without domain adaptation"
            print_write_output(output, svm_metrics_list, all_domains_svm_metrics_list, csv_writer)

            output = "SVM with BoW and transformed features"
            print_write_output(output, svm_bow_mlp_list, all_domains_svm_bow_mlp_list, csv_writer)



    file_name = '{}/output/all_fold_averages.csv'.format(os.getcwd())
    file = open(file_name, 'w+')
    csv_writer = csv.writer(file)
    if print_output:
        output = "*******************************************************************************************" \
                 "\nAll domain macro metric scores\n " \
                 "SVM with domain adaptation"
        print_macro_scores("SVM with domain adaptation", all_domains_svm_wda_metrics_list, csv_writer)

        output = "\nSVM without domain adaptation"
        print_macro_scores(output, all_domains_svm_metrics_list, csv_writer)

        output = "SVM with BoW and transformed features"
        print_macro_scores(output, all_domains_svm_bow_mlp_list, csv_writer)
def main():
    print("Loading data...")
    X_list, y_list = get_data()

    print("Loaded data...")
    print('\n')
    dataset_names = DataLoader.get_all_files('Data')
    dataset_names = [name.split('/')[1].split('.')[0] for name in dataset_names]
    undersample = True

    for i, (X, y) in enumerate(zip(X_list, y_list)):
        print("Dataset: {}".format(dataset_names[i]))

        X = np.array(X)
        y = np.array(y)

        n = len(X)

        kf = KFold(n, random_state=1337, shuffle=True, n_folds=5)

        fold_accuracies = []
        fold_recalls = []
        fold_precisions = []
        fold_aucs = []
        fold_f1s = []

        for fold_idx, (train, test) in enumerate(kf):
            X_train, X_test = X[train], X[test]
            y_train, y_test = y[train], y[test]

            if undersample:
                # Get all the targets that are not relevant, i.e. y = -1
                idx_undersample = np.where(y_train == -1)[0]

                # Get all the targets that are relevant i.e., y = 1
                idx_positive = np.where(y_train == 1)[0]
                # Now sample an equal number from the non-relevant targets
                random_negative_sample = np.random.choice(idx_undersample, idx_positive.shape[0])

                X_train_positive = X_train[idx_positive]

                X_train_negative = X_train[random_negative_sample]

                X_train_undersample = np.hstack((X_train_positive, X_train_negative))

                y_train_positive = y_train[idx_positive]
                y_train_negative = y_train[random_negative_sample]
                y_train_undersample = np.hstack((y_train_positive, y_train_negative))

            count_vec = CountVectorizer(ngram_range=(1, 3), max_features=50000)

            count_vec.fit(X_train)

            if undersample:
                X_train = X_train_undersample
                y_train = y_train_undersample

            X_train_undersample = count_vec.transform(X_train)
            X_test = count_vec.transform(X_test)

            svm = SVM()
            svm.train(X_train_undersample, y_train)
            svm.test(X_test, y_test)

            f1_score = svm.metrics["F1"]
            precision = svm.metrics["Precision"]
            recall = svm.metrics["Recall"]
            auc = svm.metrics["AUC"]
            accuracy = svm.metrics["Accuracy"]

            fold_accuracies.append(accuracy)
            fold_precisions.append(precision)
            fold_recalls.append(recall)
            fold_aucs.append(auc)
            fold_f1s.append(f1_score)

        average_accuracy = np.mean(fold_accuracies)
        average_precision = np.mean(fold_precisions)
        average_recall = np.mean(fold_recalls)
        average_auc = np.mean(fold_aucs)
        average_f1 = np.mean(fold_f1s)

        print("Fold Average Accuracy: {}".format(average_accuracy))
        print("Fold Average F1: {}".format(average_f1))
        print("Fold Average Precision: {}".format(average_precision))
        print("Fold Average AUC: {}".format(average_auc))
        print("Fold Average Recall: {}".format(average_recall))
        print('\n')
Example No. 9
# ... (truncated: the head of this data literal was lost in extraction) ...
[1]]
X = np.array(X).transpose()
print(X.shape)


y = np.array(y).flatten()
y[y == 0] = -1
print(y.shape)

svms = SVM(X, y)
svms.train()
print(len(svms.supportVector))
for t in svms.supportVector:
	print(svms.x[:, t])
svms.prints_test_linear()
Example No. 10
 # Load the vocabulary list
 obj1.loadVocabList()
 '''
 Preprocess the mail data:
 1) lowercase every word in the mail
 2) replace every number with 'number'
 3) replace every e-mail address with 'emailaddr'
 4) replace every $ sign with 'dollar'
 5) replace every URL with 'httpaddr'
 6) strip all HTML tags
 7) drop every symbol that is not a letter, digit or underscore _,
    and turn tabs and runs of spaces into a single space
 '''
 obj1.proMailData()
 # Porter stemming
 obj1.porterStemmer()
 obj1.getWordIndices()
 print(obj1.wordIndices)
 #print(len(obj1.wordIndices))
 #print(len(set(obj1.wordIndices)))
 obj1.getFeatures()
 #print(obj1.mailFeatures.T)
 print(shape(obj1.mailFeatures.T))
 svmObj = SVM("data/svm/spamTrain.mat", "data/svm/spamTest.mat", obj1.mailFeatures.T)
 svmObj.processData()
 c = 100
 t = 0
 svmObj.trainModel(c, t)
 t = 2
 svmObj.trainModel(c, t)
 print("Time elapsed:", time.time() - time_ben)
Example No. 11
def main():
    dm_model = Doc2Vec.load('400_pvdm_doc2vec.d2v')
    dbow_model = Doc2Vec.load('400_pvdbow_doc2vec.d2v')

    #Load datasets for classifying
    path = 'datasets/'
    doc2vec_vector_size = 400
    files = [f for f in listdir(path) if isfile(join(path,f))]
    files.pop(0)

    data_loader = DataLoader(path)

    domains = data_loader.csv_files


    names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}

    domain_features = data_loader.get_feature_matrix(names)
    domain = domain_features.pop(0)
    x, y = domain
    #get size
    n_total_documents = 0

    for domain in domain_features:
        n_total_documents += len(domain[0])
        x = numpy.hstack((x, domain[0]))
        y = numpy.hstack((y, domain[1]))
    x, y = data_loader.create_random_samples(x, y, train_p=.8, test_p=.2)
    train_x, test_x = x
    train_y, test_y = y
    transformed_train_x = data_loader.get_transformed_features(train_x, sparse=True, tfidf=True, add_index_vector=False)
    transformed_test_x = data_loader.get_transformed_features(test_x, sparse=True, tfidf=True)
    all_features = numpy.zeros(shape=(n_total_documents, 800))
    all_labels = numpy.asarray([])

    i = 0

    dbow_dm_train_x = numpy.zeros((train_x.shape[0], 2*doc2vec_vector_size))
    dbow_dm_test_x = numpy.zeros((test_x.shape[0], 2*doc2vec_vector_size))

    """
        Set up the feature for the SVM by iterating through all the word vectors.
        Pre process each vector and then feed into doc2vec model, both the distributed memory
        and distributed bag of words. Concatenate the vectors for better classification results
        as per paragraph to vector paper by Mikolv.
    """
    for feature_vector in train_x:
        preprocessed_line = list(Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_train_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_train_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i += 1

    """
        Do the same as above but for the test set.
    """

    i = 0

    for feature_vector in test_x:
        preprocessed_line = list(Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_test_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_test_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i += 1

    print("Training doc2vec SVM")
    #Train SVM on the concatenated doc2vec features
    svm = SVM()
    svm.train(dbow_dm_train_x, train_y)
    svm.test(dbow_dm_test_x, test_y)
    print("end of training doc2vec bow SVM\n")


    print("Training classic bow SVM")
    #Train SVM on classic bow
    svm = SVM()
    svm.train(transformed_train_x, train_y)
    svm.test(transformed_test_x, test_y)
    print("end of training classic bow SVM\n")
Example No. 12
			pairwiseGTA = Weight.load(args.weight[0])
			GTA_weight = Weight(gta_profs, pairwiseGTA)
			GTA_clusters = GTA_weight.cluster(cluster_type, d)
			GTA_weight.weight(GTA_clusters)
			# Weight Virus
			pairwiseViral = Weight.load(args.weight[1])
			virus_weight = Weight(viral_profs, pairwiseViral)
			virus_clusters = virus_weight.cluster(cluster_type, d)
			virus_weight.weight(virus_clusters)

		# Create SVM
		c = args.c[0]
		kernel = args.kernel[0]
		kernel_var = float(args.kernel[1])

		svm = SVM(gta_profs, viral_profs, c, kernel, kernel_var)

		# Print support vectors
		if args.svs:
			svm.show_svs()

		# Xval	
		if args.xval:
			nfolds = args.xval
			if args.weight:
				result = svm.xval(nfolds, NREPS, pairwiseGTA, pairwiseViral, cluster_type, d)
			else:
				result = svm.xval(nfolds, NREPS)
			if mini:
				print("GTA Correct\tViral Correct")
				print("%.2f\t%.2f" % (result[0], result[1]))
Example No. 13
# parameters
name = 'stdev2'
print('======Training======')
# load data from csv files
train = loadtxt('newData-2/data_'+name+'_train.csv')
#train = loadtxt('data/data_'+name+'_train.csv')
# use deep copy here to make cvxopt happy
X = train[:, 0:2].copy()
Y = train[:, 2:3].copy()

#X = np.array([[1.0,2.0],[2.0,2.0],[0.0,0.0],[-2.0,3.0]])
#Y = np.array([[1.0],[1.0],[-1.0],[-1.0]])

# Carry out training, primal and/or dual
C = 1
svm = SVM(X, Y, C)
svm.train()
#model = svm.train_gold()

# Define the predictSVM(x) function, which uses trained parameters
def predictSVM(x):
	return svm.test(x)
	#return svm.test_gold(x,model)


# plot training results
plotDecisionBoundary(X, Y, predictSVM, [-1, 0, 1], title='SVM Train')



print('======Validation======')
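
plotDecisionBoundary is called above but not shown; a minimal sketch consistent with the call site (the signature and behavior are assumptions):

import numpy as np
import matplotlib.pyplot as plt

def plotDecisionBoundary(X, Y, scoreFn, values, title=''):
    # Evaluate scoreFn on a grid spanning the data and draw the requested
    # level sets (e.g. the margin at -1, 0, +1) over the training points
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                         np.linspace(y_min, y_max, 100))
    zz = np.array([scoreFn(np.array([x, y])) for x, y in
                   zip(xx.ravel(), yy.ravel())]).reshape(xx.shape)
    plt.contour(xx, yy, zz, levels=values)
    plt.scatter(X[:, 0], X[:, 1], c=Y.ravel())
    plt.title(title)
    plt.show()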