Example #1
def predict_SVM():
    '''
    Create the data and fit the model
    '''
    N = 2000
    cls1 = np.random.randn(1000, 2)
    cls2 = np.random.randn(1000, 2) + np.array([5, 5])

    # Build the data matrix X
    X = np.vstack((cls1, cls2))
    T = []
    for i in range(int(N / 2)):
        T.append(1.0)

    for i in range(int(N / 2)):
        T.append(-1.0)
    T = np.array(T)

    ## Model fitting
    model = SVM()
    model.fit(X, T)
    pred_list = np.sign(model.w0 + np.dot(X, model.w))

    ## Predict
    ok = []
    for i in range(len(X)):
        if T[i] == pred_list[i]:
            ok.append(1)

        else:
            ok.append(0)

    acc_SVM = np.sum(ok) / len(ok)
    print('Accuracy is {}'.format(acc_SVM))
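This example only assumes that SVM.fit exposes a learned weight vector w and bias w0 such that sign(w0 + X @ w) predicts the class. A minimal stub with that interface (a sketch using a least-squares stand-in, not the original project's solver):

import numpy as np

class SVM:
    # Placeholder with the interface Example #1 relies on: fit(X, T) must
    # set self.w and self.w0 so that sign(w0 + X @ w) predicts the class.
    def fit(self, X, T):
        Xb = np.hstack([np.ones((X.shape[0], 1)), X])  # prepend a bias column
        coef, *_ = np.linalg.lstsq(Xb, T, rcond=None)  # least-squares stand-in
        self.w0, self.w = coef[0], coef[1:]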
Example #2
 def test_regression__SVM_learns_and_persists(self):
     
     s = SVM(self.seed_csv, self.default_data_csv)
     
     # Trains the SVM on the given attribute vector
     test_vec = [0.0,0.0,1.02378,0.86537,0.52096,0.0,0.0,0.86569,0.0,0.0,1.25239,0.0,0.0,0.10275,0.13453,0.14261,0.0,0.03815,1.45566,0.66246,0.0,0.14194,0.77532,0.0,0.0,0.0,0.0,0.0,0.85025,0.0,0.0,0.0,1.71965,0.0,1.12771,0.0,0.0,0.0,0.0,0.0,2.21248,0.0,0.0,0.0,1.02037,0.0,1.83393,0.0,0.0,0.0,0.0,0.0,3.22355,0.0,0.57061,0.0,0.61325,0.0,0.0,0.0,0.0,1.37463,0.0,0.0,0.31545,0.02344,1.8816,0.0,0.0,0.0,0.79182,1.63796,0.23462,0.0,0.0367,0.0,0.0,0.0,0.0,0.0,1.49467,0.53617,0.0,0.0,0.0,2.63275,0.15545,0.0,0.14912,0.0,0.05899,0.51951,0.08216,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4772,0.0,0.0,0.0,0.07795,0.0,0.0,0.0,3.14931,1.51006,0.35246,0.0,0.0,0.0,1.25366,0.0,2.49412,0.01714,0.0,0.0,0.0,3.87597,0.0,0.0,0.0,0.0,0.0,0.0,0.227,0.36734,0.66069,0.00453,0.0,2.28759,0.0,0.0,0.0,0.0,0.0,0.0,0.48053,0.0,0.0,0.10111,1.86308,1.92005,0.80752,0.0,0.9832,0.01579,0.0,0.0,0.0,0.60559,0.56333,0.0,0.0,0.4936,0.0,0.0,0.95457,0.0,0.0,0.0,0.0,0.0,0.0,0.52915,0.0,0.0,0.0,2.60508,0.75095,0.0,1.36717,0.65725,0.0,0.0,0.0,2.06952,0.0,1.63788,0.0,0.0,1.96992,0.0,0.0,1.42637,0.0,2.98227,0.0,2.31867,0.00732,0.0,0.0,0.0,0.15545,1.00066,4.4553,0.0,0.0,3.22233,0.0,0.04355,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.4462,0.05998,0.0,0.0,0.0,0.0,0.0,1.89134,2.39107,0.0,0.0,0.0,0.22906,0.92307,0.0,0.0,1.45769,0.0,3.38134,0.0,0.0,0.0,0.54563,2.92672,0.03028,0.0,0.0,0.52902,0.0,0.0,0.0,0.0,1.23814,0.0,0.0,0.15778,0.22876,0.0,0.0,0.38084,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.27341,0.43933,0.0,0.0,0.0,2.29396,0.0,0.0,1.31989,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.22661,0.37916,0.0,0.0,0.0,0.0,0.0,0.0,1.17301,0.0,0.0,0.0,0.0,0.0,0.28877,0.0,0.0,1.38802,0.0,0.45544,2.08168,0.0,0.0,0.0,0.0,0.0,0.0,0.95298,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.81061,0.05561,0.0,0.0,0.68155,0.0,2.03513,0.0,0.0,0.0,0.0,1.49072,0.0,0.0,0.49542,0.0,0.0,0.96604,0.0,0.0,0.83591,0.3401,1.84218,0.58514,0.0,0.0,0.0,0.0,0.0,0.17452,0.0,0.0,0.0,0.0,0.0,1.80195,0.43078,0.12558,0.0,0.6141,0.21727,0.0,0.0,0.0,0.38526,0.0,0.0,0.0,0.51386,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,4.35328,3.54682,0.0,0.64922,0.6055,0.0,0.0,0.65163,0.0,0.0,0.55294,1.03124,0.0,0.0,2.1915,0.0,0.0,0.0,0.50909,0.0,0.0,1.39711,0.0,0.0,0.05591,0.0,0.0,1.00389,0.0,0.0,0.8889,0.0,0.0,0.75612,0.58052,0.0,0.0,0.5278,0.0,0.0,0.0,0.1389,0.0,0.2428,0.0,0.0,0.0,0.0,1.8734,0.0,0.0,0.0,1.78977,0.0,0.0,2.2345,0.70865,1.14073,0.35422,0.0,0.0,0.0,3.66343,0.0,0.0,0.4082,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.87945,0.0,2.49299,3.00523,0.0,0.0,0.01184,0.0,0.01448,0.0,2.63768,2.13688,0.0,0.62724,2.99925,0.0,0.70589,0.81796,0.71982,3.14888,0.89087,0.0,0.0,0.0,0.0,0.0,0.0,0.0,3.26729,0.0,0.0,0.4211,0.04467,0.98575,0.0,0.51702,0.0,0.0,0.0,2.09895,0.0,0.66031,0.0,0.0,0.0,0.0,0.0,0.84436,0.0,1.44374,0.01572,1.12008,2.37219,0.0,1.79202,0.0,0.0,0.0,0.0,0.44119,0.62798,2.03361,0.0,0.0,0.0,0.0,1.03021,0.0,1.4258,1.22678,1.8237,2.87319,0.56243,0.0,0.28223,1.06628,0.0,0.0,0.0,0.0,0.89911,0.0,0.98212,0.65271,0.0,0.0,3.05927,0.09557,1.73109,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.45386,0.22503,0.0,0.0,0.0,0.32894,0.0,0.07809,0.0,0.0,0.0,0.0,2.46268,0.0,0.0,1.07242,0.0,0.0,1.02452,0.0,0.0,0.0,1.36057,0.36657,0.0,3.37368,0.0,0.0,0.0,0.0,1.42513,0.08506,1.41931,0.0,0.0,2.42289,0.0,0.0,2.26081,1.08687,0.0,0.0,1.92282,0.0,0.0,0.34711,0.92995,0.0,0.0,0.0,1.00544,0.0,0.0,1.84394,0.0,0.0,0.0,0.0,0.54121,1.87131,0.0,0.0,1.90903,0.0,0.00192,0.0,0.0,0.0,0.0,0.0,0.79236,0.0,0.28244,0.0,0.0,0.38696,0.0,0.52426,1.24895,0.0,1.43088,2.01694,0.0,0.25583,2.92475,1.31583,0.0,0.0,0.41913,3.73163,0.67996,0.00378,0.0,2.05286,0.0,0.0,0.0,0.85315,1.5056,0.04749,0.0249
1,0.08462,0.0,0.0,0.0,2.09839,0.0,0.88255,3.86387,0.0,1.24396,0.0,0.0,0.0,0.0,0.0,0.0,0.45548,1.01171,0.0,0.0,0.0,0.06008,0.0,0.0,0.32583,0.11706,1.73475,0.46403,0.88229,0.0,0.0,2.65293,0.0,0.0,0.14577,0.16059,1.15921,0.76016,0.0,1.13632,0.0,1.51864,0.0,0.0,1.12836,0.91533,1.01396,0.0,0.0,0.0,0.0,0.72904,0.0,0.0,1.30529,0.0,2.56936,0.0,0.0,0.0,0.0,0.6739,0.66264,0.0,0.0,0.0,0.0,0.0,0.0,1.56187,0.0,0.61619,0.0,0.0,1.54011,0.0,0.0,0.0,0.50495,0.0,0.0,1.09793,0.0,0.0,0.0,1.586,2.60731,0.89228,1.74828,0.0,0.80611,0.26768,0.0,0.0,0.0,0.97478,0.0,0.82535,0.0,0.0,0.0,0.0,0.0,0.0,0.59424,0.0,3.40208,0.29945,0.0,0.0,1.40285,0.0,2.53214,0.89456,0.35867,0.01142,0.0,0.0,1.72754,0.0,0.0,0.0,1.96116,0.0,0.0,0.0,0.67893,0.93432,0.0,1.17659,0.34302,0.0,0.0,0.0,0.08902,0.79611,0.91685,0.0,0.4134,0.0,1.26203,0.0,0.0,1.0672,0.0,0.0,0.0,0.0,1.87626,0.0,0.07163,0.0,2.77396,0.8359,1.01072,0.69999,0.67118,0.0,0.02703,3.21478,0.17149,1.29662,1.4282,0.22523,0.88462,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.15945,0.0,0.0,0.0,1.20521,0.0,0.0,1.14212,0.0,0.0,1.06764,0.0,0.0,0.48324,0.0,0.0,0.93048,0.0,0.0,0.04668,1.24215,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.63506,0.0,0.0,2.0197,0.0,0.0,0.64068,0.0,0.67773,0.07286,0.0,0.07325,0.0,0.61848,0.0,0.51642,0.0,0.13415,0.0,0.24762,0.0,0.0,1.138,0.0,0.05095,0.09744,2.72067,0.0,0.0,2.17862,2.24626,0.78026,0.0,0.0,0.0,0.91128,0.0,2.93209,0.0,1.68081,0.0,0.0,0.36658,0.02664,0.0,0.0,0.0,0.08065,0.0,0.02219,0.0,0.0,0.15345,0.0,1.36435,0.0,0.0,1.28141,0.0,0.13168,0.07221,0.21724,0.59902,0.0,0.64866,2.38968,0.85281,0.0,0.0,0.36304,0.0,0.0,0.0,0.08364,0.0,1.41225,1.02908,0.0,1.03806,0.7982,0.0,3.80226,0.85739,0.0,0.0,0.0,0.87857,0.16096,0.0,1.59718,2.50409,0.0,0.0,0.0,3.12388,0.0,1.52823,0.0,0.0,3.08214,0.0,0.74487,0.2059,0.0,0.07696,0.0,0.0,0.0,1.26256,0.0,0.0,0.0,0.0,0.45868,4.24342,0.0,0.0,0.00141,0.0,1.17591,0.0,0.0,0.0,0.99489,2.00839,0.0,0.0,0.0,5.00877,1.04824,0.0,0.0,0.0,0.0,0.0,0.0,0.82028,0.0,1.21763,0.0,0.14246,0.0,0.0,3.0032,0.90946,0.0,0.0,4.64447,0.0,0.0,0.0,0.0,0.26529,0.94394,0.0,0.99551,0.0,0.0,0.183,1.53081,1.37277,0.0,0.22932,3.72266,0.0,0.0,0.0]
     true_class = 300.0
     
     s.learn([test_vec], [true_class])
     
     del s
     
     # Checks the knowledge is correctly stored in the CSV file
     expected_vec = [true_class] + list(deepcopy(test_vec))
     
     try:
         with open(self.seed_csv) as myfile:
             csvread = csv.reader(myfile)
             
             last_line = []
             # Get the last value
             for line in csvread:
                 last_line = line
             
             last_line = deepcopy([float(i) for i in last_line])
             
             self.assertEqual(expected_vec, last_line)
                 
     except IOError:
         print "Could not open " . self.seed_csv
Example #3
def svmTest(feature_len, all_lines, all_features, all_labels):
    counts = {}
    for i in range(10):
        rate = 0
        print("Test %d:" % (i + 1))
        train_features = all_features[0:int(0.8 * len(all_features))]
        train_labels = all_labels[0:int(0.8 * len(all_features))]
        test_features = all_features[int(0.8 * len(all_features)):]
        test_labels = all_labels[int(0.8 * len(all_features)):]
        length = len(test_labels)
        for C in range(50, 61, 1):
            rate = 0
            new_svm = SVM(train_features,
                          train_labels,
                          C=C,
                          function='RBF',
                          d=0.53)
            # print("Train:")
            new_svm.train()
            # print("\nPredict:", end = "\n")
            for j in range(0, length):
                res = new_svm.predict(test_features[j])
                if res == test_labels[j]:
                    rate += 1
            print("C = %f: " % C, end=" ")
            print(rate / length)
            if C not in counts:
                counts[C] = rate / length
            else:
                counts[C] += rate / length
        all_features, all_labels = now_provider.getFeatureAndLabel(
            all_lines, feature_len)
    for x, y in counts.items():
        print(x, y)
Example #4
def train_and_test(train_x, test_x, training_class, testing_class, kernel):
    classes = Counter(training_class)
    classes = classes.keys()
    total_accuracy = []
    for label in classes:
        train_y = []
        for t in training_class:
            if t == label:
                train_y.append(1.0)
            else:
                train_y.append(-1.0)
        train_y = np.array(train_y)

        test_y = []
        for t in testing_class:
            if t == label:
                test_y.append(1.0)
            else:
                test_y.append(-1.0)
        test_y = np.array(test_y)

        classifier = SVM(kernel=kernel, C=0.1)
        classifier.train(train_x, train_y)
        y_predict = classifier.test(test_x)
        correct = np.sum(y_predict == test_y)
        print("%d out of %d predictions correct" % (correct, len(y_predict)))
        accuracy = correct / len(y_predict)
        print("accuracy is {}".format(accuracy))
        total_accuracy.append(accuracy)
    mean_accuracy = np.mean(np.array(total_accuracy))
    print('mean accuracy is {}'.format(mean_accuracy))

    return mean_accuracy
Example #5
def main(C=1.0, epsilon=0.001):
    # Split data
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target

    class_chosen = 1 # only this class is chosen
    y = np.asarray([-1 if y[i]!=class_chosen else 1 for i in range(y.shape[0])])

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)

    # Initialize model
    model = SVM(X_train, y_train, C=C, tolerance=epsilon)

    # Fit model
    support_vectors, iterations = model.fit()
    # Support vector count
    sv_count = support_vectors.shape[0]

    # Make prediction
    y_hat = model.predict(X_test)

    # print(y_hat.shape, y_test.shape)

    # Calculate accuracy
    acc = calc_acc(y_test, y_hat)

    print("Support vector count: %d" % (sv_count))
    # print("bias:\t\t%.3f" % (model.b))
    # print("w:\t\t" + str(model.w))
    print("accuracy:\t%.3f" % (acc))
    print("Converged after %d iterations" % (iterations))
Example #6
def crossValidation(X, Y, k, kernel=np.dot):
  # accuracy of `result` measured against the ground truth `answer`
  def accuracyRate(result, answer):
    return 1.0 - sum(abs(result - answer)/2) / float(len(result))

  n = len(X)   # number of data points
  l = n // k   # number of data points per fold (integer division)
  ac = 0.0     # accumulated accuracy

  # run k-fold cross-validation and accumulate the accuracy
  for i in range(k):
    # l test vectors and their classes
    testVectors = X[l*i:l*(i+1)]
    classForTestVectors = Y[l*i:l*(i+1)]
    # n-l training vectors and their classes
    learningVectors = np.vstack((X[:l*i], X[l*(i+1):]))
    classForlearningVectors = np.hstack((Y[:l*i], Y[l*(i+1):]))

    # compute the support vectors from the training vectors
    svm = SVM(learningVectors, classForlearningVectors, kernel)
    # classify the test vectors with the learned decision function
    result = [svm.discriminate(t) for t in testVectors]
    # add this fold's accuracy
    ac += accuracyRate(result, classForTestVectors)

  # return the mean accuracy
  return ac / k
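Assuming that SVM(learningVectors, classes, kernel) constructor, a toy invocation might look like this (hypothetical data, shuffled so that the contiguous folds mix both classes):

import numpy as np

# two Gaussian blobs with labels in {+1, -1}, evaluated with 5-fold CV
X = np.vstack((np.random.randn(50, 2), np.random.randn(50, 2) + 3))
Y = np.hstack((np.ones(50), -np.ones(50)))
idx = np.random.permutation(100)
X, Y = X[idx], Y[idx]
print(crossValidation(X, Y, k=5))  # kernel defaults to np.dot (linear)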
Example #7
 def __init__(self,
              feature_size,
              label_size,
              lambda_reg=0.1,
              max_iter=5000,
              mode='ova'):
     self._feature_size = feature_size
     self._label_size = label_size
     self._lambda_reg = lambda_reg
     self._max_iter = max_iter
     self._models = list()
     self._mode = mode
     if self._mode == 'ova':
         for i in range(self._label_size):
             self._models.append(
                 SVM(feature_size=self._feature_size,
                     lambda_reg=self._lambda_reg,
                     max_iter=self._max_iter))
     elif self._mode == 'ava':
         for i in range(self._label_size - 1):
             row = []
             for j in range(i + 1, self._label_size):
                 row.append(
                     SVM(feature_size=self._feature_size,
                         lambda_reg=self._lambda_reg,
                         max_iter=self._max_iter))
             self._models.append(row)
     else:
         print('Parameter error: only one-vs-all and all-vs-all are supported')
         exit(1)
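The constructor only builds the model table; in 'ova' mode, prediction is typically the argmax over the per-class margins. A sketch under the assumption that each inner SVM exposes a real-valued score(x) (an assumed method name, not shown in this excerpt):

 def predict_ova(self, x):
     # pick the class whose one-vs-all SVM reports the largest margin;
     # score(x) is an assumed method name, not confirmed by the excerpt
     scores = [model.score(x) for model in self._models]
     return max(range(self._label_size), key=lambda c: scores[c])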
Example #8
def main(filename='data\\iris-virginica.txt',
         C=1.0,
         kernel_type='linear',
         epsilon=0.001):
    # Load data
    (data, _) = readData('%s\\%s' % (filepath, filename), header=False)
    data = data.astype(float)

    # Split data
    X, y = data[:, 0:-1], data[:, -1].astype(int)

    # Initialize model
    model = SVM()

    # Fit model
    support_vectors, iterations = model.fit(X, y)

    # Support vector count
    sv_count = support_vectors.shape[0]

    # Make prediction
    y_hat = model.predict(X)

    # Calculate accuracy
    acc = calc_acc(y, y_hat)

    print("Support vector count: %d" % (sv_count))
    print("bias:\t\t%.3f" % (model.b))
    print("w:\t\t" + str(model.w))
    print("accuracy:\t%.3f" % (acc))
    print("Converged after %d iterations" % (iterations))
Example #9
def part_b(org_train, org_train_labels, best_lr):
    """
    This function implements part B
    :return: best learning rate
    """
    print "part b - start"
    c_lst = np.array(list(range(1, 999, 10))).astype("float32") / 1000.0

    validating_acc_lst = []
    for c in c_lst:
        mean_acc = 0
        for i in range(10):
            svm = SVM(org_train.shape[1])
            svm.train(org_train, org_train_labels, best_lr, C=c, T=1000)
            mean_acc += svm.test(org_validation, org_validation_labels)
        validating_acc_lst.append(mean_acc / (i + 1))
    plt.figure()
    plot_graph(validating_acc_lst, c_lst, "q3_part_b", "",
               "Accuracy vs C for SVM", "Accuracy", "C")
    best_acc_indx = validating_acc_lst.index(max(validating_acc_lst))
    best_c = c_lst[best_acc_indx]
    print "The best C is {} for accuracy: {}".format(best_c,
                                                     max(validating_acc_lst))
    print "part b - done"
    return best_c
Example #10
def part_a(org_train, org_train_labels):
    """
    This function calculates part A
    :return: best learning rate
    """
    print "part a - start"
    learning_rate_lst = np.array(list(range(1, 99,
                                            1))).astype("float32") / 100.0

    validating_acc_lst = []
    for lr in learning_rate_lst:
        mean_acc = 0
        for i in range(10):
            svm = SVM(org_train.shape[1])
            svm.train(org_train, org_train_labels, lr, T=1000)
            mean_acc += svm.test(org_validation, org_validation_labels)
        validating_acc_lst.append(mean_acc / (i + 1))
    plt.figure()
    plot_graph(validating_acc_lst, learning_rate_lst, "q3_part_a", "",
               "Accuracy vs Learning Rate for SVM", "Accuracy",
               "Learning Rate")
    best_acc_indx = validating_acc_lst.index(max(validating_acc_lst))
    best_lr = learning_rate_lst[best_acc_indx]
    print "The best learning rate is {} for accuracy: {}".format(
        best_lr, max(validating_acc_lst))
    print "part a - done"
    return best_lr
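part_a/part_b above (and part_c further below) assume an SGD-style SVM; org_train, org_validation and friends come from the surrounding module. A minimal sketch under that assumption, not the original class:

import numpy as np

class SVM:
    def __init__(self, dim):
        self.w = np.zeros(dim)

    def train(self, X, y, lr, C=1.0, T=1000):
        # hinge-loss SGD: T single-sample updates with step size lr
        for _ in range(T):
            i = np.random.randint(len(X))
            if y[i] * np.dot(self.w, X[i]) < 1:          # margin violated
                self.w = (1 - lr) * self.w + lr * C * y[i] * X[i]
            else:
                self.w = (1 - lr) * self.w

    def test(self, X, y):
        # accuracy of sign(w . x) against labels in {+1, -1}
        return np.mean(np.sign(X @ self.w) == y)

    def get_weights(self):
        return self.w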
Example #11
def Task2():
    # load data
    print('Loading data ...')
    X_train, y_train = loadData(file_path + '\\spamTrain.mat')
    X_test, y_test = loadData(file_path + '\\spamTest.mat')
    # plotData(X_train, y_train, title='Task 2')

    print('Program paused. Press enter to continue.\n')
    input()

    # training
    svm = SVM()
    print('Number of samples: {}'.format(X_train.shape[0]))
    print('Number of features: {}'.format(X_train.shape[1]))
    print('Training Linear SVM ...')
    C = 1
    sigma = 0.01
    model = svm.svmTrain_SMO(X_train, y_train, C, max_iter=20)
    pred_train = svm.svmPredict(model, np.mat(X_train), sigma)
    acc_train = 1 - np.sum(np.abs(pred_train - y_train)) / len(y_train)
    print('Train accuracy: {}'.format(acc_train))

    # test
    print('Number of samples: {}'.format(X_test.shape[0]))
    print('Number of features: {}'.format(X_test.shape[1]))
    pred_test = svm.svmPredict(model, np.mat(X_test), sigma)
    acc_test = 1 - np.sum(np.abs(pred_test - y_test)) / len(y_test)
    print('Test accuracy: {}'.format(acc_test))
Example #12
def build(kernel, metric, keys_limit, svm_C, logs):
    trainX = genfromtxt('input/arcene_train.data', delimiter=' ')
    trainY = genfromtxt('input/arcene_train.labels', delimiter=' ')
    validX = genfromtxt('input/arcene_valid.data', delimiter=' ')
    validY = genfromtxt('input/arcene_valid.labels', delimiter=' ')

    keys = metric.build(trainX.transpose(),
                        trainY,
                        logs=logs,
                        limit=keys_limit)

    tX = []
    for x in trainX:
        tX.append(np.take(x, keys))

    tX = np.array(tX)

    clf = SVM(kernel=kernel.kernel, C=svm_C)
    clf.fit(tX, trainY)

    vX = []
    for x in validX:
        vX.append(np.take(x, keys))

    vX = np.array(vX)

    predict_arr = [clf.predict(x) for x in vX]

    confusion_matrix = Statistic.get_metrics(predict_arr, validY)
    f_measure = Statistic.get_f_measure(confusion_matrix)

    return keys, confusion_matrix, f_measure
Example #13
    def run_test(self, X, y, kernel):
        n = int(X.shape[0] * 0.8)
        K = self.gram_matrix(X, kernel)

        svm = SVM(kernel, 1.0, K)
        svm.fit(np.arange(n), y[:n])
        score = svm.score(np.arange(n, X.shape[0]), y[n:])

        return score
Example #15
    def test_simple(self):
        X = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0],
                      [1.0, 1.0], [2.4, 2.4], [2.6, 2.6], [4.0, 4.0]])
        y = np.array([0.0, 0.0, 1.0, 1.0])
        K = self.gram_matrix(X, kernels.linear)
        svm = SVM(kernels.linear, 1.0, K)
        svm.fit(np.arange(4), y)

        result = svm.predict(np.arange(4, 8))
        np.testing.assert_allclose(result, [0, 0, 1, 1])
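Examples #13 and #15 hand SVM a precomputed Gram matrix and then address samples by index; gram_matrix is presumably along these lines (a sketch, assuming kernel takes two row vectors):

    def gram_matrix(self, X, kernel):
        # K[i, j] = kernel(x_i, x_j) for every pair of rows of X
        n = X.shape[0]
        K = np.empty((n, n))
        for i in range(n):
            for j in range(n):
                K[i, j] = kernel(X[i], X[j])
        return K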
Example #17
def test_image():
    X, y = get_image_data()
    X = np.column_stack([[1] * X.shape[0], X])
    X_train,X_test,y_train,y_test = \
       train_test_split(X,y,test_size=0.2,random_state = np.random.RandomState(42))
    clf = SVM()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    correct_rate = 1 - np.mean(y_test != y_pred)
    print('correct_rate:', correct_rate)
Example #18
def main():
    predictorRF = RandomForest()
    predictorSVM = SVM()
    predictorLogistic_Regression = Logistic_Regression()
    print('========Random Forest============')
    predictorRF.run()
    print('========SVM============')
    predictorSVM.run()
    print('========Logistic Regression============')
    predictorLogistic_Regression.run()
Example #19
def main():
    print('Linear data')
    X, y = linear_data(500)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    # Initialize
    model = SVM(kernel='rbf')
    # Train
    acc = train(model, X_train, X_test, y_train, y_test)
    show_data(X_test, y_test)
    show_boader(model, X_train)

    print('Experiment 2: non-linear data')
    Ns = [50, 100, 500, 1000]
    print('Experiment 2-1: different kernels')
    models = [SVM(kernel='rbf'), SVM(kernel='sigmoid'), SVM(kernel='linear')]
    kernels = ['RBF', 'Sigmoid', 'Linear']
    df_score = pd.DataFrame(index=Ns, columns=kernels)
    df_time = pd.DataFrame(index=Ns, columns=kernels)
    for N in Ns:
        print('Number of samples: %d' % N)
        X, y = sin_data(N)
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        show_data(X, y)
        plt.show()
        for model, kernel in zip(models, kernels):
            print(kernel)
            acc = train(model, X_train, X_test, y_train, y_test)
            df_score.loc[N, kernel] = acc
            df_time.loc[N, kernel] = model.elapsed_time
    print(df_score)
    print(df_time)
    df_score.to_csv('accuracy_per_kernel.csv')
    df_time.to_csv('training_time_per_kernel.csv')

    print('Experiment 2-2: different parameters')
    X, y = sin_data(500)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    show_data(X, y)
    plt.show()
    Cs = [2**a for a in range(-2, 3)]
    gammas = [2**a for a in range(-4, 2)]
    df_score = pd.DataFrame(index=Cs, columns=gammas)
    df_time = pd.DataFrame(index=Cs, columns=gammas)
    for C in Cs:
        for gamma in gammas:
            print('C: %.2f, gamma: %.4f' % (C, gamma))
            model = SVM(C=C, gamma=gamma)
            # Train
            acc = train(model, X_train, X_test, y_train, y_test)
            df_score.loc[C, gamma] = acc
            df_time.loc[C, gamma] = model.elapsed_time
    print(df_score)
    print(df_time)
    df_score.to_csv('accuracy_per_parameters.csv')
    df_time.to_csv('training_time_per_parameters.csv')
Example #20
 def __init__(self):
     self.standard_data = Standardiser()
     print("standardiser started")
     self.standard_data.loadScale()
     print("scale loaded")
     self.clf = SVM()
     print("svm loaded")
     self.clf.loadModel()
     print("model loaded")
     self.outputfile = 'svmoutput.csv'
     self.forecast_loc = 'finalweather.csv'
Example #21
    def fit(self, X_idx, y):
        self.classes = np.unique(y)
        logging.debug('Fitting %s data points with %s different classes '\
                      'with multiclass svm', X_idx.shape[0], len(self.classes))

        self.svms = []
        for class_a, class_b in itertools.combinations(self.classes, 2):
            filtered_X_idx, filtered_y = self.filter_data(X_idx, y, class_a, class_b)

            svm = SVM(self.kernel, self.C, self.K)
            svm.fit(filtered_X_idx, filtered_y)
            self.svms.append((class_a, class_b, svm))
Example #22
def SVMResult(vardim, x, bound, dataset):
    X = dataset.loc[dataset['split'] == 'train'].iloc[:, 0:-2].values
    y = dataset.loc[dataset['split'] == 'train'].iloc[:, -2].values
    val_X = dataset.loc[dataset['split'] == 'val'].iloc[:, 0:-2].values
    val_y = dataset.loc[dataset['split'] == 'val'].iloc[:, -2].values
    c = abs(x[0])
    g = abs(x[1])
    # f = x[2]  # four-parameter variant
    svm = SVM(C=c, gamma=g)
    predictor = svm.train(X, y)
    y_bar = predictor.predict_vec(val_X)

    return score(y_bar, val_y)
Example #23
def cross_validation(x_train, y_train, C, gamma):
    model = SVM(C=C, kernel='rbf', gamma=gamma, tol=1e-2)
    cross = lambda arr, sz: [arr[i:i + sz] for i in range(0, len(arr), sz)]
    x_cross_val = np.array(cross(x_train, 160))
    y_cross_val = np.array(cross(y_train, 160))
    indices = np.array(range(5))
    score = 0
    for i in range(5):
        curr_indices = np.delete(indices, i)
        x_curr_valid = x_cross_val[i]
        y_curr_valid = y_cross_val[i]
        x_curr_train = np.vstack(x_cross_val[curr_indices])
        y_curr_train = y_cross_val[curr_indices].ravel()
        model.fit(x_curr_train, y_curr_train)
        model.number_support_vectors()
        y_curr_valid_predict = model.predict(x_curr_valid, x_curr_train,
                                             y_curr_train)
        curr_score = model.score_error(y_curr_valid_predict, y_curr_valid)
        print("i =", i, ", score error =", curr_score)
        score += curr_score
    print("Average score: ", score / 5)
    return score / 5
Example #24
def real_data_train():
    x, y = create_array_real_data()
    shuffle_index = np.random.permutation(len(y))
    x = x[shuffle_index]
    y = y[shuffle_index]
    # 1000 elements: 800 for training, 200 for testing
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=42)
    # C = [1, 10]
    # gamma = [0.01, 0.1, 0.5, 1.0]
    # average_error = np.zeros((len(C), len(gamma)))
    # for i in range(len(C)):
    #     for j in range(len(gamma)):
    #         print("Cross-validation for parameters C = ", C[i], ", gamma = ", gamma[j])
    #         average_error[i][j] = cross_validation(x_train, y_train, C=C[i], gamma=gamma[j])
    # find C = 1, gamma = 0.01
    print("Create model C = ", 1000, ", gamma = ", 1)
    model = SVM(C=1, kernel='rbf', gamma=0.01, tol=1e-2)
    print("Fit model with train sequence")
    model.fit(x_train, y_train)
    model.number_support_vectors()
    print("Predict model on test sequence")
    y_test_predict = model.predict(x_test, x_train, y_train)
    score = model.score_error(y_test_predict, y_test)
    print("Score error = ", score)
Example #25
def main():
    dataset_dir = '../data/student-mat.csv'
    select_col = ['school', 'sex', 'age', 'address', 'famsize', 'Pstatus', 'Medu', 'Fedu', 'Mjob', 'Fjob', 'reason',
                  'guardian', 'traveltime', 'studytime', 'failures', 'schoolsup', 'famsup', 'paid', 'activities',
                  'nursery', 'higher', 'internet', 'romantic', 'famrel', 'freetime', 'goout', 'Dalc', 'Walc', 'health',
                  'absences',]
    #               'G1', 'G2']
    select_col = ['G1', 'G2']
    train_x, train_y, test_x, test_y = data_loader(dataset_dir, select_col=select_col)
    svm = SVM()
    svm.fit(train_x, train_y)
    predict_y = svm.predict(test_x)
    result = evaluate(test_y, predict_y)
    print(result)
Example #26
 def testQP(self):
     from SVM import SVM
     dir_path = os.path.dirname(os.path.realpath(__file__))
     X_train = np.genfromtxt(dir_path + "/datasets/X_train.csv",
                             delimiter=',')
     y_train = np.genfromtxt(dir_path + "/datasets/y_train.csv",
                             delimiter=',')
     y_train[y_train == 0] = -1
     svm = SVM(method='qp_hard')
     acc = np.mean(cross_validation(svm, X_train, y_train))
     print("SVM with QP method (hard margin): accuracy =", acc)
     svm = SVM(method='qp_soft')
     acc = np.mean(cross_validation(svm, X_train, y_train))
     print("SVM with QP method (soft margin): accuracy =", acc)
Example #27
def auto_get_parameter(X_train, y_train, X_val, y_val):
    learning_rates = [1e-7, 5e-5]
    regularization_strengths = [5e4, 1e5]
    best_parameter = None
    best_val = -1
    for lr in learning_rates:
        for reg in regularization_strengths:
            svm = SVM()
            # argument order assumed from context; the original excerpt
            # shadowed its loop variables and referenced an undefined j here
            y_pred = svm.predict(X_train, y_train, lr, reg, 200, 1500, True)
            acc_val = np.mean(y_val == y_pred)
            if best_val < acc_val:
                best_val = acc_val
                best_parameter = (lr, reg)
    print('Best validation accuracy achieved during parameter search: %f' % best_val)
    return best_parameter
Example #28
def test_multi():
    X, y = get_multi_data()
    X = np.column_stack([[1] * X.shape[0], X])
    X_train,X_test,y_train,y_test = \
       train_test_split(X,y,test_size=0.2,random_state = np.random.RandomState(42))
    clf = SVM()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    correct_rate = 1 - np.mean(y_test != y_pred)
    print('correct_rate:', correct_rate)

    plot_samples(X, y)
    print(clf.w)
    for w in clf.w:
        plot_line(X[:, 1:], w)
Example #29
def main(filename='iris-virginica.txt', C=1.0, kernel_type='linear', epsilon=0.001):

    # Load data
#    (data, _) = readData('%s/%s' % (filepath, filename), header=False)
#    data = data.astype(float)
    data = pd.read_excel("C:/Users/Niku/Documents/dataset/arrays.xlsx")
    print(data.shape)
    X = data[0:1500]
    X = np.array(X)
    y = X[:, 35]
    X = X[:, 0:35]
    print(X.shape)
    #y = np.matrix(y)
    y = np.array(y)
    y[y == 0] = -1
    y1 = np.matrix(y)
    print(y.shape, X.shape, y1.shape)
    # Split data
#    X, y = data[:,0:-1], data[:,-1].astype(int)
#    print (X.shape)
#    print (y.shape)
#    X1 = np.matrix(X)
#    y1 = np.matrix(y)
#    print (X1.shape)
#    print (y1.shape)
#    print(type(X))
#    print(type(y))
    # Initialize model
    model = SVM()

    # Fit model
    support_vectors, iterations = model.fit(X, y)

    # Support vector count
    sv_count = support_vectors.shape[0]

    # Make prediction
    y_hat = model.predict(X)

    # Calculate accuracy
    acc = calc_acc(y, y_hat)

    print("Support vector count: %d" % (sv_count))
    print("bias:\t\t%.3f" % (model.b))
    print("w:\t\t" + str(model.w))
    print("accuracy:\t%.3f" % (acc))
    print("Converged after %d iterations" % (iterations))
Example #30
    def svmdef(self):
        print("SVM Start")
        self.trainname.setText("SVM")
        file = self.trainfile.text()
        print(file)
        start = time.time()
        s = SVM()
        a = s.accuracy(file)

        end = time.time()
        t = (end - start)

        self.traintime.setText(str(round(t, 2)) + " (sec)")

        self.label_4.setText("Accuracy")
        self.trainstatus.setText(str(round(a, 3)))
        AccuracyStore.store('svm', a)
Example #31
def best_params():
    lr_list = [0.1, 0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005]
    acc_max = 0
    lr_max = 0
    lamda_max = 0
    lambda_list = [0.1, 0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005]
    for lr_val in lr_list:
        for lmda in lambda_list:
            clf = SVM(lr=lr_val, lamda=lmda)
            clf.fit(X_train, Y_train)
            predictions = clf.predict(X_test)
            acc = accuracy(Y_test, predictions)
            if acc > acc_max:
                acc_max = acc
                lr_max = lr_val
                lamda_max = lmda
    return (lr_max, lamda_max, acc_max)
Example #32
def part_c(org_train, org_train_labels, best_c, best_lr):
    """
    This function implements part C
    :param best_c: Best C
    :param best_lr: Best Learning rate
    :return: the SVM
    """
    print "part c - start"
    svm = SVM(org_train.shape[1])
    svm.train(org_train, org_train_labels, best_lr, C=best_c, T=20000)

    # Save the weights image
    plt.figure()
    plt.imshow(np.reshape(svm.get_weights(), (28, 28)),
               interpolation='nearest')
    print "part c - done"
    return svm
Example #33
    def Step_B(self, T0_list, T1_list):
        '''
        Use an SVM to partition the design space, then refine the separating
        hyperplane according to the parameter settings in T1_list
        '''
        if len(T0_list) != len(T1_list):
            raise ValueError('T0_list and T1_list must have the same length')

        # analytic partition function
        f = self.f
        data = np.loadtxt(self.logPath + '/A_Samples.txt')
        samples = data[:, 0:f.dim]
        mark = data[:, f.dim + 1]
        Kernal_Gau = lambda x, y: np.exp((-np.linalg.norm(x - y)**2) / 80)
        Kernal_Poly = lambda x, y: (np.dot(x, y) + 1)**7
        svm = SVM(5,
                  kernal=Kernal_Gau,
                  path=self.logPath,
                  fileName='SVM_Step_B.txt')
        print('Training the initial SVM...')
        svm.fit(samples, mark, maxIter=20000, maxAcc=1.1)
        test28(svm=svm)

        # record the sample count after each infill round
        pointNum = np.zeros(len(T1_list) + 1)
        pointNum[0] = samples.shape[0]

        for k in range(len(T1_list)):
            print('\nInfill round %d...' % (k + 1))
            new_x = svm.infillSample4(T0_list[k], T1_list[k], f.min, f.max,
                                      [12, 6, 9, 9, 9])
            if new_x is None:
                print('No points were added with T1 = %.2f' % T1_list[k])
                pointNum[k + 1] = samples.shape[0]
                continue
            else:
                num = new_x.shape[0]

            new_mark = np.zeros(num)
            for i in range(num):
                new_mark[i] = f.isOK(new_x[i, :])
            samples = np.vstack((samples, new_x))
            mark = np.append(mark, new_mark)
            print('Training the SVM...')
            svm.fit(samples, mark, 20000, maxAcc=1.1)

            test28(svm=svm)
            pointNum[k + 1] = samples.shape[0]
            print('Sample count after this round: %d' % pointNum[k + 1])

        value = np.zeros(samples.shape[0])
        for i in range(samples.shape[0]):
            value[i] = f.aim(samples[i, :])
        value = value.reshape((-1, 1))
        mark = mark.reshape((-1, 1))
        storeData = np.hstack((samples, value, mark))
        np.savetxt(self.logPath + '/B_Samples.txt', storeData)

        print('Sample counts per round:')
        print(pointNum)
        print('Infill finished')
Example #34
# (the start of this example is truncated in the source: the literals that
#  build X and y are cut off, leaving only trailing [1] rows)
X = np.array(X).transpose()
print(X.shape)


y = np.array(y).flatten()
y[y == 0] = -1
print(y.shape)

svms = SVM(X, y)
svms.train()
print(len(svms.supportVector))
for i in range(len(svms.supportVector)):
    t = svms.supportVector[i]
    print(svms.x[:, t])
svms.prints_test_linear()
Example #35
 # Load the vocabulary list
 obj1.loadVocabList()
 '''
 Preprocess the email data:
 1) lowercase every word in the email
 2) replace every number with 'number'
 3) replace every email address with 'emailaddr'
 4) replace every $ sign with 'dollar'
 5) replace every URL with 'httpaddr'
 6) strip out the HTML tags
 7) drop every symbol that is not a letter, digit or underscore, and collapse
    tabs and runs of spaces into a single space
 (a regex sketch of these steps follows this example)
 '''
 obj1.proMailData()
 # Porter stemming
 obj1.porterStemmer()
 obj1.getWordIndices()
 print(obj1.wordIndices)
 #print(obj1.wordIndices)
 #print(len(obj1.wordIndices))
 #print(len(set(obj1.wordIndices)))
 obj1.getFeatures()
 #print(obj1.mailFeatures.T)
 print(shape(obj1.mailFeatures.T))
 svmObj = SVM("data/svm/spamTrain.mat", "data/svm/spamTest.mat", obj1.mailFeatures.T)
 svmObj.processData()
 c = 100
 t = 0
 svmObj.trainModel(c, t)
 t = 2
 svmObj.trainModel(c, t)
 print "耗费的时间为:", time.time() - time_ben
Example #36
			pairwiseGTA = Weight.load(args.weight[0])
			GTA_weight = Weight(gta_profs, pairwiseGTA)
			GTA_clusters = GTA_weight.cluster(cluster_type, d)
			GTA_weight.weight(GTA_clusters)
			# Weight Virus
			pairwiseViral = Weight.load(args.weight[1])
			virus_weight = Weight(viral_profs, pairwiseViral)
			virus_clusters = virus_weight.cluster(cluster_type, d)
			virus_weight.weight(virus_clusters)

		# Create SVM
		c = args.c[0]
		kernel = args.kernel[0]
		kernel_var = float(args.kernel[1])

		svm = SVM(gta_profs, viral_profs, c, kernel, kernel_var)

		# Print support vectors
		if args.svs:
			svm.show_svs()

		# Xval	
		if args.xval:
			nfolds = args.xval
			if args.weight:
				result = svm.xval(nfolds, NREPS, pairwiseGTA, pairwiseViral, cluster_type, d)
			else:
				result = svm.xval(nfolds, NREPS)
			if mini:
				print("GTA Correct\tViral Correct")
				print("%.2f\t%.2f" % (result[0], result[1]))
Example #37
def main():
    dm_model = Doc2Vec.load('400_pvdm_doc2vec.d2v')
    dbow_model = Doc2Vec.load('400_pvdbow_doc2vec.d2v')

    #Load datasets for classifying
    path = 'datasets/'
    doc2vec_vector_size = 400
    files = [f for f in listdir(path) if isfile(join(path,f))]
    files.pop(0)

    data_loader = DataLoader(path)

    domains = data_loader.csv_files


    names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}

    domain_features = data_loader.get_feature_matrix(names)
    domain = domain_features.pop(0)
    x, y = domain
    #get size
    n_total_documents = 0

    for domain in domain_features:
        n_total_documents+=len(domain[0])
        x = numpy.hstack((x, domain[0]))
        y = numpy.hstack((y, domain[1]))
    x, y = data_loader.create_random_samples(x, y, train_p=.8, test_p=.2)
    train_x, test_x = x
    train_y, test_y = y
    transformed_train_x = data_loader.get_transformed_features(train_x, sparse=True, tfidf=True, add_index_vector=False)
    transformed_test_x = data_loader.get_transformed_features(test_x, sparse=True, tfidf=True)
    all_features = numpy.zeros(shape=(n_total_documents, 800))
    all_labels = numpy.asarray([])

    i = 0

    dbow_dm_train_x = numpy.zeros((train_x.shape[0], 2*doc2vec_vector_size))
    dbow_dm_test_x = numpy.zeros((test_x.shape[0], 2*doc2vec_vector_size))

    """
        Set up the feature for the SVM by iterating through all the word vectors.
        Pre process each vector and then feed into doc2vec model, both the distributed memory
        and distributed bag of words. Concatenate the vectors for better classification results
        as per paragraph to vector paper by Mikolv.
    """
    for feature_vector in train_x:
        preprocessed_line = list(Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_train_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_train_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i+=1

    """
        Do the same as above but for the test set.
    """

    i = 0

    for feature_vector in test_x:
        preprocessed_line = list(Doc2vec.Doc2VecTool.preprocess_line(feature_vector))
        dbow_dm_test_x[i, 0:400] = dm_model.infer_vector(preprocessed_line)
        dbow_dm_test_x[i, 400:] = dbow_model.infer_vector(preprocessed_line)
        i+=1

    print("Training doc2vec SVM")
    #Train SVM on classic bow
    svm = SVM()
    svm.train(dbow_dm_train_x, train_y)
    svm.test(dbow_dm_test_x, test_y)
    print("end of training doc2vec bow SVM\n")


    print("Training classic bow SVM")
    #Train SVM on classic bow
    svm = SVM()
    svm.train(transformed_train_x, train_y)
    svm.test(transformed_test_x, test_y)
    print("end of training classic bow SVM\n")
Example #38
def main():



    print("Loading data...")
    X_list, y_list = get_data()

    print("Loaded data...")
    print('\n')
    dataset_names = DataLoader.get_all_files('Data')
    dataset_names = [name.split('/')[1].split('.')[0] for name in dataset_names]
    undersample = True

    for i, (X, y) in enumerate(zip(X_list, y_list)):
        print("Dataset: {}".format(dataset_names[i]))

        X = np.array(X)
        y = np.array(y)

        n = len(X)

        kf = KFold(n, random_state=1337, shuffle=True, n_folds=5)

        fold_accuracies = []
        fold_recalls = []
        fold_precisions =[]
        fold_aucs = []
        fold_f1s = []

        for fold_idx, (train, test) in enumerate(kf):
            X_train, X_test = X[train], X[test]
            y_train, y_test = y[train], y[test]

            if undersample:
                # Get all the targets that are not relevant, i.e., y = -1
                idx_undersample = np.where(y_train == -1)[0]

                # Get all the targets that are relevant i.e., y = 1
                idx_positive = np.where(y_train == 1)[0]
                # Now sample from the no relevant targets
                random_negative_sample = np.random.choice(idx_undersample, idx_positive.shape[0])

                X_train_positive = X_train[idx_positive]

                X_train_negative = X_train[random_negative_sample]

                X_train_undersample = np.hstack((X_train_positive, X_train_negative))

                y_train_positive = y_train[idx_positive]
                y_train_negative = y_train[random_negative_sample]
                y_train_undersample = np.hstack((y_train_positive, y_train_negative))

            count_vec = CountVectorizer(ngram_range=(1, 3), max_features=50000)

            count_vec.fit(X_train)

            if undersample:
                X_train = X_train_undersample
                y_train = y_train_undersample

            X_train_undersample = count_vec.transform(X_train)
            X_test = count_vec.transform(X_test)

            svm = SVM()
            svm.train(X_train_undersample, y_train)
            svm.test(X_test, y_test)

            f1_score = svm.metrics["F1"]
            precision = svm.metrics["Precision"]
            recall = svm.metrics["Recall"]
            auc = svm.metrics["AUC"]
            accuracy = svm.metrics["Accuracy"]

            fold_accuracies.append(accuracy)
            fold_precisions.append(precision)
            fold_recalls.append(recall)
            fold_aucs.append(auc)
            fold_f1s.append(f1_score)

        average_accuracy = np.mean(fold_accuracies)
        average_precision = np.mean(fold_precisions)
        average_recall = np.mean(fold_recalls)
        average_auc = np.mean(fold_aucs)
        average_f1 = np.mean(fold_f1s)

        print("Fold Average Accuracy: {}".format(average_accuracy))
        print("Fold Average F1: {}".format(average_f1))
        print("Fold Average Precision: {}".format(average_precision))
        print("Fold Average AUC: {}".format(average_auc))
        print("Fold Average Recall: {}".format(average_recall))
        print('\n')
Example #39
def plot(predictor, X, y, grid_size):
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, grid_size), np.linspace(y_min, y_max, grid_size), indexing="ij")
    flatten = lambda m: np.array(m).reshape(-1)

    result = []
    for (i, j) in itertools.product(range(grid_size), range(grid_size)):
        point = np.array([xx[i, j], yy[i, j]]).reshape(1, 2)
        result.append(predictor.predict(point))

    Z = np.array(result).reshape(xx.shape)

    plt.contourf(xx, yy, Z, cmap=cm.Paired, levels=[-0.001, 0.001], extend="both", alpha=0.8)
    plt.scatter(flatten(X[:, 0]), flatten(X[:, 1]), c=flatten(y), cmap=cm.Paired)
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.show()


num_samples = 500
num_features = 2
grid_size = 20
samples = np.matrix(np.random.normal(size=num_samples * num_features).reshape(num_samples, num_features))
labels = 2 * (samples.sum(axis=1) > 0) - 1.0
model = SVM(1.0, Kernel.linear())
print(samples[0])
model.fit(samples, labels)
plot(model, samples, labels, grid_size)
Example #40
def transfer_learning(print_output=True):
    path = 'datasets/'
    data_loader = DataLoader(path)
    names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}
    transformed_data_sets = []

    path = 'datasets/'

    files = [f for f in listdir(path) if isfile(join(path,f))]
    files.pop(0)
    data_loader = DataLoader(path)
    domains = data_loader.csv_files
    all_domains = copy.deepcopy(domains)
    training_domains = data_loader.csv_files
    all_domains_svm_wda_metrics_list = []
    all_domains_svm_metrics_list = []
    all_domains_svm_bow_mlp_list = []
    all_domains_mlp_fold_scores = []

    for i, held_out_domain in enumerate(domains):
        training_domains.pop(i)
        names = {1: 'title', 4: 'abstract', 5: 'mesh', 'y': 6}
        svm_wda_metrics_list = []
        svm_metrics_list = []
        svm_bow_mlp_list = []

        folder_name = '/' + files[i]
        domain_name = files[i].__str__()
        domain_name = domain_name.split('.')[0]
        folder_name = 'output' + '/' + domain_name

        output = "Dataset: {}".format(files[i])
        if print_output:
            print(output)

        #shuffle(data_loader.csv_files)
        data_loader.csv_files = training_domains
        data_sets = data_loader.csv_files
        domains = data_loader.get_feature_matrix(names)

        #Get one file out of the csv files in the dataloader use this as the held out domain

        #Get the feature representation of the held out data
        held_out_x, held_out_y = data_loader.get_feature_matrix(names, held_out_domain)
        #Create the folds for the held out data in this case the default 5
        folds = data_loader.cross_fold_valdation(held_out_x, held_out_y)
        #Get the total number of domains i.e., the number of files with documents
        n_source_domains = len(data_sets)
        os.makedirs(folder_name)

        #Must convert the data type of the matrix for theano
        feature_engineer = Feature_Engineer()

        #Start the 5 fold cross validation
        for n_fold, fold in enumerate(folds):
            output = "Fold {}: \n".format(n_fold)
            if print_output:
                print(output)
            output = '{}/{}/fold_{}.csv'.format(os.getcwd(), folder_name, (n_fold + 1))
            file = open(output, 'w')
            csv_writer = csv.writer(file)

            #Each sample is a list that contains the x and y for the classifier.
            #Typically fold[0] would be the train sample, but train and test are
            #swapped here to test the effectiveness of the domain adaptation.
            train_sample = fold[1]
            test_sample = fold[0]

            #These are the original copies to be copied over the augmented feature matrix
            #Each sample contains the text and y labels from the data before it is put into the sklearn count vectorizer
            train_x, train_y = train_sample
            test_x, test_y = test_sample

            train_y[train_y == 0] = 2
            train_y[train_y == 1] = 3
            test_y[test_y == 0] = 2
            test_y[test_y == 1] = 3


            #Get the bag of words representation of the small 20% target source data and transform the other 80%
            #of the data.
            train_x = data_loader.get_transformed_features(train_x, True, False, True)
            test_x = data_loader.transform(test_x, True, True)

            transformed_domains = []

            #Transform the domains with respect to the training data
            for domain in domains:
                domain_x, domain_y = domain
                transformed_domain_x = data_loader.transform(domain_x, True, True)
                transformed_domain_x, domain_y = data_loader.underSample(transformed_domain_x, domain_y)
                transformed_domains.append([transformed_domain_x, domain_y])

            augmented_feature_matrix_train, augmented_y_train = feature_engineer.augmented_feature_matrix(transformed_domains,
                                                                                              [train_x, train_y])
            augmented_feature_matrix_test, augmented_y_test = feature_engineer.augmented_feature_matrix(held_out_domain=[test_x, test_y],
                                                                                                        train_or_test=False,
                                                                                                        n_source_domains=len(transformed_domains))
            augmented_y_test[augmented_y_test == 2] = 0
            augmented_y_test[augmented_y_test == 3] = 1
            #SVM with the augmented feature matrix for domain adaptation
            svm_wda = SVM()
            svm_wda.train(augmented_feature_matrix_train, augmented_y_train)
            svm_wda.test(augmented_feature_matrix_test, augmented_y_test)
            output = "\nSVM with domain adaptation metrics:"
            csv_writer.writerow([output])
            if print_output:
                print(output)
                print(svm_wda)
                print("\n")
            svm_wda_metrics_list.append(svm_wda.metrics)

            classifier = NeuralNet(n_hidden_units=[250], output_size=4, batch_size=20, n_epochs=200, dropout=True,
                                   activation_function='relu', learning_rate=.3, momentum=True, momentum_term=.5)
            write_to_csv(svm_wda.metrics, csv_writer)


            y_for_mlp = []
            #Set up the x and y data for the MLP
            for p, domain in enumerate(transformed_domains):
                domain_x, domain_y = domain
                domain_x = domain_x.todense()
                y_for_mlp.append(domain_y)

                if p == 0:
                    neural_net_x_train = domain_x
                    neural_net_y_train = domain_y
                else:
                    neural_net_x_train = numpy.vstack((neural_net_x_train, domain_x))
                    neural_net_y_train = numpy.hstack((neural_net_y_train, domain_y))

            neural_net_x_train = numpy.float_(neural_net_x_train)


            classifier.train(neural_net_x_train, neural_net_y_train)

            test_y[test_y == 2] = 0
            test_y[test_y == 3] = 1
            svm_y_train = neural_net_y_train
            svm_y_train[svm_y_train == 2] = 0
            svm_y_train[svm_y_train == 3] = 1

            #SVM without the domain adaptation
            svm = SVM()
            svm.train(sparse.coo_matrix(neural_net_x_train), svm_y_train)
            svm.test(test_x, test_y)
            output = "\nSVM without domain adaptation"
            if print_output:
                print(output)
                print(svm)
                print("\n")
            csv_writer.writerow([output])
            svm_metrics_list.append(svm.metrics)
            write_to_csv(svm.metrics, csv_writer)


            #Transform the feature vectors of the held out data to the learned hidden layer features of the previous
            #MLP trained with all n-1 datasets

            perceptron_train_x = theano.shared(neural_net_x_train)
            perceptron_test_x = theano.shared(test_x.todense())

            transformed_perceptron_train_x = classifier.transfer_learned_weights(perceptron_train_x)
            transformed_perceptron_test_x = classifier.transfer_learned_weights(perceptron_test_x)

            modified_transformed_perceptron_train_x = numpy.hstack((transformed_perceptron_train_x,
                                                                    neural_net_x_train))
            modified_transformed_perceptron_test_x = numpy.hstack((transformed_perceptron_test_x,
                                                                   test_x.todense()))

            output = "\nSVM with BoW and transformed features"
            csv_writer.writerow([output])
            if print_output:
                print(output)
            svm_mlp_bow = SVM()
            svm_mlp_bow.train(sparse.coo_matrix(modified_transformed_perceptron_train_x), svm_y_train)
            svm_mlp_bow.test(sparse.coo_matrix(modified_transformed_perceptron_test_x), test_y)
            write_to_csv(svm_mlp_bow.metrics, csv_writer)
            if print_output:
                print(svm_mlp_bow)
            svm_bow_mlp_list.append(svm_mlp_bow.metrics)


            output = "*********** End of fold {} ***********".format(n_fold)

            if print_output:
                print(output)


        training_domains = copy.deepcopy(all_domains)
        file_name = '{}/{}/fold_averages.csv'.format(os.getcwd(), folder_name)
        file = open(file_name, 'w+')
        csv_writer = csv.writer(file)

        if print_output:
            output = "----------------------------------------------------------------------------------------" \
                     "\nFold Scores\n " \
                     "SVM with domain adaptation"
            print_write_output(output, svm_wda_metrics_list, all_domains_svm_wda_metrics_list, csv_writer)

            output = "\nSVM without domain adaptation"
            print_write_output(output, svm_metrics_list, all_domains_svm_metrics_list, csv_writer)

            output = "SVM with BoW and transformed features"
            print_write_output(output, svm_bow_mlp_list, all_domains_svm_bow_mlp_list, csv_writer)



    file_name = '{}/output/all_fold_averages.csv'.format(os.getcwd())
    file = open(file_name, 'w+')
    csv_writer = csv.writer(file)
    if print_output:
        output = "*******************************************************************************************" \
                 "\nAll domain macro metric scores\n " \
                 "SVM with domain adaptation"
        print_macro_scores("SVM with domain adaptation", all_domains_svm_wda_metrics_list, csv_writer)

        output = "\nSVM without domain adaptation"
        print_macro_scores(output, all_domains_svm_metrics_list, csv_writer)

        output = "SVM with BoW and transformed features"
        print_macro_scores(output, all_domains_svm_bow_mlp_list, csv_writer)
Example #41
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', ['n_feature_maps=', 'epochs=', 'max_words=', 'dropout_p=',
                                                      'undersample=', 'n_feature_maps=', 'criterion=',
                                                      'optimizer=', 'max_words=', 'layers=',
                                                      'hyperopt=', 'experiment_name=', 'w2v_path=', 'tacc=',
                                                      'use_all_date=', 'tacc=', 'pretrain=', 'undersample_all=',
                                                      'save_model=', 'transfer_learning='])
    except getopt.GetoptError as error:
        print(error)
        sys.exit(2)

    w2v_path = '/Users/ericrincon/PycharmProjects/Deep-PICO/wikipedia-pubmed-and-PMC-w2v.bin'
    epochs = 50
    criterion = 'categorical_crossentropy'
    optimizer = 'adam'
    experiment_name = 'abstractCNN'
    w2v_size = 200
    activation = 'relu'
    dense_sizes = [400, 400]
    max_words = {'text': 270, 'mesh': 50, 'title': 17}

    filter_sizes = {'text': [2, 3, 4, 5],
                    'mesh': [2, 3, 4, 5],
                    'title': [2, 3, 4, 5]}
    n_feature_maps = {'text': 100, 'mesh': 50, 'title': 50}
    word_vector_size = 200
    using_tacc = False
    undersample = False
    use_embedding = False
    embedding = None
    use_all_date = False
    patience = 50
    p = .5
    verbose = 0
    pretrain = True
    filter_small_data = True
    save_model = False
    load_data_from_scratch = False
    print_output = True
    transfer_learning = False

    for opt, arg in opts:
        if opt == '--save_model':
            if int(arg) == 0:
                save_model = False
            elif int(arg) ==  1:
                save_model = True
        elif opt == '--transfer_learning':
            if int(arg) == 1:
                transfer_learning = True
            elif int(arg) == 0:
                transfer_learning = False
        elif opt == '--undersample_all':
            if int(arg) == 0:
                undersample_all = False
            elif int(arg) == 1:
                undersample_all = True
        elif opt == '--pretrain':
            if int(arg) == 0:
                pretrain = False
            elif int(arg) == 1:
                pretrain = True
            else:
                print("Invalid input")

        elif opt == '--verbose':
            verbose = int(arg)
        elif opt == '--use_embedding':
            if int(arg) == 0:
                use_embedding = False
        elif opt == '--dropout_p':
            p = float(arg)
        elif opt == '--epochs':
            epochs = int(arg)
        elif opt == '--layers':
            layer_sizes = arg.split(',')
        elif opt == '--n_feature_maps':
            n_feature_maps = int(arg)
        elif opt == '--criterion':
            criterion = arg
        elif opt == '--optimizer':
            optimizer = arg
        elif opt == '--tacc':
            if int(arg) == 1:
                using_tacc = True
        elif opt == '--hyperopt':
            if int(arg) == 1:
                hyperopt = True
        elif opt == '--experiment_name':
            experiment_name = arg
        elif opt == '--max_words':
            max_words = int(arg)
        elif opt == '--w2v_path':
            w2v_path = arg
        elif opt == '--word_vector_size':
            word_vector_size = int(arg)
        elif opt == '--use_all_data':
            if int(arg) == 1:
                use_all_date = True
        elif opt == '--patience':
            patience = int(arg)

        elif opt == '--undersample':
            if int(arg) == 0:
                undersample = False
            elif int(arg) == 1:
                undersample = True

        else:
            print("Option {} is not valid!".format(opt))


    if using_tacc:
        nltk.data.path.append('/work/03186/ericr/nltk_data/')
    print('Loading data...')

    if load_data_from_scratch:

        print('Loading Word2Vec...')
        w2v = Word2Vec.load_word2vec_format(w2v_path, binary=True)
        print('Loaded Word2Vec...')
        X_list = []
        y_list = []

        if use_embedding:

            X_list, y_list, embedding_list = DataLoader.get_data_as_seq(w2v, w2v_size, max_words)

        else:
            X_list, y_list = DataLoader.get_data_separately(max_words, word_vector_size,
                                                            w2v, use_abstract_cnn=True,
                                                            preprocess_text=False,
                                                            filter_small_data=filter_small_data)
    else:
        X_list, y_list = DataLoader.load_datasets_from_h5py('DataProcessed', True)


    print('Loaded data...')
    dataset_names = DataLoader.get_all_files('DataProcessed')
    dataset_names = [x.split('/')[-1].split('.')[0] for x in dataset_names]

    results_file = open(experiment_name + "_results.txt", "w+")

    for dataset_i, (X, y) in enumerate(zip(X_list, y_list)):
        if use_embedding:
            embedding = embedding_list[dataset_i]

        model_name = dataset_names[dataset_i]

        print("Dataset: {}".format(model_name))

        results_file.write("Dataset: {}\n".format(model_name))

        X_abstract, X_title, X_mesh = X['text'], X['title'], X['mesh']
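        # 5-fold cross-validation with a fixed seed for reproducibility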
        kf = KFold(n_splits=5, shuffle=True, random_state=1337)

        if pretrain:
            pretrain_fold_accuracies = []
            pretrain_fold_recalls = []
            pretrain_fold_precisions = []
            pretrain_fold_aucs = []
            pretrain_fold_f1s = []

        if transfer_learning:
            svm_fold_accuracies = []
            svm_fold_recalls = []
            svm_fold_precisions = []
            svm_fold_aucs = []
            svm_fold_f1s = []

        fold_accuracies = []
        fold_recalls = []
        fold_precisions = []
        fold_aucs = []
        fold_f1s = []

        for fold_idx, (train, test) in enumerate(kf.split(X_abstract)):
            temp_model_name = experiment_name + '_' + model_name + '_fold_{}'.format(fold_idx + 1)


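            # Build a fresh CNN for every fold so no weights leak across folds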
            cnn = AbstractCNN(n_classes=2, max_words=max_words, w2v_size=word_vector_size, vocab_size=1000,
                              use_embedding=use_embedding, filter_sizes=filter_sizes, n_feature_maps=n_feature_maps,
                              dense_layer_sizes=dense_sizes.copy(), name=temp_model_name,
                              activation_function=activation, dropout_p=p, embedding=embedding)

            if pretrain:
                X_abstract_train = X_abstract[train, :, :]
                X_title_train = X_title[train, :, :]
                X_mesh_train = X_mesh[train, :, :]

                y_train = y[train, :]

                X_abstract_test = X_abstract[test, :, :]
                X_title_test = X_title[test, :, :]
                X_mesh_test = X_mesh[test, :, :]

                y_test = y[test, :]

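                # Pool every other dataset into this fold's training split
                # (leave-one-dataset-out pretraining)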
                for i, (_x, _y) in enumerate(zip(X_list, y_list)):
                    if i != dataset_i:
                        X_abstract_train = np.vstack((X_abstract_train, _x['text'][()]))
                        X_title_train = np.vstack((X_title_train, _x['title'][()]))
                        X_mesh_train = np.vstack((X_mesh_train, _x['mesh'][()]))
                        y_train = np.vstack((y_train, _y[()]))
                print(X_abstract_train.shape)

                cnn.train(X_abstract_train, X_title_train, X_mesh_train, y_train, n_epochs=epochs,
                          optim_algo=optimizer, criterion=criterion, verbose=verbose, patience=patience,
                          save_model=save_model)


                accuracy, f1_score, precision, auc, recall = cnn.test(X_abstract_test, X_title_test, X_mesh_test, y_test,
                                                                      print_output=True)

                print("Results from training on all data only")

                print("Accuracy: {}".format(accuracy))
                print("F1: {}".format(f1_score))
                print("Precision: {}".format(precision))
                print("AUC: {}".format(auc))
                print("Recall: {}".format(recall))
                print("\n")

                pretrain_fold_accuracies.append(accuracy)
                pretrain_fold_precisions.append(precision)
                pretrain_fold_recalls.append(recall)
                pretrain_fold_aucs.append(auc)
                pretrain_fold_f1s.append(f1_score)

            if not use_embedding:
                X_abstract_train = X_abstract[train, :, :]
                X_title_train = X_title[train, :, :]
                X_mesh_train = X_mesh[train, :, :]
                y_train = y[train, :]

                X_abstract_test = X_abstract[test, :, :]
                X_title_test = X_title[test, :, :]
                X_mesh_test = X_mesh[test, :, :]
                y_test = y[test, :]

            else:
                X_abstract_train = X_abstract[train]
                X_title_train = X_title[train]
                X_mesh_train = X_mesh[train]
                y_train = y[train, :]

                X_abstract_test = X_abstract[test]
                X_title_test = X_title[test]
                X_mesh_test = X_mesh[test]
                y_test = y[test, :]

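                # Optionally rebalance classes in the training split (sequence path only)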
                if undersample:
                    X_abstract_train, X_title_train, X_mesh_train, y_train = \
                        DataLoader.undersample_seq(X_abstract_train, X_title_train, X_mesh_train, y_train)

            cnn.train(X_abstract_train, X_title_train, X_mesh_train, y_train, n_epochs=epochs, optim_algo=optimizer,
                      criterion=criterion, verbose=verbose, patience=patience,
                      save_model=save_model)
            accuracy, f1_score, precision, auc, recall = cnn.test(X_abstract_test, X_title_test, X_mesh_test, y_test,
                                                                  print_output=print_output)

            if transfer_learning:
                svm = SVM()

                # Reuse the CNN's learned features as input to the SVM
                X_transfer_train = cnn.output_learned_features([X_abstract_train, X_title_train, X_mesh_train])
                X_transfer_test = cnn.output_learned_features([X_abstract_test, X_title_test, X_mesh_test])

                svm.train(X_transfer_train, DataLoader.onehot2list(y_train))
                svm.test(X_transfer_test, DataLoader.onehot2list(y_test))

                print("\nSVM results")
                print(svm)
                print('\n')

                svm_fold_accuracies.append(svm.metrics['Accuracy'])
                svm_fold_precisions.append(svm.metrics['Precision'])
                svm_fold_aucs.append(svm.metrics['AUC'])
                svm_fold_recalls.append(svm.metrics['Recall'])
                svm_fold_f1s.append(svm.metrics['F1'])

            print('CNN results')
            print("Accuracy: {}".format(accuracy))
            print("F1: {}".format(f1_score))
            print("Precision: {}".format(precision))
            print("AUC: {}".format(auc))
            print("Recall: {}".format(recall))

            fold_accuracies.append(accuracy)
            fold_precisions.append(precision)
            fold_recalls.append(recall)
            fold_aucs.append(auc)
            fold_f1s.append(f1_score)



        if pretrain:
            pretrain_average_accuracy = np.mean(pretrain_fold_accuracies)
            pretrain_average_precision = np.mean(pretrain_fold_precisions)
            pretrain_average_recall = np.mean(pretrain_fold_recalls)
            pretrain_average_auc = np.mean(pretrain_fold_aucs)
            pretrain_average_f1 = np.mean(pretrain_fold_f1s)

            print("\nAverage results from using all data")
            print("Fold Average Accuracy: {}".format(pretrain_average_accuracy))
            print("Fold Average F1: {}".format(pretrain_average_f1))
            print("Fold Average Precision: {}".format(pretrain_average_precision))
            print("Fold Average AUC: {}".format(pretrain_average_auc))
            print("Fold Average Recall: {}".format(pretrain_average_recall))
            print('\n')



        average_accuracy = np.mean(fold_accuracies)
        average_precision = np.mean(fold_precisions)
        average_recall = np.mean(fold_recalls)
        average_auc = np.mean(fold_aucs)
        average_f1 = np.mean(fold_f1s)


        print('CNN Results')
        print("Fold Average Accuracy: {}".format(average_accuracy))
        print("Fold Average F1: {}".format(average_f1))
        print("Fold Average Precision: {}".format(average_precision))
        print("Fold Average AUC: {}".format(average_auc))
        print("Fold Average Recall: {}".format(average_recall))
        print('\n')

        results_file.write("CNN results\n")
        results_file.write("Fold Average Accuracy: {}\n".format(average_accuracy))
        results_file.write("Fold Average F1: {}\n".format(average_f1))
        results_file.write("Fold Average Precision: {}\n".format(average_precision))
        results_file.write("Fold Average AUC: {}\n".format(average_auc))
        results_file.write("Fold Average Recall: {}\n".format(average_recall))
        results_file.write('\n')

        if transfer_learning:
            average_accuracy = np.mean(svm_fold_accuracies)
            average_precision = np.mean(svm_fold_precisions)
            average_recall = np.mean(svm_fold_recalls)
            average_auc = np.mean(svm_fold_aucs)
            average_f1 = np.mean(svm_fold_f1s)

            print("SVM with cnn features")
            print("Fold Average Accuracy: {}".format(average_accuracy))
            print("Fold Average F1: {}".format(average_f1))
            print("Fold Average Precision: {}".format(average_precision))
            print("Fold Average AUC: {}".format(average_auc))
            print("Fold Average Recall: {}".format(average_recall))
            print('\n')

            results_file.write("SVM with cnn features\n")
            results_file.write("Fold Average Accuracy: {}\n".format(average_accuracy))
            results_file.write("Fold Average F1: {}\n".format(average_f1))
            results_file.write("Fold Average Precision: {}\n".format(average_precision))
            results_file.write("Fold Average AUC: {}\n".format(average_auc))
            results_file.write("Fold Average Recall: {}\n".format(average_recall))
            results_file.write('\n')
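
    # All datasets processed; flush and close the results file
    results_file.close()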
Exemple #42
0
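# Assumed imports, not shown in the original snippet: loadtxt comes from
# NumPy; SVM and plotDecisionBoundary are taken to be project-local helpers.
import numpy as np
from numpy import loadtxt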
# parameters
name = 'stdev2'
print('======Training======')
# load data from csv files
train = loadtxt('newData-2/data_'+name+'_train.csv')
#train = loadtxt('data/data_'+name+'_train.csv')
# use deep copy here to make cvxopt happy
X = train[:, 0:2].copy()
Y = train[:, 2:3].copy()

#X = np.array([[1.0,2.0],[2.0,2.0],[0.0,0.0],[-2.0,3.0]])
#Y = np.array([[1.0],[1.0],[-1.0],[-1.0]])

# Carry out training, primal and/or dual
C = 1
svm = SVM(X, Y, C)
svm.train()
#model = svm.train_gold()

# Define the predictSVM(x) function, which uses trained parameters
def predictSVM(x):
    return svm.test(x)
    #return svm.test_gold(x, model)


# plot training results
plotDecisionBoundary(X, Y, predictSVM, [-1, 0, 1], title='SVM Train')



print('======Validation======')