Code example #1
File: gan.py  Project: mahi97/Zero-Shot-Learning
def test_accuracy(sess, summ_op, acc_val, model, data_handler, config):

    svm_train_size = 100

    x_syn_data, label_syn_data = None, []

    # Placeholders for latent noise and class-attribute conditioning; rebuild the
    # generator graph with the trained weights (reuse=True) in inference mode.
    z_pl = tf.placeholder(tf.float32, shape=[None, config.z_dim])
    c_pl = tf.placeholder(tf.float32, shape=[None, config.attr_dim])
    G_samples = model.G(z_pl, c_pl, reuse=True, is_training=False)

    z = sample_z(svm_train_size, config.z_dim)
    for idx, ci_attr in enumerate(data_handler.test_attr):
        xi_syn_data = sess.run([G_samples],
                               feed_dict={
                                   z_pl: z,
                                   c_pl: np.tile(ci_attr, (svm_train_size, 1))
                               })
        # The generator returns svm_train_size synthetic feature vectors for this
        # class; flatten each sample so the batch has shape (svm_train_size, -1).
        xi_syn_data = np.squeeze(xi_syn_data, axis=0).reshape(svm_train_size, -1)
        if x_syn_data is None:
            x_syn_data = xi_syn_data
        else:
            x_syn_data = np.vstack((x_syn_data, xi_syn_data))

        label_syn_data.extend(data_handler.test_label[idx] *
                              np.ones(svm_train_size))

    # Train a linear SVM on the synthetic features and measure its accuracy on the
    # real test features of the unseen classes.
    svm_model = LinearSVM(config)
    svm_model.train(x_syn_data, label_syn_data)
    accuracy = svm_model.measure_accuracy(data_handler.test_data,
                                          data_handler.test_label)

    summ = sess.run(summ_op, feed_dict={acc_val: accuracy})
    return summ
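
The helper sample_z used above is defined elsewhere in the project; a minimal sketch, assuming it draws latent noise uniformly from [-1, 1] (a common convention for GAN latent vectors, not confirmed by this excerpt), could look like this:

import numpy as np

def sample_z(batch_size, z_dim):
    # Draw a batch of latent noise vectors uniformly from [-1, 1].
    return np.random.uniform(-1.0, 1.0, size=(batch_size, z_dim))
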
Code example #2
    x_val = np.resize(
        x_val, (num_val, x_val.shape[1] * x_val.shape[2] * x_val.shape[3]))
    x_test = np.resize(
        x_test,
        (num_test, x_test.shape[1] * x_test.shape[2] * x_test.shape[3]))

    # Stack arrays: append a column of ones as the bias dimension
    x_train = np.hstack([x_train, np.ones((x_train.shape[0], 1))])
    x_val = np.hstack([x_val, np.ones((x_val.shape[0], 1))])
    x_test = np.hstack([x_test, np.ones((x_test.shape[0], 1))])

    svm = LinearSVM()
    loss_history = svm.train(x_train,
                             y_train,
                             learning_rate=1e-7,
                             reg=2.5e4,
                             num_iters=2000,
                             batch_size=200,
                             print_flag=True)

    y_train_pred = svm.predict(x_train)
    num_correct = np.sum(y_train_pred == y_train)
    accuracy = np.mean(y_train_pred == y_train)
    print('Training correct %d/%d: The accuracy is %f' %
          (num_correct, x_train.shape[0], accuracy))

    y_test_pred = svm.predict(x_test)
    num_correct = np.sum(y_test_pred == y_test)
    accuracy = np.mean(y_test_pred == y_test)
    print('Test correct %d/%d: The accuracy is %f' %
          (num_correct, x_test.shape[0], accuracy))
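
Examples #2 and #3 call a LinearSVM class whose definition lies outside these excerpts. A minimal sketch of an interface compatible with those calls, assuming a multiclass hinge loss trained by mini-batch SGD (all internals here are assumptions for illustration, not the project's actual code), is:

import numpy as np

class LinearSVM:
    def __init__(self):
        self.W = None  # weight matrix, shape (num_features, num_classes)

    def train(self, X, y, learning_rate=1e-7, reg=2.5e4,
              num_iters=2000, batch_size=200, print_flag=False):
        y = np.asarray(y, dtype=np.int64)
        num_train, dim = X.shape
        num_classes = int(np.max(y)) + 1
        if self.W is None:
            self.W = 0.001 * np.random.randn(dim, num_classes)
        loss_history = []
        for it in range(num_iters):
            idx = np.random.choice(num_train, batch_size)
            loss, grad = self._svm_loss(X[idx], y[idx], reg)
            loss_history.append(loss)
            self.W -= learning_rate * grad
            if print_flag and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))
        return loss_history

    def predict(self, X):
        # Predict the class with the highest score for each sample.
        return np.argmax(X.dot(self.W), axis=1)

    def _svm_loss(self, X, y, reg):
        # Vectorized multiclass hinge loss with L2 regularization.
        num_train = X.shape[0]
        scores = X.dot(self.W)
        correct = scores[np.arange(num_train), y].reshape(-1, 1)
        margins = np.maximum(0, scores - correct + 1.0)
        margins[np.arange(num_train), y] = 0
        loss = margins.sum() / num_train + reg * np.sum(self.W * self.W)
        binary = (margins > 0).astype(float)
        binary[np.arange(num_train), y] = -binary.sum(axis=1)
        grad = X.T.dot(binary) / num_train + 2 * reg * self.W
        return loss, grad
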
Code example #3
    x_test = np.hstack([x_test, np.ones((x_test.shape[0], 1))])


    learning_rates = [1.4e-7, 1.5e-7, 1.6e-7]
    regularization_strengths = [8000.0, 9000.0, 10000.0, 11000.0, 18000.0, 19000.0, 20000.0, 21000.0]

    results = {}
    best_lr = None
    best_reg = None
    best_val = -1  # The highest validation accuracy that we have seen so far.
    best_svm = None  # The LinearSVM object that achieved the highest validation rate.

    for lr in learning_rates:
        for reg in regularization_strengths:
            svm = LinearSVM()
            loss_history = svm.train(x_train, y_train, learning_rate=lr, reg=reg, num_iters=2000)
            y_train_pred = svm.predict(x_train)
            accuracy_train = np.mean(y_train_pred == y_train)
            y_val_pred = svm.predict(x_val)
            accuracy_val = np.mean(y_val_pred == y_val)
            if accuracy_val > best_val:
                best_lr = lr
                best_reg = reg
                best_val = accuracy_val
                best_svm = svm
            results[(lr, reg)] = accuracy_train, accuracy_val
            print('lr: %e reg: %e train accuracy: %f val accuracy: %f' %
                  (lr, reg, results[(lr, reg)][0], results[(lr, reg)][1]))
    print('Best validation accuracy during cross-validation:\nlr = %e, reg = %e, best_val = %f' %
          (best_lr, best_reg, best_val))
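
The excerpt prepares x_test (bias column appended at the top) but stops after reporting the best validation result. A minimal follow-up sketch, assuming the same variables remain in scope and that the corresponding labels y_test are also available, would score the selected model on the held-out test split:

# Evaluate the best model found by the grid search on the test split.
y_test_pred = best_svm.predict(x_test)
test_accuracy = np.mean(y_test_pred == y_test)
print('Test accuracy of best model: %f' % test_accuracy)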