Example #1
import numpy as np
import tensorflow as tf  # TF1-style graph API (tf.placeholder, sessions)


def test_accuracy(sess, summ_op, acc_val, model, data_handler, config):

    # Number of synthetic samples to generate per test class.
    svm_train_size = 100

    x_syn_data, label_syn_data = None, []

    z_pl = tf.placeholder(tf.float32, shape=[None, config.z_dim])
    c_pl = tf.placeholder(tf.float32, shape=[None, config.attr_dim])
    G_samples = model.G(z_pl, c_pl, reuse=True, is_training=False)

    z = sample_z(svm_train_size, config.z_dim)
    for idx, ci_attr in enumerate(data_handler.test_attr):
        xi_syn_data = sess.run(G_samples,
                               feed_dict={
                                   z_pl: z,
                                   c_pl: np.tile(ci_attr, (svm_train_size, 1))
                               })
        # Flatten each generated sample to a feature vector, one row per
        # sample, so the row count matches the labels appended below.
        xi_syn_data = np.asarray(xi_syn_data).reshape(svm_train_size, -1)
        if x_syn_data is None:
            x_syn_data = xi_syn_data
        else:
            x_syn_data = np.vstack((x_syn_data, xi_syn_data))

        label_syn_data.extend(data_handler.test_label[idx] *
                              np.ones(svm_train_size))

    svm_model = LinearSVM(config)
    svm_model.train(x_syn_data, label_syn_data)
    accuracy = svm_model.measure_accuracy(data_handler.test_data,
                                          data_handler.test_label)

    summ = sess.run(summ_op, feed_dict={acc_val: accuracy})
    return summ
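The helper sample_z is not shown in this excerpt. A minimal sketch of what it plausibly does, assuming the common GAN convention of uniform noise in [-1, 1] (the name and the prior are assumptions, not the original code):

import numpy as np


def sample_z(batch_size, z_dim):
    # Draw latent noise uniformly from [-1, 1], one row per sample.
    return np.random.uniform(-1.0, 1.0, size=(batch_size, z_dim))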
Example #2
def predictor():

    print "Loading svm model..."
    LinearSVM.init(args.config)

    server = MyTCPServer((HOST, PORT), MyTCPHandler)
    print "Server started. Listening for reviews..."
    server.serve_forever()
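MyTCPServer is not defined in this excerpt, but the handler in Example #3 calls self.server.terminate(), so it presumably wraps the standard-library TCPServer with a stop flag. A minimal sketch under that assumption (the class shape and attribute names are guesses, not the original implementation):

import socketserver


class MyTCPServer(socketserver.TCPServer):
    allow_reuse_address = True

    def serve_forever(self, poll_interval=0.5):
        # Handle one request at a time until terminate() is called.
        self._stopped = False
        while not self._stopped:
            self.handle_request()

    def terminate(self):
        # Set the flag checked by serve_forever().
        self._stopped = True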
Example #3
    def handle(self):
        self.data = self.request.recv(MAX_REVIEW_SIZE).decode("utf-8")

        if self.data.lower() == "[stop]":
            print("Server is terminating... Please wait!")
            logger.debug("Server is terminating...")
            self.server.terminate()
            self.request.send(b"0")
        else:
            LinearSVM.db_connect(options.mysql_host, options.mysql_user,
                                 options.mysql_password,
                                 options.mysql_database)
            result = LinearSVM.predict(self.data)
            self.request.send(demjson.encode(result, encoding="utf-8"))
            logger.debug("%s: %s" % (self.data, result["score"]))
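A quick way to exercise the handler above from a client; the host and port here are placeholders, not values from the original configuration:

import socket

with socket.create_connection(("localhost", 9999)) as conn:
    conn.sendall(b"The food was great but the service was slow.")
    # The server replies with a JSON document containing the score.
    print(conn.recv(4096).decode("utf-8"))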
Example #4
    # Reset any gradients accumulated from the previous step.
    optimizer.zero_grad()

    # Make a forward pass to obtain the model's outputs.
    outputs = model(X)

    # Get the loss for these predictions
    loss = multiclass_hinge_loss(outputs, y)
    # Add the l2 regularization term
    loss += c * torch.mean(model.linear.weight**2)
    # Do the backpropagation step
    loss.backward()
    # Update weights
    optimizer.step()
    return loss


# Initialize our model
model = LinearSVM()

# Create a Stochastic Gradient Descent optimizer to optimize the model's
# parameters using the given learning rate.
optimizer = optim.SGD(model.parameters(), lr=DEFAULT_LEARNING_RATE)

# Initialize the 'scheduler' which dynamically updates our learning rate
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)

print("Starting training with learning rate {} and {} epochs".format(
    DEFAULT_LEARNING_RATE, EPOCHS))

for epoch in range(EPOCHS):
    print("Epoch", epoch)
    total_loss = 0
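multiclass_hinge_loss is used above but never defined. A minimal Weston-Watkins-style implementation it could correspond to (an assumption; torch.nn.MultiMarginLoss computes essentially the same quantity):

import torch


def multiclass_hinge_loss(outputs, y, margin=1.0):
    # Score of the correct class for each sample, shape (N, 1).
    correct = outputs.gather(1, y.view(-1, 1))
    # Hinge on every class, then zero out the correct-class positions.
    margins = torch.clamp(outputs - correct + margin, min=0.0)
    margins.scatter_(1, y.view(-1, 1), 0.0)
    return margins.sum(dim=1).mean()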
Example #5
    x_train = np.resize(
        x_train,
        (num_train, x_train.shape[1] * x_train.shape[2] * x_train.shape[3]))
    x_val = np.resize(
        x_val, (num_val, x_val.shape[1] * x_val.shape[2] * x_val.shape[3]))
    x_test = np.resize(
        x_test,
        (num_test, x_test.shape[1] * x_test.shape[2] * x_test.shape[3]))

    # Append a constant bias column of ones to each split (the bias trick).
    x_train = np.hstack([x_train, np.ones((x_train.shape[0], 1))])
    x_val = np.hstack([x_val, np.ones((x_val.shape[0], 1))])
    x_test = np.hstack([x_test, np.ones((x_test.shape[0], 1))])

    svm = LinearSVM()
    loss_history = svm.train(x_train,
                             y_train,
                             learning_rate=1e-7,
                             reg=2.5e4,
                             num_iters=2000,
                             batch_size=200,
                             print_flag=True)

    y_train_pred = svm.predict(x_train)
    num_correct = np.sum(y_train_pred == y_train)
    accuracy = np.mean(y_train_pred == y_train)
    print('Training correct %d/%d: The accuracy is %f' %
          (num_correct, x_train.shape[0], accuracy))

    y_test_pred = svm.predict(x_test)
    num_correct = np.sum(y_test_pred == y_test)
    accuracy = np.mean(y_test_pred == y_test)
    print('Test correct %d/%d: The accuracy is %f' %
          (num_correct, x_test.shape[0], accuracy))

    learning_rates = [1.4e-7, 1.5e-7, 1.6e-7]
    regularization_strengths = [8000.0, 9000.0, 10000.0, 11000.0,
                                18000.0, 19000.0, 20000.0, 21000.0]

    results = {}
    best_lr = None
    best_reg = None
    best_val = -1  # The highest validation accuracy that we have seen so far.
    best_svm = None  # The LinearSVM object that achieved the highest validation rate.

    for lr in learning_rates:
        for reg in regularization_strengths:
            svm = LinearSVM()
            loss_history = svm.train(x_train, y_train, learning_rate=lr, reg=reg, num_iters=2000)
            y_train_pred = svm.predict(x_train)
            accuracy_train = np.mean(y_train_pred == y_train)
            y_val_pred = svm.predict(x_val)
            accuracy_val = np.mean(y_val_pred == y_val)
            if accuracy_val > best_val:
                best_lr = lr
                best_reg = reg
                best_val = accuracy_val
                best_svm = svm
            results[(lr, reg)] = accuracy_train, accuracy_val
            print('lr: %e reg: %e train accuracy: %f val accuracy: %f' %
                  (lr, reg, accuracy_train, accuracy_val))
    print('Best validation accuracy during cross-validation:\nlr = %e, reg = %e, best_val = %f' %
          (best_lr, best_reg, best_val))
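LinearSVM.predict is not shown in this excerpt. In the CS231n-style codebase this appears to come from, prediction is typically a single matrix product against learned weights self.W of shape (D, C), with the bias folded in through the ones column appended above. A sketch under that assumption:

import numpy as np


def predict(self, X):
    # Class scores for every sample: (N, D) @ (D, C) -> (N, C).
    scores = X.dot(self.W)
    # The predicted label is the highest-scoring class.
    return np.argmax(scores, axis=1)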