Example #1
    def handle(self):
        # Read one message (a review, or a control command) from the client.
        self.data = self.request.recv(MAX_REVIEW_SIZE)

        if self.data.lower() == "[stop]":
            # Control message: terminate the server and acknowledge with "0".
            print("Server is terminating... Please wait!")
            logger.debug("Server is terminating...")
            self.server.terminate()
            self.request.send("0")
        else:
            # Connect to the database, score the review with the linear SVM,
            # and reply with the result encoded as JSON.
            LinearSVM.db_connect(options.mysql_host, options.mysql_user,
                                 options.mysql_password,
                                 options.mysql_database)
            result = LinearSVM.predict(self.data)
            self.request.send(demjson.encode(result, encoding="utf-8"))
            logger.debug("%s: %s" % (self.data, result["score"]))
Example #2
    x_val = np.resize(
        x_val, (num_val, x_val.shape[1] * x_val.shape[2] * x_val.shape[3]))
    x_test = np.resize(
        x_test,
        (num_test, x_test.shape[1] * x_test.shape[2] * x_test.shape[3]))

    # Stack a bias column of ones onto each split (the bias trick).
    x_train = np.hstack([x_train, np.ones((x_train.shape[0], 1))])
    x_val = np.hstack([x_val, np.ones((x_val.shape[0], 1))])
    x_test = np.hstack([x_test, np.ones((x_test.shape[0], 1))])
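    # Aside (not part of the original example): the appended column of ones is
    # the "bias trick" -- with a constant 1 feature, the bias term can live
    # inside the weight vector, so scores reduce to a single matrix product.
    # Quick check with hypothetical numbers:
    _w, _b = np.array([0.5, -2.0, 1.5]), 0.7
    _x = np.array([1.0, 2.0, 3.0])
    assert np.isclose(_w.dot(_x) + _b, np.append(_w, _b).dot(np.append(_x, 1.0)))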

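    # Train a baseline SVM with hand-picked hyperparameters.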
    svm = LinearSVM()
    loss_history = svm.train(x_train,
                             y_train,
                             learning_rate=1e-7,
                             reg=2.5e4,
                             num_iters=2000,
                             batch_size=200,
                             print_flag=True)

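    # Accuracy of the baseline model on the training and test sets.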
    y_train_pred = svm.predict(x_train)
    num_correct = np.sum(y_train_pred == y_train)
    accuracy = np.mean(y_train_pred == y_train)
    print('Training correct %d/%d: The accuracy is %f' %
          (num_correct, x_train.shape[0], accuracy))

    y_test_pred = svm.predict(x_test)
    num_correct = np.sum(y_test_pred == y_test)
    accuracy = np.mean(y_test_pred == y_test)
    print('Test correct %d/%d: The accuracy is %f' %
          (num_correct, x_test.shape[0], accuracy))

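    # Hyperparameter grid to search over.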
    learning_rates = [1.4e-7, 1.5e-7, 1.6e-7]
    regularization_strengths = [8000.0, 9000.0, 10000.0, 11000.0, 18000.0, 19000.0, 20000.0, 21000.0]

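    # results maps (lr, reg) -> (training accuracy, validation accuracy).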
    results = {}
    best_lr = None
    best_reg = None
    best_val = -1  # The highest validation accuracy that we have seen so far.
    best_svm = None  # The LinearSVM object that achieved the highest validation rate.

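    # Grid search: train one SVM per (lr, reg) pair and keep the model with
    # the best validation accuracy.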
    for lr in learning_rates:
        for reg in regularization_strengths:
            svm = LinearSVM()
            loss_history = svm.train(x_train, y_train, learning_rate=lr, reg=reg, num_iters=2000)
            y_train_pred = svm.predict(x_train)
            accuracy_train = np.mean(y_train_pred == y_train)
            y_val_pred = svm.predict(x_val)
            accuracy_val = np.mean(y_val_pred == y_val)
            if accuracy_val > best_val:
                best_lr = lr
                best_reg = reg
                best_val = accuracy_val
                best_svm = svm
            results[(lr, reg)] = accuracy_train, accuracy_val
            print('lr: %e reg: %e train accuracy: %f val accuracy: %f' %
                  (lr, reg, accuracy_train, accuracy_val))
    print('Best validation accuracy during cross-validation:\nlr = %e, reg = %e, best_val = %f' %
          (best_lr, best_reg, best_val))

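    # Evaluate the best model found by the search on the test set.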
    y_test_pred = best_svm.predict(x_test)