Example #1
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

def test_train_model_svr(data_train_test, model_file_name):
    # DropColumn and train_model are project-specific helpers (see sketches below).
    model = make_pipeline(
        DropColumn('timeStamp'),  # drop the raw timestamp column
        StandardScaler(),         # standardize features for the SVM
        SVR(C=1.e5, gamma=0.005, kernel='rbf'),
    )
    train_model(data_train_test, model, model_file_name)
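DropColumn is not part of scikit-learn; both this test and Example #6 assume a project-defined transformer. A minimal sketch of what it might look like, assuming the pipeline receives a pandas DataFrame and the step simply drops the named column (only the class name and constructor argument come from the example; the implementation is a guess):

from sklearn.base import BaseEstimator, TransformerMixin

class DropColumn(BaseEstimator, TransformerMixin):
    """Pipeline step that removes one named column from a DataFrame."""

    def __init__(self, column):
        self.column = column

    def fit(self, X, y=None):
        # Stateless: nothing to learn from the data.
        return self

    def transform(self, X):
        return X.drop(columns=[self.column])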
Example #2
    def setup_model(self, model):
        # Requires `import tensorflow as tf` at module level. status_data and
        # my_model are module-level globals updated by this method.
        global status_data
        global my_model

        status_data['text'] = "Loading data..."

        (x_train, y_train), (x_test,
                             y_test) = tf.keras.datasets.mnist.load_data()

        # Debug output: inspect the type and shape of the raw training data.
        print(type(x_train))
        print(x_train.shape)
        print(type(x_train[0]))
        print(x_train[0].shape)

        # Scale pixel values from [0, 255] into [0, 1].
        x_train_normalized = x_train / 255
        x_test_normalized = x_test / 255

        status_data['text'] = "Training model..."

        # The following variables are the hyperparameters.
        learning_rate = 0.003
        epochs = 50
        batch_size = 4000
        validation_split = 0.2

        # Establish the model's topography.
        my_model = model.create_model(learning_rate)

        # Train the model on the normalized training set. Note that `epochs`
        # is rebound here to the per-epoch values returned by train_model,
        # which plot_curve then uses as its x-axis.
        epochs, hist = model.train_model(my_model, x_train_normalized, y_train,
                                         epochs, batch_size, validation_split)

        # Plot a graph of the metric vs. epochs.
        list_of_metrics_to_plot = ['accuracy']
        model.plot_curve(epochs, hist, list_of_metrics_to_plot)

        # Evaluate against the test set.
        print("\n Evaluate the new model against the test set:")
        my_model.evaluate(x=x_test_normalized, y=y_test, batch_size=batch_size)

        status_data['text'] = "Model trained."
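The create_model and train_model helpers called above are not shown; in the snippet they live on a helper object, but the usual Keras pattern looks like the free-function sketch below (the layer sizes, dropout rate, and DataFrame-based history are assumptions; plot_curve is omitted):

import pandas as pd
import tensorflow as tf

def create_model(learning_rate):
    """Build and compile a small dense classifier for 28x28 MNIST images."""
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model

def train_model(model, train_features, train_label, epochs,
                batch_size=None, validation_split=0.1):
    """Fit the model; return the epoch indices and per-epoch metrics."""
    history = model.fit(x=train_features, y=train_label,
                        batch_size=batch_size, epochs=epochs,
                        shuffle=True, validation_split=validation_split)
    return history.epoch, pd.DataFrame(history.history)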
Example #3
    dev_images1 = dev_images1.astype('float32') / 255

    # Convert integer labels to one-hot vectors (Keras to_categorical).
    # Note the added "s" in train_labels, distinguishing it from the earlier
    # NumPy array train_label_np.
    train_labels = to_categorical(train_label_np)
    dev_labels = to_categorical(dev_label_np)

    train_labels1 = to_categorical(train_label_np1)
    dev_labels1 = to_categorical(dev_label_np1)

    train_labels_merged = merge_labels(train_labels, train_labels1)
    dev_labels_merged = merge_labels(dev_labels, dev_labels1)

    sample_size = dev_labels_merged.shape[0]  # number of dev samples

    # Sanity check: print the first `sample_size` merged training labels
    # alongside the two original integer labels.
    for i in range(sample_size):
        print(train_labels_merged[i, :], ",", train_label_np[i], ",",
              train_label_np1[i])

    params = {}
    params['height'] = height
    params['width'] = width
    params['channel'] = channel

    model = build_model(True, params)
    history = train_model(model, train_labels_merged, train_images,
                          dev_labels_merged, dev_images)

    # Plot the learning-history graph for diagnostic purposes.
    print_plot_keras_metrics(history)
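merge_labels is a project helper whose definition is not shown. A plausible minimal version, assuming it stacks the two one-hot matrices side by side so a single multi-output head can be trained on both label sets (the axis choice is an assumption, consistent with the printed rows above containing both labels):

import numpy as np

def merge_labels(labels_a, labels_b):
    """Concatenate two one-hot label matrices along the class axis."""
    return np.concatenate([labels_a, labels_b], axis=1)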
Example #4
    x2 = data.normalize(x2, 0)
    n_clusters = len(set(gt))

    #act_ae1, act_ae2, act_dg1, act_dg2 = 'sigmoid', 'sigmoid', 'sigmoid', 'sigmoid'
    # original settings (256-unit bottleneck), overridden below
    v1_aedims_ = [[x1.shape[1], 512, 256], [256, 512, x1.shape[1]]]
    v2_aedims_ = [[x2.shape[1], 512, 256], [256, 512, x2.shape[1]]]
    mae_dims_ = [[256, 128, 64], [256, 128, 64], [64, 128, 256],
                 [64, 128, 256]]
    # currently used settings (128-unit bottleneck); these later assignments win
    v1_aedims_ = [[x1.shape[1], 512, 128], [128, 512, x1.shape[1]]]
    v2_aedims_ = [[x2.shape[1], 512, 128], [128, 512, x2.shape[1]]]
    mae_dims_ = [[128, 128, 64], [128, 128, 64], [64, 128, 128],
                 [64, 128, 128]]
    #dims_dg1 = [64, 100]
    #dims_dg2 = [64, 100]
    dis_dims_ = [256, 128, 1]
    para_lambda = 1
    batch_size = 256

    epochs = 200

    model = MaeAEModel(v1_aedims=v1_aedims_,
                       v2_aedims=v2_aedims_,
                       mae_dims=mae_dims_,
                       dis_dims=dis_dims_)  # used by duaAE
    H, gt = model.train_model(x1, x2, gt, epochs, batch_size)
    #H,gt=model(x1, x2, gt, para_lambda, dims, act, lr, epochs, batch_size)
    print_result(n_clusters, H, gt)
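print_result is not shown either. A sketch of a typical evaluation step for this kind of multi-view embedding, assuming it clusters the learned representation H with k-means and scores the assignment against the ground truth (the metric choice is an assumption):

from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score, normalized_mutual_info_score

def print_result(n_clusters, H, gt):
    """Cluster the embedding H and report agreement with the labels gt."""
    pred = KMeans(n_clusters=n_clusters, n_init=10).fit_predict(H)
    print('NMI: %.4f' % normalized_mutual_info_score(gt, pred))
    print('ARI: %.4f' % adjusted_rand_score(gt, pred))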
Example #5
                path="./output/model_deep.h5",
                mask=False,
                )
model.cnn_rnn_attn(hiddenDim=400)
train_x = [train_input, train_char]
train_y = train_label
test_x = [test_input, test_char]


# train
print("train...")
epoch = 5
model.train_model(x=train_x, y=train_y,
                  epoch=epoch,
                  batch_size=16,
                  validation_split=0.1)
print("predict...")
test_predict = model.predict(test_x)
print(type(test_predict))
print(test_predict.shape)
test_predict = np.argmax(test_predict, axis=2)  # most likely tag per token
pred_tag = []
for i, tokens in enumerate(test_token):
    tag_sent = []
    for j, t in enumerate(tokens):
        try:
            tag_sent.append(tag_alphabet.get_instance(test_predict[i][j]))
        except IndexError:
            # Token position beyond the model's fixed sequence length.
            continue
    pred_tag.append(tag_sent)  # collect the decoded tags for this sentence
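tag_alphabet.get_instance maps a predicted class index back to its tag string, which is why the loop above catches IndexError for out-of-range positions. A minimal sketch of such an alphabet, assuming the usual bidirectional mapping used in sequence-labeling codebases (only get_instance is taken from the example; add is an assumption):

class Alphabet:
    """Bidirectional mapping between tag strings and integer indices."""

    def __init__(self):
        self.instance2index = {}
        self.instances = []

    def add(self, instance):
        # Register a new tag; existing tags keep their index.
        if instance not in self.instance2index:
            self.instance2index[instance] = len(self.instances)
            self.instances.append(instance)

    def get_instance(self, index):
        # Raises IndexError for out-of-range indices, which the decoding
        # loop above deliberately skips.
        return self.instances[index]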
Example #6
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

def test_train_model_mlp(data_train_test, model_file_name):
    model = make_pipeline(DropColumn('timeStamp'), StandardScaler(),
                          MLPRegressor(alpha=1.e-4, learning_rate_init=10.))
    train_model(data_train_test, model, model_file_name)
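Both this test and Example #1 delegate to the same project-specific train_model helper. A minimal sketch, assuming data_train_test is an (X_train, X_test, y_train, y_test) split and the fitted pipeline is persisted under model_file_name (the joblib persistence and the MAE metric are assumptions):

import joblib
from sklearn.metrics import mean_absolute_error

def train_model(data_train_test, model, model_file_name):
    """Fit the pipeline, report test error, and save the fitted model."""
    X_train, X_test, y_train, y_test = data_train_test
    model.fit(X_train, y_train)
    print('test MAE:', mean_absolute_error(y_test, model.predict(X_test)))
    joblib.dump(model, model_file_name)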