Example #1

    if args.dataset == 'linear':
        X_train, Y_train = generate_linear(n=100)
        X_test, Y_test = generate_linear(n=100)
    elif args.dataset == 'xor':
        X_train, Y_train = generate_XOR_easy()
        X_test, Y_test = generate_XOR_easy()
    else:
        raise RuntimeError('Dataset Not Found')

    net = Net()

    if args.criterion == 'mse':
        criterion = nn.MSE()
    elif args.criterion == 'crossentropy':
        criterion = nn.CrossEntropy()
    else:
        raise RuntimeError('Criterion Not Found')

    if args.optimizer == 'sgd':
        optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum)
    elif args.optimizer == 'adagrad':
        optimizer = optim.Adagrad(net.parameters(), lr=args.lr)
    else:
        raise RuntimeError('Optimizer Not Found')

    model = Model(net, criterion, optimizer)
    train_history = model.train(X_train, Y_train, epochs=args.epochs)
    test_history = model.test(X_test, Y_test)

    show_history(train_history)
    show_result(X_test, Y_test, test_history['predict'])
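
Every example on this page calls a show_history helper that is not shown in the snippets. A minimal sketch of what it might look like for the dict-style histories of Examples #1-#3 (the parameter names and the optional filename argument used by Example #3 are assumptions) could be:

import matplotlib.pyplot as plt

def show_history(history, filename=None):
    # Plot each recorded metric (e.g. 'loss', 'accuracy') as one curve
    # over epochs; `history` is assumed to map metric names to lists.
    for name, values in history.items():
        plt.plot(values, label=name)
    plt.xlabel('epoch')
    plt.ylabel('value')
    plt.legend()
    if filename:
        plt.savefig(filename)  # Example #3 passes an output filename
    else:
        plt.show()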
Example #2
    # mlp.evaluate_training()
    # evaluate_file = input("What is the name of the file to evaluate the model on?\n")
    # mlp.evaluate(evaluate_file)
    #
    # save_weights = input("Do you want to save weights of the model?   (yes | no)\n").lower()
    # while save_weights != "yes" and save_weights != "no":
    #     save_weights = input("Do you want to save weights of the model?   (yes | no)\n").lower()
    # if save_weights == "yes":
    #     save_weights_file = input("Please provide file name where the weights should be saved to:\n")
    #     mlp.save_weights(save_weights_file)

    # UNCOMMENT WHEN YOU WANT TO FINALLY TEST THE MODEL
    # First round
    print("First round:")
    training_history = mlp.train()
    show_history(training_history)
    training_error_first_round = mlp.evaluate_training()
    evaluate_file = input(
        "What is the name of the file to evaluate the model on?\n")
    testing_error_first_round = mlp.evaluate(evaluate_file)
    first_round_error = (0.3 * training_error_first_round +
                         0.7 * testing_error_first_round)
    print("Total error in first round (training (30%), testing (70%)) "
          "was: {}\n".format(first_round_error))

    # Second round
    print("Second round:")
    load_weights_file = input(
        "What is the name of the file which you want to load weights from?\n")
    mlp.load_weights(load_weights_file)
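
The save_weights and load_weights methods used in this example are not shown. A minimal free-function sketch of the same idea, assuming the network's parameters are kept as a plain list of NumPy arrays (the .npz format here is an assumption, not the author's actual scheme):

import numpy as np

def save_weights(weights, filename):
    # np.savez stores each layer's array under the keys arr_0, arr_1, ...
    np.savez(filename, *weights)

def load_weights(filename):
    # Rebuild the list in layer order from the saved keys.
    with np.load(filename) as data:
        return [data['arr_%d' % i] for i in range(len(data.files))]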
Example #3
    if args.load:
        net = torch.load(args.load)

    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=5e-4)
    criterion = nn.CrossEntropyLoss()

    model = Model(net, optimizer, criterion)

    if args.trainable:
        train_history = model.train(train_loader,
                                    epochs=args.epochs,
                                    val_loader=test_loader)
        show_history(train_history,
                     f'history_{args.net}_{args.pretrained}.jpg')

        with open(f'score_{args.net}_{args.pretrained}.txt',
                  'w') as score_file:
            print(train_history['accuracy'], file=score_file)
            print(train_history['val_accuracy'], file=score_file)

    if args.save:
        model.save(args.save)

    test_history = model.test(test_loader)
    show_confusion_matrix(
        test_history['truth'], test_history['predict'],
        f'confusion_matrix_{args.net}_{args.pretrained}.jpg')
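
show_confusion_matrix is likewise external to this snippet. One way to sketch it, assuming truth and predict are flat arrays of class labels (the use of scikit-learn and matplotlib here is an assumption about the environment):

import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay

def show_confusion_matrix(truth, predict, filename):
    # Row-normalize so each cell shows the fraction of its true class.
    ConfusionMatrixDisplay.from_predictions(truth, predict, normalize='true')
    plt.savefig(filename)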
Example #4

if train:
    model = create_model_2_lenet()
    model.summary()
    opt = Adam()

    model.compile(loss=categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])
    start = time()
    history_object = model.fit(x_train,
                               y_train,
                               batch_size=batch_size,
                               epochs=epochs,
                               validation_data=(x_test, y_test),
                               shuffle=True,
                               callbacks=[checkpoint])
    print("Training time:", time() - start)

    show_history(history_object)
else:
    model = load_model(os.path.join(dir_save, model_name))
    model.summary()
    print("H5 Output: " + str(model.output.op.name))
    print("H5 Input: " + str(model.input.op.name))

    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Validation loss:', scores[0])
    print('Validation accuracy:', scores[1])
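
The checkpoint callback passed to model.fit above is defined elsewhere in the script. A plausible construction with Keras's ModelCheckpoint, reusing the dir_save and model_name variables from the else branch (the monitored metric is an assumption):

import os
from keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint(os.path.join(dir_save, model_name),
                             monitor='val_accuracy',  # 'val_acc' on older Keras
                             save_best_only=True,
                             verbose=1)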
Example #5

def evaluate_naive_method():
    # Naive baseline: use the last observed value of feature 1 (the
    # temperature) as the prediction and measure the mean absolute error.
    batch_maes = []
    for step in range(val_steps):
        samples, targets = next(val_gen)
        preds = samples[:, -1, 1]
        mae = np.mean(np.abs(preds - targets))
        batch_maes.append(mae)
    print(np.mean(batch_maes))


# Start by evaluating the naive baseline
evaluate_naive_method()

# Train and evaluate a 1-D convolutional network
model = Sequential()
model.add(
    layers.Conv1D(32,
                  5,
                  activation='relu',
                  input_shape=(None, float_data.shape[-1])))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
                              steps_per_epoch=500,
                              epochs=20,
                              validation_data=val_gen,
                              validation_steps=val_steps)
show_history(history)
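
Note that fit_generator is deprecated in TensorFlow 2.x, where model.fit accepts Python generators directly; the equivalent modern call is:

history = model.fit(train_gen,
                    steps_per_epoch=500,
                    epochs=20,
                    validation_data=val_gen,
                    validation_steps=val_steps)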