Example #1
    updates = lasagne.updates.adam(loss_or_grads=W_grads,
                                   params=W,
                                   learning_rate=LR)
    updates = quantized_net.clipping_scaling(updates, mlp)

    # other parameters updates
    params = lasagne.layers.get_all_params(mlp,
                                           trainable=True,
                                           quantized=False)
    # In Python 3, dict views cannot be concatenated with "+"; wrap in list().
    updates = OrderedDict(list(updates.items()) + list(lasagne.updates.adam(
        loss_or_grads=loss, params=params, learning_rate=LR).items()))

    test_output = lasagne.layers.get_output(mlp, deterministic=True)
    test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))
    test_err = T.mean(T.neq(T.argmax(test_output, axis=1),
                            T.argmax(target, axis=1)),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
    # and returning the corresponding training loss:
    train_fn = theano.function([input, target, LR], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input, target], [test_loss, test_err])

    print('Training...')

    quantized_net.train(train_fn, val_fn, mlp, batch_size, LR_start, LR_decay,
                        num_epochs, train_set.X, train_set.y, valid_set.X,
                        valid_set.y, test_set.X, test_set.y, save_path,
                        shuffle_parts)
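
The distinctive pattern here is the split parameter update: ADAM is applied once to the quantized weights W (via their gradients W_grads, followed by the clipping_scaling post-processing) and once to the remaining real-valued parameters, and the two update dictionaries are merged into a single OrderedDict for theano.function. Below is a minimal, self-contained sketch of that merge using a toy dense network in place of the quantized MLP; the layer sizes, variable names, and the way the parameters are split into two groups are illustrative, not from the source.

from collections import OrderedDict

import theano
import theano.tensor as T
import lasagne

input = T.matrix('input')
target = T.matrix('target')
LR = T.scalar('LR')

# Toy stand-in for the quantized MLP.
l_in = lasagne.layers.InputLayer((None, 784), input_var=input)
net = lasagne.layers.DenseLayer(l_in, num_units=10)

output = lasagne.layers.get_output(net)
loss = T.mean(T.sqr(T.maximum(0., 1. - target * output)))

# Illustrative split: in the real scripts this is quantized vs. real-valued.
params = lasagne.layers.get_all_params(net, trainable=True)
group_a, group_b = params[:1], params[1:]

# One ADAM pass per parameter group, merged into a single OrderedDict
# so that theano.function receives every update at once.
updates = lasagne.updates.adam(loss, group_a, learning_rate=LR)
updates = OrderedDict(
    list(updates.items())
    + list(lasagne.updates.adam(loss, group_b, learning_rate=LR).items()))

train_fn = theano.function([input, target, LR], loss, updates=updates)
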
Example #2
    updates = lasagne.updates.adam(loss_or_grads=W_grads,
                                   params=W,
                                   learning_rate=LR)
    updates = quantized_net.clipping_scaling(updates, mlp)

    # other parameters updates
    params = lasagne.layers.get_all_params(mlp,
                                           trainable=True,
                                           quantized=False)
    # In Python 3, dict views cannot be concatenated with "+"; wrap in list().
    updates = OrderedDict(list(updates.items()) + list(lasagne.updates.adam(
        loss_or_grads=loss, params=params, learning_rate=LR).items()))

    test_output = lasagne.layers.get_output(mlp, deterministic=True)
    test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))
    test_err = T.mean(T.neq(T.argmax(test_output, axis=1),
                            T.argmax(target, axis=1)),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
    # and returning the corresponding training loss:
    train_fn = theano.function([input, target, LR], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input, target], [test_loss, test_err])

    print('Training...')

    quantized_net.train(train_fn, val_fn, mlp, batch_size, LR_start, LR_decay,
                        num_epochs, train_images, train_labels, valid_images,
                        valid_labels, test_images, test_labels, save_path,
                        shuffle_parts)
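
Examples #1 and #2 differ only in the dataset variable names; both use the same evaluation expressions: a squared hinge loss, the mean of max(0, 1 - t*y)^2, and an argmax error rate. This presumes targets encoded as rows of {-1, +1} (one-hot labels rescaled, as in BinaryNet-style scripts). A NumPy sketch of what the two symbolic expressions compute (the function names are illustrative):

import numpy as np

def squared_hinge_loss(output, target):
    # Mean over samples and classes of max(0, 1 - t * y)^2;
    # target entries are expected in {-1, +1}.
    return np.mean(np.square(np.maximum(0., 1. - target * output)))

def error_rate(output, target):
    # Fraction of rows whose predicted argmax misses the target argmax.
    return np.mean(np.argmax(output, axis=1) != np.argmax(target, axis=1))
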
Example #3
    test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))
    test_err = T.mean(T.neq(T.argmax(test_output, axis=1),
                            T.argmax(target, axis=1)),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
    # and returning the corresponding training loss:
    train_fn = theano.function([input, target, LR], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input, target], [test_loss, test_err])

    print('Training...')

    my_val_loss, my_train_loss, my_val_err, mlp_best = quantized_net.train(
        train_fn, val_fn, mlp, batch_size, LR_start, LR_decay, num_epochs,
        X_train, y_train, X_val, y_val, X_test, y_test, save_path,
        shuffle_parts)

    # Symbolic output of the best network on the test inputs;
    # both prints report the same shape (symbolically and after evaluation).
    output = lasagne.layers.get_output(mlp_best, X_test)
    print(output.shape.eval())
    print((output.eval()).shape)

    makeRoc(X_test, labels, y_test, output.eval(), "./")

    plt.figure()
    plt.plot(my_val_loss, label='validation')
    plt.plot(my_train_loss, label='train')
    plt.legend()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.savefig("./loss.png")
Example #4
    test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))
    test_err = T.mean(T.neq(T.argmax(test_output, axis=1),
                            T.argmax(target, axis=1)),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
    # and returning the corresponding training loss:
    train_fn = theano.function([input, target, LR], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input, target], [test_loss, test_err])

    print('Training...')

    quantized_net.train(train_fn,
                        val_fn,
                        cnn,
                        batch_size,
                        LR_start,
                        LR_decay,
                        num_epochs,
                        train_set.X,
                        train_set.y,
                        valid_set.X,
                        valid_set.y,
                        test_set.X,
                        test_set.y,
                        save_path=save_path,
                        shuffle_parts=shuffle_parts,
                        rotations=random_rot_range)
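
Example #4 trains a CNN and passes the extra keyword arguments save_path, shuffle_parts, and rotations=random_rot_range, suggesting the trainer applies rotation augmentation to the input images. Purely as an illustration of what a (lo, hi) rotation range could mean; the actual augmentation lives inside quantized_net.train and is not shown here:

import numpy as np
from scipy.ndimage import rotate

def random_rotate_batch(images, rot_range, rng=np.random):
    # images: array of shape (N, H, W); rot_range: (lo, hi) in degrees.
    lo, hi = rot_range
    out = np.empty_like(images)
    for i, img in enumerate(images):
        out[i] = rotate(img, rng.uniform(lo, hi), reshape=False, mode='nearest')
    return out
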