Code example #1
    test_output = lasagne.layers.get_output(mlp, deterministic=True)
    test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))
    test_err = T.mean(T.neq(T.argmax(test_output, axis=1),
                            T.argmax(target, axis=1)),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
    # and returning the corresponding training loss:
    train_fn = theano.function([input, target, LR], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input, target], [test_loss, test_err])

    print('Training...')

    binary_connect.train(train_fn, val_fn, batch_size, LR_start, LR_decay,
                         num_epochs, train_set.X, train_set.y, valid_set.X,
                         valid_set.y, test_set.X, test_set.y)

    # print("display histogram")

    # W = lasagne.layers.get_all_layers(mlp)[2].W.get_value()
    # print(W.shape)

    # histogram = np.histogram(W,bins=1000,range=(-1.1,1.1))
    # np.savetxt(str(dropout_hidden)+str(binary)+str(stochastic)+str(H)+"_hist0.csv", histogram[0], delimiter=",")
    # np.savetxt(str(dropout_hidden)+str(binary)+str(stochastic)+str(H)+"_hist1.csv", histogram[1], delimiter=",")

    # Optionally, you could now dump the network weights to a file like this:
    # np.savez('model.npz', lasagne.layers.get_all_param_values(network))
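The commented-out dump above passes the whole parameter list to np.savez as a single argument. A minimal sketch of the usual Lasagne save/restore round trip is shown below; it is an assumption, not part of the original snippet, and it reuses the file name model.npz from the comment while restoring into the already-built mlp network.

    # Hedged sketch (not in the original): save one array per parameter and
    # restore the values into an identically structured network.
    np.savez('model.npz', *lasagne.layers.get_all_param_values(mlp))
    with np.load('model.npz') as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    lasagne.layers.set_all_param_values(mlp, param_values)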
Code example #2
    test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))
    test_err = T.mean(T.neq(T.argmax(test_output, axis=1),
                            T.argmax(target, axis=1)),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
    # and returning the corresponding training loss:
    train_fn = theano.function([input, target, LR], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input, target], [test_loss, test_err])

    print('Training...')

    binary_connect.train(train_fn, val_fn, batch_size, LR_start, LR_decay, 200,
                         train_images, train_labels, valid_images,
                         valid_labels, test_images, test_labels)

    # collect the weight values and the per-layer mean/std parameters of each binary DenseLayer
    from six.moves import cPickle as pickle
    print(lasagne.layers.get_all_params(mlp))
    para = lasagne.layers.get_all_param_values(mlp)
    weights = []
    means = []
    stds = []
    for i in range(0, 24, 6):
        weights.append(para[i])
        means.append(para[i + 2])
        stds.append(para[i + 3])

    # save the values to a pickle file in order to reconstruct the model in the TensorFlow framework
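The snippet ends before the dump itself. A minimal sketch of that pickle step, assuming a hypothetical file name binary_connect_params.pkl and the weights/means/stds lists collected above, could be:

    # Hedged sketch (file name is an assumption): dump the collected values so
    # they can be reloaded when rebuilding the model in TensorFlow.
    with open('binary_connect_params.pkl', 'wb') as f:
        pickle.dump({'weights': weights, 'means': means, 'stds': stds}, f)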
Code example #3
            loss_or_grads=loss, params=params, learning_rate=LR).items())

    else:
        params = lasagne.layers.get_all_params(cnn, trainable=True)
        updates = lasagne.updates.adam(loss_or_grads=loss,
                                       params=params,
                                       learning_rate=LR)

    test_output = lasagne.layers.get_output(cnn, deterministic=True)
    test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))
    test_err = T.mean(T.neq(T.argmax(test_output, axis=1),
                            T.argmax(target, axis=1)),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
    # and returning the corresponding training loss:
    train_fn = theano.function([input, target, LR], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input, target], [test_loss, test_err])

    print('Training...')

    print(gates.train.X.shape)
    binary_connect.train(train_fn, val_fn, batch_size, LR_start, LR_decay,
                         num_epochs, gates.train.X, gates.train.Y,
                         gates.valid.X, gates.valid.Y, gates.test.X,
                         gates.test.Y)

    np.savez('model_all.npz', lasagne.layers.get_all_param_values(cnn))
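Because np.savez above receives the parameter list as one positional argument, all values are stored together under the single key 'arr_0' (as an object array, which np.load only accepts with allow_pickle=True). A hedged sketch of reloading it, assuming the same cnn graph has been rebuilt, could be:

    # Hedged sketch (not in the original): reload the archive saved above and
    # push the values back into an identically structured network.
    with np.load('model_all.npz', allow_pickle=True) as f:
        saved_params = list(f['arr_0'])
    lasagne.layers.set_all_param_values(cnn, saved_params)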