Example #1
Builds a fully-connected BinaryConnect MLP over 28×28 single-channel inputs: dropout on the input, then n_hidden_layers blocks of a binary-weight dense layer, batch normalization with ReLU, and dropout.
    # Prepare Theano variables for inputs and targets
    input = T.tensor4('inputs')
    target = T.matrix('targets')
    LR = T.scalar('LR', dtype=theano.config.floatX)

    mlp = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=input)

    mlp = lasagne.layers.DropoutLayer(mlp, p=dropout_in)

    for k in range(n_hidden_layers):
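        # each hidden block: binary-weight dense layer -> batch norm (ReLU) -> dropout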

        mlp = binary_connect.DenseLayer(
            mlp,
            binary=binary,
            stochastic=stochastic,
            H=H,
            nonlinearity=lasagne.nonlinearities.identity,
            num_units=num_units)

        mlp = batch_norm.BatchNormLayer(
            mlp,
            epsilon=epsilon,
            alpha=alpha,
            nonlinearity=lasagne.nonlinearities.rectify)

        mlp = lasagne.layers.DropoutLayer(mlp, p=dropout_hidden)

    # The listing is cut off here; the remaining arguments below are an
    # assumption, mirroring the hidden-layer call with a 10-unit output layer.
    mlp = binary_connect.DenseLayer(
        mlp,
        binary=binary,
        stochastic=stochastic,
        H=H,
        nonlinearity=lasagne.nonlinearities.identity,
        num_units=10)
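
The snippet above stops at the network definition. As a rough sketch of how such a network is typically turned into a training function with plain Lasagne/Theano, reusing the input, target, and LR variables declared at the top: the squared hinge loss, the Adam optimizer, and the local names (train_output, loss, params, updates, train_fn) are assumptions for illustration, not part of the original example.

    # Sketch only; assumes the usual `import theano`, `import theano.tensor as T`,
    # and `import lasagne` that the snippet itself relies on.
    train_output = lasagne.layers.get_output(mlp, deterministic=False)

    # squared hinge loss, assuming targets encoded in {-1, +1}
    loss = T.mean(T.sqr(T.maximum(0., 1. - target * train_output)))

    params = lasagne.layers.get_all_params(mlp, trainable=True)
    updates = lasagne.updates.adam(loss_or_grads=loss, params=params,
                                   learning_rate=LR)

    # LR is passed in at call time, so the learning rate can be decayed externally
    train_fn = theano.function([input, target, LR], loss, updates=updates)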
Example #2
The fully-connected tail of a BinaryConnect CNN: after a final max-pooling and batch-normalization stage, the 1024FP-1024FP-10FP block is built from binary-weight dense layers.
    cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))

    cnn = batch_norm.BatchNormLayer(
        cnn,
        epsilon=epsilon,
        alpha=alpha,
        nonlinearity=lasagne.nonlinearities.rectify)

    # print(cnn.output_shape)

    # 1024FP-1024FP-10FP
    cnn = binary_connect.DenseLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        nonlinearity=lasagne.nonlinearities.identity,
        num_units=1024)

    cnn = batch_norm.BatchNormLayer(
        cnn,
        epsilon=epsilon,
        alpha=alpha,
        nonlinearity=lasagne.nonlinearities.rectify)

    # The listing is cut off here; the remaining arguments below are an
    # assumption, mirroring the previous call (the second 1024-unit layer of
    # the 1024FP-1024FP-10FP block).
    cnn = binary_connect.DenseLayer(
        cnn,
        binary=binary,
        stochastic=stochastic,
        H=H,
        W_LR_scale=W_LR_scale,
        nonlinearity=lasagne.nonlinearities.identity,
        num_units=1024)
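
For context, a rough numpy sketch of the weight binarization that the binary, stochastic, and H arguments of binary_connect.DenseLayer generally control (the helper binarize below is hypothetical and not taken from this listing): real-valued weights are mapped to ±H, either deterministically by sign or stochastically with a hard-sigmoid probability.

    import numpy as np

    def binarize(W, H=1., stochastic=False, rng=np.random):
        # hard sigmoid: map W from roughly [-H, H] to a probability p in [0, 1]
        p = np.clip((W / H + 1.) / 2., 0., 1.)
        if stochastic:
            # stochastic rounding: P(Wb = +H) = p
            return np.where(rng.random_sample(W.shape) < p, H, -H)
        # deterministic rounding: threshold p at 0.5, i.e. the sign of W
        return np.where(p >= 0.5, H, -H)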