Example #1
def define_recurrent_layers(out_axes=None,
                            celltype='RNN',
                            recurrent_units=[32],
                            init=GlorotInit(),
                            return_sequence=True):
    layers = []
    for e, i in enumerate(recurrent_units):
        # Intermediate layers must return the full sequence so the next
        # recurrent layer receives one; only the last layer honors the
        # return_sequence argument.
        layer_return_sequence = e < len(recurrent_units) - 1 or return_sequence
        if celltype == 'RNN':
            layers.append(
                Recurrent(nout=i,
                          init=init,
                          backward=False,
                          activation=Tanh(),
                          return_sequence=layer_return_sequence))
        elif celltype == 'LSTM':
            layers.append(
                LSTM(nout=i,
                     init=init,
                     backward=False,
                     activation=Tanh(),
                     gate_activation=Logistic(),
                     return_sequence=layer_return_sequence))
    if out_axes is not None:
        affine_layer = Affine(weight_init=init,
                              bias_init=init,
                              activation=Identity(),
                              axes=out_axes)
        layers.append(affine_layer)
    return layers
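
A minimal usage sketch, not part of the original listing: the returned layer list is meant to be wrapped in a container such as Sequential. The import path below assumes ngraph's neon frontend, and the unit sizes are arbitrary.

# Hypothetical usage of define_recurrent_layers (import path assumed).
from ngraph.frontends.neon import Sequential

layers = define_recurrent_layers(celltype='LSTM',
                                 recurrent_units=[64, 32],
                                 return_sequence=False)
model = Sequential(layers)  # two stacked LSTMs, no Affine output layer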
Example #2
def define_model(out_axis, filter_shapes=[5], n_filters=[32], init=KaimingInit()):
    assert len(filter_shapes) == len(n_filters)

    layers = []
    for f, n in zip(filter_shapes, n_filters):
        layers.append(Convolution(filter_shape=(f, n),
                                  filter_init=init,
                                  strides=1,
                                  padding="valid",
                                  dilation=1,
                                  activation=Rectlin(),
                                  batch_norm=True))

    affine_layer = Affine(weight_init=init, bias_init=init,
                          activation=Identity(), axes=out_axis)

    model = Sequential(layers + [affine_layer])

    return model
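
For context, a hedged sketch of a call site for define_model; ng.make_axis and the axis name are assumptions for illustration, not taken from the original snippet.

# Hypothetical call site for define_model (axis construction assumed).
import ngraph as ng

out_axis = ng.make_axis(length=10, name="classes")
model = define_model(out_axis, filter_shapes=[5, 3], n_filters=[32, 64])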
Example #3

parser = NgraphArgparser(description='MLP GAN example')
args = parser.parse_args()

# model parameters
h_dim = 4
minibatch_discrimination = False
num_iterations = 600
batch_size = 12
num_examples = num_iterations * batch_size

# generator
# (affine_layer is a helper defined elsewhere in the full example; from its
#  usage it presumably wraps Affine with the given output size, activation,
#  and name)
generator_layers = [
    affine_layer(h_dim, Rectlin(), name='g0'),
    affine_layer(1, Identity(), name='g1')
]
generator = Sequential(generator_layers)

# discriminator
discriminator_layers = [
    affine_layer(2 * h_dim, Tanh(), name='d0'),
    affine_layer(2 * h_dim, Tanh(), name='d1')
]
if minibatch_discrimination:
    raise NotImplementedError
else:
    discriminator_layers.append(affine_layer(2 * h_dim, Tanh(), name='d2'))
discriminator_layers.append(affine_layer(1, Logistic(), name='d3'))
discriminator = Sequential(discriminator_layers)
Example #4
# placeholders for the network inputs (the snippet begins mid-definition;
# the dict name 'inputs' is assumed)
inputs = {
    'X': ng.placeholder(in_axes),
    'y': ng.placeholder(out_axes),
    'iteration': ng.placeholder(axes=())
}

# Network Definition
seq1 = Sequential([
    LSTM(nout=recurrent_units,
         init=init_uni,
         backward=False,
         activation=Tanh(),
         gate_activation=Logistic(),
         return_sequence=predict_seq),
    Affine(weight_init=init_uni,
           bias_init=init_uni,
           activation=Identity(),
           axes=out_axis)
])

# Optimizer
# The following schedule sets the initial learning rate to 0.05 (base_lr).
# At iteration num_iterations // 5, the learning rate is multiplied by gamma (new lr = 0.005).
# At iteration num_iterations // 2, it is multiplied by gamma again (new lr = 0.0005).
schedule = [num_iterations // 5, num_iterations // 2]
learning_rate_policy = {
    'name': 'schedule',
    'schedule': schedule,
    'gamma': 0.1,
    'base_lr': 0.05
}
optimizer = Adam(learning_rate=learning_rate_policy,
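
As a rough illustration of the 'schedule' policy above, it amounts to a step decay of the learning rate. The sketch below is plain Python, and num_iterations is an assumed value since it is not shown in this snippet.

# Step-decay illustration for the schedule policy (illustrative values only).
num_iterations = 3000                                    # assumed for the sketch
schedule = [num_iterations // 5, num_iterations // 2]    # [600, 1500]
base_lr, gamma = 0.05, 0.1
for it in (0, schedule[0], schedule[1], num_iterations - 1):
    lr = base_lr * gamma ** sum(it >= step for step in schedule)
    print(it, lr)  # -> 0.05, 0.005, 0.0005, 0.0005 (up to float rounding)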
Example #5
previous_steps = ([ng.constant(0., [batch_axis, feature_axis])]
                  + [target_steps[i] for i in range(seq_len - 1)])
previous = ng.stack(previous_steps, time_axis)

# define model
encoder_recurrent_units = list(map(int, args.n_hidden.split(",")))
if args.bottleneck:
    decoder_recurrent_units = encoder_recurrent_units[::-1]
else:
    decoder_recurrent_units = encoder_recurrent_units
encoder = recurrent_model.RecurrentEncoder(celltype=args.modeltype,
                                           recurrent_units=encoder_recurrent_units,
                                           bottleneck=args.bottleneck)
decoder = recurrent_model.RecurrentDecoder(out_axes=(feature_axis,), celltype=args.modeltype,
                                           recurrent_units=decoder_recurrent_units)

affine_layer = Affine(weight_init=init_uni, bias_init=init_uni, activation=Identity(),
                      axes=[out_axis])

# Optimizer
optimizer = RMSProp(gradient_clip_value=args.grad_clip_value, learning_rate=args.lr)


def predictions(encoder, affine_layer, inputs):
    encoded = encoder(inputs, combine=True)
    preds = affine_layer(encoded)
    preds = ng.axes_with_order(preds, rul_axes)
    return preds


def build_seq2seq_computations():
    # Training loss, optimizer