Example #1
def disc_shared_structure(state):
    model = nn.Sequential()
    model.add(nn.Convolutional(filter_size=(3, 3),
                               num_filters=state['d_num_filters'],
                               num_channels=state['input_channels'],
                               step=(1, 1), border_mode=(1, 1),
                               weight=state['d_conv_init'], use_bias=False))
    model.add(nn.BatchNorm(state['d_num_filters']))
    # model.add(nn.Expression(T.nnet.relu))
    model.add(nn.LeakyRectify())
    # out_shape == (b, num_filters, 28, 28)

    model.add(nn.Convolutional(filter_size=(4, 4),
                               num_filters=state['d_num_filters']*2,
                               num_channels=state['d_num_filters'],
                               step=(2, 2), border_mode=(1, 1),
                               weight=state['d_conv_init'], use_bias=False))
    model.add(nn.BatchNorm(state['d_num_filters']*2))
    # model.add(nn.Expression(T.nnet.relu))
    model.add(nn.LeakyRectify())
    # out_shape == (b, num_filters*2, 14, 14)

    model.add(nn.Convolutional(filter_size=(4, 4),
                               num_filters=state['d_num_filters']*4,
                               num_channels=state['d_num_filters']*2,
                               step=(2, 2), border_mode=(1, 1),
                               weight=state['d_conv_init'], use_bias=False))
    model.add(nn.BatchNorm(state['d_num_filters']*4))
    # model.add(nn.Expression(T.nnet.relu))
    model.add(nn.LeakyRectify())
    # out_shape == (b, num_filters*4, 7, 7)

    model.add(nn.Expression(lambda x: T.flatten(x, 2)))

    return model
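
The out_shape comments above can be checked with the usual convolution arithmetic. A minimal standalone sketch, independent of the nn wrapper library, assuming the 28x28 input implied by the first comment (the conv_out helper is introduced here for illustration only):

# Standard output-size formula for a 2-D convolution:
# out = floor((in + 2*pad - kernel) / stride) + 1
def conv_out(size, kernel, stride, pad):
    return (size + 2 * pad - kernel) // stride + 1

size = 28                          # assumed input resolution
size = conv_out(size, 3, 1, 1)     # 3x3, stride 1, pad 1 -> 28
size = conv_out(size, 4, 2, 1)     # 4x4, stride 2, pad 1 -> 14
size = conv_out(size, 4, 2, 1)     # 4x4, stride 2, pad 1 -> 7
print(size)                        # 7, matching the final out_shape comment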
Example #2
def init_encoder(state):
    # inp_shape == (b, input_channels, 64, 64)
    model = nn.Sequential()
    model.add(
        nn.Convolutional(filter_size=(4, 4),
                         num_filters=state['d_num_filters'],
                         num_channels=state['input_channels'],
                         step=(2, 2),
                         border_mode=(1, 1),
                         weight=state['d_conv_init'],
                         use_bias=False,
                         name='d_enc_conv1'))
    model.add(nn.BatchNorm(state['d_num_filters']))
    model.add(nn.LeakyRectify())
    # model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, d_num_filters, 32, 32)

    model.add(
        nn.Convolutional(filter_size=(4, 4),
                         num_filters=state['d_num_filters'] * 2,
                         num_channels=state['d_num_filters'],
                         step=(2, 2),
                         border_mode=(1, 1),
                         weight=state['d_conv_init'],
                         use_bias=False,
                         name='d_enc_conv2'))
    model.add(nn.BatchNorm(state['d_num_filters'] * 2))
    model.add(nn.LeakyRectify())
    # model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, d_num_filters*2, 16, 16)

    model.add(
        nn.Convolutional(filter_size=(4, 4),
                         num_filters=state['d_num_filters'] * 4,
                         num_channels=state['d_num_filters'] * 2,
                         step=(2, 2),
                         border_mode=(1, 1),
                         weight=state['d_conv_init'],
                         use_bias=False,
                         name='d_enc_conv3'))
    model.add(nn.BatchNorm(state['d_num_filters'] * 4))
    model.add(nn.LeakyRectify())
    # model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, d_num_filters*4, 8, 8)

    return model
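
The encoder only reads a handful of keys from state. A hypothetical configuration dictionary is sketched below; the key names follow the functions above, but the values (and in particular the object bound to 'd_conv_init') are placeholders, not the repository's actual settings:

# Hypothetical configuration; values are illustrative only.
state = {
    'input_channels': 3,      # e.g. RGB input
    'd_num_filters': 64,      # base number of discriminator filters
    'd_conv_init': None,      # placeholder: repo-specific weight initializer object
    'dropout': 0.0,           # used by disc_shared_structure in Example #3
}
encoder = init_encoder(state)  # maps (b, 3, 64, 64) -> (b, 256, 8, 8) with these values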
Example #3
def disc_shared_structure(state):
    # inp_shape == (b, input_channels, 64, 64)
    model = nn.Sequential()
    if state['dropout'] > 0:
        model.add(nn.Dropout(state['dropout']))
    model.add(
        nn.Convolutional(filter_size=(4, 4),
                         num_filters=state['d_num_filters'],
                         num_channels=state['input_channels'],
                         step=(2, 2),
                         border_mode=(1, 1),
                         weight=state['d_conv_init'],
                         use_bias=False,
                         name='d_conv1'))
    model.add(nn.BatchNorm(state['d_num_filters']))
    # model.add(nn.LeakyRectify())
    model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, num_filters, 32, 32)

    model.add(
        nn.Convolutional(filter_size=(4, 4),
                         num_filters=state['d_num_filters'] * 2,
                         num_channels=state['d_num_filters'],
                         step=(2, 2),
                         border_mode=(1, 1),
                         weight=state['d_conv_init'],
                         use_bias=False,
                         name='d_conv2'))
    model.add(nn.BatchNorm(state['d_num_filters'] * 2))
    # model.add(nn.LeakyRectify())
    model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, num_filters*2, 16, 16)

    model.add(
        nn.Convolutional(filter_size=(4, 4),
                         num_filters=state['d_num_filters'] * 4,
                         num_channels=state['d_num_filters'] * 2,
                         step=(2, 2),
                         border_mode=(1, 1),
                         weight=state['d_conv_init'],
                         use_bias=False))
    model.add(nn.BatchNorm(state['d_num_filters'] * 4))
    # model.add(nn.LeakyRectify())
    model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, num_filters*4, 8, 8)

    model.add(
        nn.Convolutional(filter_size=(4, 4),
                         num_filters=state['d_num_filters'] * 4,
                         num_channels=state['d_num_filters'] * 4,
                         step=(2, 2),
                         border_mode=(1, 1),
                         weight=state['d_conv_init'],
                         use_bias=False))
    model.add(nn.BatchNorm(state['d_num_filters'] * 4))
    # model.add(nn.LeakyRectify())
    model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, num_filters*4, 4, 4)

    model.add(nn.Expression(lambda x: T.flatten(x, 2)))

    return model
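
The final nn.Expression(lambda x: T.flatten(x, 2)) collapses the (b, c, h, w) feature map into a matrix so a dense energy/score head can follow. A small standalone Theano check of that behaviour (assuming Theano is installed; the nn wrapper is not needed here):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')                        # (batch, channels, height, width)
flatten_fn = theano.function([x], T.flatten(x, 2))

feats = np.zeros((5, 256, 4, 4), dtype=theano.config.floatX)
print(flatten_fn(feats).shape)            # (5, 4096): batch dimension kept, rest flattened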
Example #4

# Clear any previous output files in the save directory, then dump the
# experiment configuration before training starts.
for f in glob.glob(files):
    os.remove(f)

pkl.dump(state, open('%s/state.pkl' % saveto, 'wb'))

np.random.seed(12345)

############################
# Init model & parameters
############################

# 1) Energy discriminator
disc_model = nn.Sequential()
disc_model.add(nn.Convolutional(filter_size=(3, 3),
                                num_filters=state['d_num_filters'],
                                num_channels=state['input_channels'],
                                step=(1, 1), border_mode=(1, 1),
                                weight=state['d_init'], use_bias=False,
                                name='d_conv1'))
disc_model.add(nn.BatchNorm(state['d_num_filters'], name='d_bn1'))
disc_model.add(nn.Expression(T.nnet.relu))
# out_shape == (b, num_filters, 32, 32)

disc_model.add(nn.Convolutional(filter_size=(4, 4),
                                num_filters=state['d_num_filters']*2,
                                num_channels=state['d_num_filters'],
                                step=(2, 2), border_mode=(1, 1),
                                weight=state['d_init'], use_bias=False,
                                name='d_conv2'))
disc_model.add(nn.BatchNorm(state['d_num_filters']*2, name='d_bn2'))
disc_model.add(nn.Expression(T.nnet.relu))
# out_shape == (b, num_filters*2, 16, 16)
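
Throughout these examples the activation is toggled between nn.LeakyRectify() and nn.Expression(T.nnet.relu) by commenting out one of the two lines. In plain Theano the same switch can be expressed through the alpha argument of T.nnet.relu; the sketch below only illustrates that operator (a slope of 0.2 is assumed, and this is not necessarily how nn.LeakyRectify is implemented):

import numpy as np
import theano
import theano.tensor as T

h = T.matrix('h')
standard = theano.function([h], T.nnet.relu(h))           # max(0, h)
leaky = theano.function([h], T.nnet.relu(h, alpha=0.2))   # 0.2 * h where h < 0

v = np.array([[-1.0, 2.0]], dtype=theano.config.floatX)
print(standard(v))   # -> [[0., 2.]]
print(leaky(v))      # -> [[-0.2, 2.]]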