예제 #1
0
def disc_shared_structure(state):
    """Build the shared convolutional trunk of the discriminator.

    Input is expected as (b, input_channels, 28, 28); the trunk keeps the
    28x28 resolution through the first stage, then halves it twice, and
    finally flattens the feature maps to (b, d_num_filters*4 * 7 * 7).
    """
    net = nn.Sequential()

    # One tuple per conv stage: (filter_size, step, in_channels, out_channels).
    # Spatial size: 28 -> 28 -> 14 -> 7.
    stages = [
        ((3, 3), (1, 1), state['input_channels'], state['d_num_filters']),
        ((4, 4), (2, 2), state['d_num_filters'], state['d_num_filters'] * 2),
        ((4, 4), (2, 2), state['d_num_filters'] * 2,
         state['d_num_filters'] * 4),
    ]
    for fsize, stride, c_in, c_out in stages:
        net.add(nn.Convolutional(filter_size=fsize,
                                 num_filters=c_out,
                                 num_channels=c_in,
                                 step=stride, border_mode=(1, 1),
                                 weight=state['d_conv_init'], use_bias=False))
        net.add(nn.BatchNorm(c_out))
        net.add(nn.LeakyRectify())

    # Collapse (b, c, h, w) feature maps into (b, c*h*w) for the head.
    net.add(nn.Expression(lambda x: T.flatten(x, 2)))

    return net
예제 #2
0
def init_gen_model(state):
    """Build a DCGAN-style generator: noise vector -> (b, input_channels, 28, 28).

    Assumes `state` supplies 'noise_size', 'g_num_filters', 'input_channels'
    and the initializers 'g_init' / 'g_conv_init' -- TODO confirm against caller.
    """
    gen_model = nn.Sequential()
    # Project the noise vector up to a flat 7x7 feature volume.
    gen_model.add(
        nn.Linear(state['noise_size'],
                  state['g_num_filters'] * 4 * 7 * 7,
                  weight=state['g_init'],
                  use_bias=False))
    gen_model.add(nn.BatchNorm(state['g_num_filters'] * 4 * 7 * 7))
    gen_model.add(nn.Expression(T.nnet.relu))

    # Reshape flat projection to (b, g_num_filters*4, 7, 7) feature maps.
    gen_model.add(
        nn.Expression(lambda x: T.reshape(x, (x.shape[0], state['g_num_filters'
                                                                ] * 4, 7, 7))))

    # NOTE: in this library Deconvolutional takes num_filters = input channels
    # and num_channels = output channels (the following BatchNorm sizes match
    # num_channels) -- presumed from usage here; verify against nn docs.
    gen_model.add(
        nn.Deconvolutional(filter_size=(4, 4),
                           num_filters=state['g_num_filters'] * 4,
                           num_channels=state['g_num_filters'] * 2,
                           step=(2, 2),
                           border_mode=(1, 1),
                           use_bias=False,
                           weight=state['g_conv_init']))
    gen_model.add(nn.BatchNorm(state['g_num_filters'] * 2))
    gen_model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, num_filters, 14, 14)

    gen_model.add(
        nn.Deconvolutional(filter_size=(4, 4),
                           num_filters=state['g_num_filters'] * 2,
                           num_channels=state['g_num_filters'],
                           step=(2, 2),
                           border_mode=(1, 1),
                           use_bias=False,
                           weight=state['g_conv_init']))
    gen_model.add(nn.BatchNorm(state['g_num_filters']))
    gen_model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, g_num_filters, 28, 28)
    # (original comment said input_channels; this layer outputs g_num_filters
    # channels, matching the BatchNorm size above)

    # Final 1-step deconv maps down to the image channel count.
    gen_model.add(
        nn.Deconvolutional(filter_size=(3, 3),
                           num_filters=state['g_num_filters'],
                           num_channels=state['input_channels'],
                           step=(1, 1),
                           border_mode=(1, 1),
                           use_bias=True,
                           weight=state['g_conv_init']))
    # No output nonlinearity: sigmoid deliberately left disabled.
    # gen_model.add(nn.Expression(T.nnet.sigmoid))

    # out_shape == (b, input_channels, 28, 28)

    return gen_model
예제 #3
0
def init_encoder(state):
    """Convolutional encoder: (b, input_channels, 64, 64) -> (b, d_num_filters*4, 8, 8)."""
    enc = nn.Sequential()
    nf = state['d_num_filters']

    # Each stage halves the spatial resolution: 64 -> 32 -> 16 -> 8.
    # One tuple per stage: (in_channels, out_channels, layer name).
    stages = [
        (state['input_channels'], nf, 'd_enc_conv1'),
        (nf, nf * 2, 'd_enc_conv2'),
        (nf * 2, nf * 4, 'd_enc_conv3'),
    ]
    for c_in, c_out, layer_name in stages:
        enc.add(
            nn.Convolutional(filter_size=(4, 4),
                             num_filters=c_out,
                             num_channels=c_in,
                             step=(2, 2),
                             border_mode=(1, 1),
                             weight=state['d_conv_init'],
                             use_bias=False,
                             name=layer_name))
        enc.add(nn.BatchNorm(c_out))
        enc.add(nn.LeakyRectify())

    return enc
예제 #4
0
def init_gen_model(state):
    """Build an MLP generator: noise -> two hidden blocks -> input_size vector."""
    net = nn.Sequential()

    # Hidden blocks: Linear -> BatchNorm -> ReLU, twice.
    dims = [state['noise_size'], state['hidden_size'], state['hidden_size']]
    for d_in, d_out in zip(dims[:-1], dims[1:]):
        net.add(nn.Linear(d_in, d_out, weight=state['init_g']))
        net.add(nn.BatchNorm(d_out))
        net.add(nn.Expression(T.nnet.relu))

    # Output projection: no normalization, no nonlinearity.
    net.add(
        nn.Linear(state['hidden_size'],
                  state['input_size'],
                  weight=state['init_g']))

    return net
 def __init__(self):
     """Five-layer MLP head: 3072 inputs, four 100-unit blocks, 10 logits."""
     super().__init__()
     width = 100
     in_dim = 3072  # flattened 32x32x3 input -- presumed CIFAR-like; confirm
     layers = []
     # Four identical hidden blocks: Dense -> BatchNorm -> ReLU -> Dropout.
     for _ in range(4):
         layers.append(nn.Dense(in_dim, width))
         layers.append(nn.BatchNorm(width))
         layers.append(nn.ReLU())
         layers.append(nn.Dropout(0.75))
         in_dim = width
     layers.append(nn.Dense(width, 10))  # output layer (10-class logits)
     self.layers = layers
예제 #6
0
def init_gen_model(state):
    """Build a DCGAN generator: noise vector -> tanh image (b, input_channels, 64, 64).

    Assumes `state` supplies 'noise_size', 'g_num_filters', 'input_channels'
    and the initializers 'g_init' / 'g_conv_init' -- TODO confirm against caller.
    """
    gen_model = nn.Sequential()
    # Project the noise vector up to a flat 4x4 feature volume.
    gen_model.add(
        nn.Linear(state['noise_size'],
                  state['g_num_filters'] * 4 * 4 * 4,
                  weight=state['g_init'],
                  use_bias=False))
    gen_model.add(nn.BatchNorm(state['g_num_filters'] * 4 * 4 * 4))
    gen_model.add(nn.Expression(T.nnet.relu))

    # Reshape flat projection to (b, g_num_filters*4, 4, 4) feature maps.
    gen_model.add(
        nn.Expression(lambda x: T.reshape(x, (x.shape[0], state['g_num_filters'
                                                                ] * 4, 4, 4))))

    # NOTE: in this library Deconvolutional takes num_filters = input channels
    # and num_channels = output channels (the following BatchNorm sizes match
    # num_channels) -- presumed from usage here; verify against nn docs.
    gen_model.add(
        nn.Deconvolutional(filter_size=(4, 4),
                           num_filters=state['g_num_filters'] * 4,
                           num_channels=state['g_num_filters'] * 4,
                           step=(2, 2),
                           border_mode=(1, 1),
                           use_bias=False,
                           weight=state['g_conv_init']))
    gen_model.add(nn.BatchNorm(state['g_num_filters'] * 4))
    gen_model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, g_num_filters*4, 8, 8)
    # (original comment said *6; this layer outputs g_num_filters*4 channels,
    # matching the BatchNorm size above)

    gen_model.add(
        nn.Deconvolutional(filter_size=(4, 4),
                           num_filters=state['g_num_filters'] * 4,
                           num_channels=state['g_num_filters'] * 2,
                           step=(2, 2),
                           border_mode=(1, 1),
                           use_bias=False,
                           weight=state['g_conv_init']))
    gen_model.add(nn.BatchNorm(state['g_num_filters'] * 2))
    gen_model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, g_num_filters*2, 16, 16)

    gen_model.add(
        nn.Deconvolutional(filter_size=(4, 4),
                           num_filters=state['g_num_filters'] * 2,
                           num_channels=state['g_num_filters'],
                           step=(2, 2),
                           border_mode=(1, 1),
                           use_bias=False,
                           weight=state['g_conv_init']))
    gen_model.add(nn.BatchNorm(state['g_num_filters']))
    gen_model.add(nn.Expression(T.nnet.relu))
    # out_shape == (b, g_num_filters, 32, 32)

    # Final upsampling stage maps to image channels; tanh output in [-1, 1].
    gen_model.add(
        nn.Deconvolutional(filter_size=(4, 4),
                           num_filters=state['g_num_filters'],
                           num_channels=state['input_channels'],
                           step=(2, 2),
                           border_mode=(1, 1),
                           use_bias=True,
                           weight=state['g_conv_init']))
    gen_model.add(nn.Expression(T.tanh))
    # out_shape == (b, input_channels, 64, 64)

    return gen_model
예제 #7
0
def disc_shared_structure(state):
    """Shared discriminator trunk for 64x64 inputs.

    (b, input_channels, 64, 64) -> four strided conv stages -> flattened
    (b, d_num_filters*4 * 4 * 4) features. Optional input dropout when
    state['dropout'] > 0.
    """
    net = nn.Sequential()
    if state['dropout'] > 0:
        net.add(nn.Dropout(state['dropout']))

    nf = state['d_num_filters']
    # One tuple per stage: (in_channels, out_channels, optional layer name).
    # Each stage halves H and W: 64 -> 32 -> 16 -> 8 -> 4.
    stages = [
        (state['input_channels'], nf, 'd_conv1'),
        (nf, nf * 2, 'd_conv2'),
        (nf * 2, nf * 4, None),
        (nf * 4, nf * 4, None),
    ]
    for c_in, c_out, layer_name in stages:
        # Only the first two convs carry explicit names, as in the original.
        extra = {} if layer_name is None else {'name': layer_name}
        net.add(
            nn.Convolutional(filter_size=(4, 4),
                             num_filters=c_out,
                             num_channels=c_in,
                             step=(2, 2),
                             border_mode=(1, 1),
                             weight=state['d_conv_init'],
                             use_bias=False,
                             **extra))
        net.add(nn.BatchNorm(c_out))
        net.add(nn.Expression(T.nnet.relu))

    # Collapse (b, c, h, w) feature maps into (b, c*h*w) for the head.
    net.add(nn.Expression(lambda x: T.flatten(x, 2)))

    return net
예제 #8
0
def cifar10(
        path,  # pylint: disable=invalid-name
        conv_channels=None,
        linear_layers=None,
        batch_norm=True,
        batch_size=128,
        num_threads=4,
        min_queue_examples=1000,
        mode="train"):
    """Cifar10 classification with a convolutional network.

    Args:
        path: directory containing (or to download) the CIFAR-10 binaries.
        conv_channels: iterable of output channel counts for the conv stack.
        linear_layers: iterable of hidden-layer sizes for the final MLP.
        batch_norm: whether to batch-normalize conv and linear activations.
        batch_size: examples per dequeued batch.
        num_threads: parallel enqueue threads feeding the shuffle queue.
        min_queue_examples: minimum elements kept in the shuffle queue.
        mode: "train" (five training batches) or "test" (held-out batch).

    Returns:
        A zero-argument `build` callable that dequeues a batch, runs the
        network and returns the cross-entropy loss op.

    Raises:
        ValueError: if `mode` is neither "train" nor "test".
    """

    # Data.
    _maybe_download_cifar10(path)

    # Read images and labels from disk.
    if mode == "train":
        filenames = [
            os.path.join(path, CIFAR10_FOLDER, "data_batch_{}.bin".format(i))
            # was xrange: range yields the same values and is Py2/Py3 safe
            for i in range(1, 6)
        ]
    elif mode == "test":
        # NOTE(review): training files are joined under CIFAR10_FOLDER but the
        # test batch is not -- confirm test_batch.bin really sits at the root
        # of `path`; otherwise this should also include CIFAR10_FOLDER.
        filenames = [os.path.join(path, "test_batch.bin")]
    else:
        raise ValueError("Mode {} not recognised".format(mode))

    # CIFAR-10 binary record layout: 1 label byte + 3*32*32 image bytes.
    depth = 3
    height = 32
    width = 32
    label_bytes = 1
    image_bytes = depth * height * width
    record_bytes = label_bytes + image_bytes
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    _, record = reader.read(tf.train.string_input_producer(filenames))
    record_bytes = tf.decode_raw(record, tf.uint8)

    label = tf.cast(tf.slice(record_bytes, [0], [label_bytes]), tf.int32)
    raw_image = tf.slice(record_bytes, [label_bytes], [image_bytes])
    image = tf.cast(tf.reshape(raw_image, [depth, height, width]), tf.float32)
    # height x width x depth.
    image = tf.transpose(image, [1, 2, 0])
    image = tf.div(image, 255)  # scale pixel values into [0, 1]

    # Shuffle queue decouples the reader threads from training batches.
    queue = tf.RandomShuffleQueue(
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples,
        dtypes=[tf.float32, tf.int32],
        shapes=[image.get_shape(), label.get_shape()])
    # was xrange: range is equivalent here and Python 3 compatible
    enqueue_ops = [queue.enqueue([image, label]) for _ in range(num_threads)]
    tf.train.add_queue_runner(tf.train.QueueRunner(queue, enqueue_ops))

    # Network.
    def _conv_activation(x):  # pylint: disable=invalid-name
        # ReLU followed by 2x2 max-pooling with stride 2.
        return tf.nn.max_pool(tf.nn.relu(x),
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding="SAME")

    conv = nn.ConvNet2D(output_channels=conv_channels,
                        kernel_shapes=[5],
                        strides=[1],
                        paddings=[nn.SAME],
                        activation=_conv_activation,
                        activate_final=True,
                        initializers=_nn_initializers,
                        use_batch_norm=batch_norm)

    if batch_norm:
        linear_activation = lambda x: tf.nn.relu(nn.BatchNorm()(x))
    else:
        linear_activation = tf.nn.relu

    # NOTE(review): `linear_layers=None` default would crash in list() below;
    # callers presumably always pass it -- confirm before hardening.
    mlp = nn.MLP(list(linear_layers) + [10],
                 activation=linear_activation,
                 initializers=_nn_initializers)
    network = nn.Sequential([conv, nn.BatchFlatten(), mlp])

    def build():
        image_batch, label_batch = queue.dequeue_many(batch_size)
        label_batch = tf.reshape(label_batch, [batch_size])

        output = network(image_batch)
        return _xent_loss(output, label_batch)

    return build
    
np.random.seed(12345)  # fixed seed for reproducible initialization

############################
# Init model & parameters
############################

# 1) Energy discriminator
# Input presumably (b, input_channels, 32, 32) given the 32x32 comment
# below -- TODO confirm against the data pipeline.
disc_model = nn.Sequential()
disc_model.add(nn.Convolutional(filter_size=(3, 3),
                                num_filters=state['d_num_filters'],
                                num_channels=state['input_channels'],
                                step=(1, 1), border_mode=(1, 1),
                                weight=state['d_init'], use_bias=False,
                                name='d_conv1'))
disc_model.add(nn.BatchNorm(state['d_num_filters'], name='d_bn1'))
disc_model.add(nn.Expression(T.nnet.relu))
# out_shape == (b, num_filters, 32, 32)

# Strided conv halves the spatial resolution.
disc_model.add(nn.Convolutional(filter_size=(4, 4),
                                num_filters=state['d_num_filters']*2,
                                num_channels=state['d_num_filters'],
                                step=(2, 2), border_mode=(1, 1),
                                weight=state['d_init'], use_bias=False,
                                name='d_conv2'))
disc_model.add(nn.BatchNorm(state['d_num_filters']*2, name='d_bn2'))
disc_model.add(nn.Expression(T.nnet.relu))
# out_shape == (b, num_filters, 16, 16)

disc_model.add(nn.Convolutional(filter_size=(4, 4),
                                num_filters=state['d_num_filters']*4,