Code Example #1
File: mnist.py Project: sunnerzs/dpgan-1
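These snippets are excerpts and omit their import lines. A plausible common preamble, assuming the layer functions come from TFLearn as their names suggest (the original files may import differently):

# Assumed imports for the snippets on this page; the original files may differ.
import os

import numpy as np
import tensorflow as tf
import tflearn
from tflearn.activations import relu
from tflearn.layers.conv import conv_2d, conv_2d_transpose, max_pool_2d
from tflearn.layers.core import dropout, fully_connected, input_data
from tflearn.layers.estimator import regression
from tflearn.layers.merge_ops import merge
from tflearn.layers.normalization import batch_normalization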
def generator_forward(config,
                      noise=None,
                      scope="generator",
                      name=None,
                      reuse=False,
                      num_samples=-1):
    with tf.variable_scope(scope, name, reuse=reuse):
        if noise is None:
            noise = tf.random_normal(
                [config.batch_size if num_samples == -1 else num_samples, 128],
                name="noise")

        output = fully_connected(noise, 4 * 4 * 4 * config.dim)
        output = batch_normalization(output)
        output = tf.nn.relu(output)
        output = tf.reshape(output, [-1, 4, 4, 4 * config.dim])

        output = conv_2d_transpose(output,
                                   2 * config.dim,
                                   5, [8, 8],
                                   strides=2)
        # crop the 8x8 map to 7x7 so the stride-2 deconvs below yield 14x14, then 28x28
        output = output[:, :7, :7, :]

        output = conv_2d_transpose(output, config.dim, 5, [14, 14], strides=2)
        output = tf.nn.relu(output)

        output = conv_2d_transpose(output, 1, 5, [28, 28], strides=2)

        output = tf.tanh(output)

    return output
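A minimal usage sketch for this generator, assuming a config object exposing batch_size and dim as the function body requires (this Config class is hypothetical):

# Hypothetical config; the real project defines its own.
class Config:
    batch_size = 64
    dim = 64

fake = generator_forward(Config())              # shape [64, 28, 28, 1], values in [-1, 1]
more = generator_forward(Config(), reuse=True,  # reuse=True shares the generator weights
                         num_samples=16)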
Code Example #2
def Network():
    network = input_data(shape=[None, 128, 128, 1], name='input')
    network = conv_2d(network, 3, 3, strides=1, activation='relu')
    layer1 = max_pool_2d(network, 3)
    network = conv_2d(layer1, 64, 3, strides=1, activation='relu')
    network = conv_2d(network, 64, 3, strides=1, activation='relu')
    layer2 = max_pool_2d(network, 3)
    network = conv_2d(layer2, 128, 3, strides=1, activation='relu')
    network = conv_2d(network, 128, 3, strides=1, activation='relu')
    network = conv_2d(network, 128, 3, strides=1, activation='relu')
    network = conv_2d(network, 128, 3, strides=2, activation='relu')
    layer3 = max_pool_2d(network, 3)
    network = conv_2d(layer3, 256, 3, strides=1, activation='relu')
    network = conv_2d(network, 256, 3, strides=1, activation='relu')
    network = conv_2d(network, 256, 3, strides=1, activation='relu')
    network = conv_2d(network, 256, 3, strides=2, activation='relu')
    layer4 = max_pool_2d(network, 3, name="layer4")
    network = conv_2d(layer4, 512, 3, strides=1, activation='relu')
    network = conv_2d(network, 512, 3, strides=1, activation='relu')
    network = conv_2d(network, 512, 3, strides=1, activation='relu')
    network = conv_2d(network, 512, 3, strides=2, activation='relu')
    layer5 = max_pool_2d(network, 3, name="layer5")
    network = conv_2d(layer5, 256, 1, strides=1, activation='relu')
    network = batch_normalization(network)
    network = conv_2d_transpose(network,
                                256,
                                1, [32, 32],
                                strides=2,
                                activation='relu')
    layer4 = batch_normalization(layer4)
    network = tflearn.layers.merge_ops.merge([layer4, network],
                                             mode="elemwise_sum")
    network = conv_2d(network, 128, 3, strides=1, activation='relu')
    network = conv_2d_transpose(network,
                                128,
                                1, [64, 64],
                                strides=2,
                                activation='relu')
    layer3 = batch_normalization(layer3)
    network = tflearn.layers.merge_ops.merge([layer3, network],
                                             mode="elemwise_sum",
                                             axis=1)
    network = conv_2d(network, 64, 3, strides=1, activation='relu')
    network = conv_2d_transpose(network,
                                64,
                                1, [128, 128],
                                strides=2,
                                activation='relu')
    layer2 = batch_normalization(layer2)
    network = tflearn.layers.merge_ops.merge([layer2, network],
                                             mode="elemwise_sum",
                                             axis=1)
    network = conv_2d(network, 3, 3, strides=1, activation='relu')
    layer1 = batch_normalization(layer1)
    network = tflearn.layers.merge_ops.merge([layer1, network],
                                             mode="elemwise_sum",
                                             axis=1)
    network = conv_2d(network, 3, 3, strides=1, activation='relu')
    network = conv_2d(network, 2, 3, strides=1, activation='softmax')
    return network
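Network() maps a 128x128 single-channel input to a two-channel per-pixel softmax; a sketch of wrapping it for training (the optimizer and loss here are illustrative assumptions, not taken from the project):

# Illustrative wiring; optimizer and loss are assumptions.
net = regression(Network(), optimizer='adam',
                 loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(net, tensorboard_dir='log')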
Code Example #3
def model():
    # loading saved numpy files with training data
    INPATH = 'training_input_cropped.npy'
    OUTPATH = 'training_output_cropped.npy'

    if not os.path.exists(INPATH) or not os.path.exists(OUTPATH):
        training_input, training_output = fetch_data.create_train_data()

    else:
        training_input = np.load(INPATH)
        training_output = np.load(OUTPATH)

    # split into disjoint training and validation sets
    X = training_input[:88000].reshape(-1, 100, 100, 3)
    Y = training_output[:88000].reshape(-1, 100, 100, 3)

    test_x = training_input[88000:].reshape(-1, 100, 100, 3)
    test_y = training_output[88000:].reshape(-1, 100, 100, 3)

    convnet = input_data(shape=[None, 100, 100, 3], name='input')

    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = conv_2d(convnet, 128, 5, activation='relu')
    convnet = conv_2d(convnet, 256, 5, activation='relu')
    convnet = conv_2d(convnet, 512, 5, activation='relu')

    convnet = conv_2d_transpose(convnet, 256, 5, [100, 100], activation='relu')
    convnet = conv_2d_transpose(convnet, 128, 5, [100, 100], activation='relu')
    convnet = conv_2d(convnet, 32, 3, activation='relu')

    convnet = dropout(convnet, 0.8)

    convnet = conv_2d(convnet, 3, 5, activation='linear')
    # Adam optimizer used to minimize mean-square loss between pixel values
    convnet = regression(convnet,
                         optimizer='adam',
                         learning_rate=0.001,
                         loss='mean_square',
                         name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    if os.path.exists('model3'):
        model.load(os.path.join('model3', 'sketch1'))
        print('model loaded')
    else:
        model.fit({'input': X}, {'targets': Y},
                  n_epoch=6,
                  validation_set=({
                      'input': test_x
                  }, {
                      'targets': test_y
                  }),
                  snapshot_step=500,
                  show_metric=True)

        model.save(os.path.join('model3', 'sketch1'))

    return model
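Once the model is loaded or trained, inference is a reshape plus predict; a minimal sketch, where image stands in for a real 100x100x3 array:

# Hypothetical inference call; `image` is a placeholder array.
image = np.zeros((100, 100, 3), dtype=np.float32)
net = model()
restored = net.predict(image.reshape(-1, 100, 100, 3))  # -> shape (1, 100, 100, 3)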
Code Example #4
def generator_forward(config,
                      noise=None,
                      scope="generator",
                      name=None,
                      reuse=False,
                      num_samples=-1):
    with tf.variable_scope(scope, name, reuse=reuse):
        if noise is None:
            noise = tf.random_normal(
                [config.batch_size if num_samples == -1 else num_samples, 128],
                name="noise")

        output = fully_connected(noise,
                                 4 * 4 * 8 * config.gen_dim,
                                 name="input")
        output = tf.reshape(output, [-1, 4, 4, 8 * config.gen_dim])
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   4 * config.gen_dim,
                                   5, [8, 8],
                                   name="conv1",
                                   strides=2)
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   2 * config.gen_dim,
                                   5, [16, 16],
                                   name="conv2",
                                   strides=2)
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   config.gen_dim,
                                   5, [32, 32],
                                   name="conv3",
                                   strides=2)
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output,
                                   3,
                                   5, [64, 64],
                                   name="conv4",
                                   strides=2)
        output = tf.tanh(output)

    return output
Code Example #5
File: network.py Project: ka2hyeon/ral2020
def deconv(h_0,
           filters,
           kernel_size,
           strides,
           output_shape=[],
           activation='relu',
           trainable=True):
    if not output_shape:
        h = int(((h_0.shape[1] - 1) * strides) + kernel_size) - 1
        w = int(((h_0.shape[2] - 1) * strides) + kernel_size) - 1
    else:
        h = int(output_shape[1])
        w = int(output_shape[2])
    init = tflearn.initializations.truncated_normal(shape=None,
                                                    mean=0.0,
                                                    stddev=0.01,
                                                    dtype=tf.float32,
                                                    seed=None)
    h1 = conv_2d_transpose(h_0,
                           nb_filter=filters,
                           filter_size=kernel_size,
                           strides=strides,
                           output_shape=[h, w],
                           weights_init=init,
                           trainable=trainable)
    h1_bn = batch_normalization(h1, trainable=trainable)
    if activation == 'relu':
        h1_o = tf.nn.relu(h1_bn)
    elif activation == 'none':
        h1_o = h1_bn
    else:
        raise ValueError("unsupported activation: %s" % activation)
    print(h1_o.shape)
    return h1_o
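A sketch of chaining the helper to upsample a feature map (the shapes and names below are illustrative, not from network.py):

# Illustrative only: upsample an 8x8x64 map to 16x16, then to 32x32.
x = tflearn.input_data(shape=[None, 8, 8, 64])
x = deconv(x, filters=32, kernel_size=5, strides=2,
           output_shape=[None, 16, 16])
x = deconv(x, filters=16, kernel_size=5, strides=2,
           output_shape=[None, 32, 32], activation='none')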
Code Example #6
def conv_2d_transpose_layer(input, n_filters, stride, output_shape):
    return conv.conv_2d_transpose(input,
                                  n_filters,
                                  3,
                                  output_shape=output_shape,
                                  strides=stride,
                                  padding='same',
                                  activation='elu',
                                  bias_init='zeros',
                                  scope=None,
                                  name='Conv3D')
Code Example #7
File: infogan.py Project: nmiculinic/du-fer
def default_generator(net):
    net = fully_connected(net, 7 * 7 * 16, scope='fc')
    net = tf.reshape(net, [tf.shape(net)[0], 7, 7, 16])
    net = conv_2d_transpose(net, 1, 5, [28, 28], scope="l0", strides=4, bias=True)
    return net
Code Example #8
conv4_1 = conv_2d(pool3,   nb_filter=512, filter_size=3, strides=1, padding='same', activation='relu')
conv4_2 = conv_2d(conv4_1, nb_filter=512, filter_size=3, strides=1, padding='same', activation='relu')
conv4_3 = conv_2d(conv4_2, nb_filter=512, filter_size=3, strides=1, padding='same', activation='relu')
pool4 = max_pool_2d(conv4_3, kernel_size=2, strides=2)

conv5_1 = conv_2d(pool4,   nb_filter=512, filter_size=3, strides=1, padding='same', activation='relu')
conv5_2 = conv_2d(conv5_1, nb_filter=512, filter_size=3, strides=1, padding='same', activation='relu')
conv5_3 = conv_2d(conv5_2, nb_filter=512, filter_size=3, strides=1, padding='same', activation='relu')

# DSN conv 1
score_dsn1_up = conv_2d(conv1_2, nb_filter=1, filter_size=1, strides=1, padding='same', activation='relu')
dsn1_loss = regression(score_dsn1_up, optimizer='rmsprop', loss='categorical_crossentropy', learning_rate=0.0001)

# DSN conv 2
score_dsn2 = conv_2d(conv2_2, nb_filter=1, filter_size=1, strides=1, padding='same', activation='relu')
score_dsn2_up = conv_2d_transpose(score_dsn2, nb_filter=1, filter_size=4, output_shape=[224, 224], strides=2, padding='same', activation='relu')
dsn2_loss = regression(score_dsn2_up, optimizer='rmsprop', loss='categorical_crossentropy', learning_rate=0.0001)

# DSN conv 3
score_dsn3 = conv_2d(conv3_3, nb_filter=1, filter_size=1, strides=1, padding='same', activation='relu')
score_dsn3_up = conv_2d_transpose(score_dsn3, nb_filter=1, filter_size=8, output_shape=[224, 224], strides=4, padding='same', activation='relu')
dsn3_loss = regression(score_dsn3_up, optimizer='rmsprop', loss='categorical_crossentropy', learning_rate=0.0001)

# DSN conv 4
score_dsn4 = conv_2d(conv4_3, nb_filter=1, filter_size=1, strides=1, padding='same', activation='relu')
score_dsn4_up = conv_2d_transpose(score_dsn4, nb_filter=1, filter_size=16, output_shape=[224, 224], strides=8, padding='same', activation='relu')
dsn4_loss = regression(score_dsn4_up, optimizer='rmsprop', loss='categorical_crossentropy', learning_rate=0.0001)

# DSN conv 5
score_dsn5 = conv_2d(conv5_3, nb_filter=1, filter_size=1, strides=1, padding='same', activation='relu')
score_dsn5_up = conv_2d_transpose(score_dsn5, nb_filter=1, filter_size=32, output_shape=[224, 224], strides=16, padding='same', activation='relu')
Code Example #9
def build_tiramisu(network):
    n = 5
    Ni = 8

    #Pool1
    network_1 = conv_2d(network, Ni, 3, regularizer='L2',
                        weight_decay=0.0001)  # 256
    network_1 = residual_block(network_1, n, Ni)
    network_1 = residual_block(network_1, 1, Ni)
    pool_1 = max_pool_2d(network_1, 2)  # downsampling 2x - 128
    #Pool2
    network_2 = residual_block(pool_1, n - 1, 2 * Ni)
    network_2 = residual_block(network_2, 1, 2 * Ni)
    pool_2 = max_pool_2d(network_2, 2)  # downsampling 4x - 64
    #Pool3
    network_3 = residual_block(pool_2, n - 1, 4 * Ni)
    network_3 = residual_block(network_3, 1, 4 * Ni)
    pool_3 = max_pool_2d(network_3, 2)  # downsampling 8x - 32
    #Pool4
    network_4 = residual_block(pool_3, n - 1, 8 * Ni)
    network_4 = residual_block(network_4, 1, 8 * Ni)
    pool_4 = max_pool_2d(network_4, 2)  # downsampling 16x - 16
    #Pool5
    network_5 = residual_block(pool_4, n - 1, 16 * Ni)
    network_5 = residual_block(network_5, 1, 16 * Ni)

    # /////////////////////////////////////////////////////////////////////////////////

    Unpool1 = conv_2d_transpose(network_5,
                                8 * Ni,
                                3,
                                strides=2,
                                output_shape=[HEIGHT // 8, WIDTH // 8, 8 * Ni])
    merge1 = merge([Unpool1, network_4], mode='concat', axis=3)  # merge
    merge1 = conv_2d(merge1, 8 * Ni, 3, activation='relu')
    merge1 = conv_2d(merge1, 8 * Ni, 3, activation='relu')

    Unpool2 = conv_2d_transpose(merge1,
                                4 * Ni,
                                3,
                                strides=2,
                                output_shape=[HEIGHT // 4, WIDTH // 4, 4 * Ni])
    merge2 = merge([Unpool2, network_3], mode='concat', axis=3)  # merge
    merge2 = conv_2d(merge2, 4 * Ni, 3, activation='relu')
    merge2 = conv_2d(merge2, 4 * Ni, 3, activation='relu')

    Unpool3 = conv_2d_transpose(merge2,
                                2 * Ni,
                                3,
                                strides=2,
                                output_shape=[HEIGHT // 2, WIDTH // 2, 2 * Ni])
    merge3 = merge([Unpool3, network_2], mode='concat', axis=3)  # merge
    merge3 = conv_2d(merge3, 2 * Ni, 3, activation='relu')
    merge3 = conv_2d(merge3, 2 * Ni, 3, activation='relu')

    Unpool4 = conv_2d_transpose(merge3,
                                Ni,
                                3,
                                strides=2,
                                output_shape=[HEIGHT, WIDTH, Ni])
    merge4 = merge([Unpool4, network_1], mode='concat', axis=3)  # merge
    merge4 = conv_2d(merge4, Ni, 3, activation='relu')
    merge4 = conv_2d(merge4, Ni, 3, activation='relu')

    final_merge = conv_2d(merge4, 3, 1, activation='relu')

    network = tflearn.regression(final_merge,
                                 optimizer='adam',
                                 loss='mean_square')

    return network
Code Example #10
def build_unet(network):
    Ni = 8
    #Pool1
    network_1 = conv_2d(network, Ni, 3, activation='relu')
    network_1 = conv_2d(network_1, Ni, 3, activation='relu')
    pool1 = max_pool_2d(network_1, 2)  # downsampling 2x
    #Pool2
    network_2 = conv_2d(pool1, 2 * Ni, 3, activation='relu')
    network_2 = conv_2d(network_2, 2 * Ni, 3, activation='relu')
    pool2 = max_pool_2d(network_2, 2)  # downsampling 4x
    #Pool3
    network_3 = conv_2d(pool2, 4 * Ni, 3, activation='relu')
    network_3 = conv_2d(network_3, 4 * Ni, 3, activation='relu')
    pool3 = max_pool_2d(network_3, 2)  # downsampling 8x
    #Pool4
    network_4 = conv_2d(pool3, 8 * Ni, 3, activation='relu')
    network_4 = conv_2d(network_4, 8 * Ni, 3, activation='relu')
    pool4 = max_pool_2d(network_4, 2)  # downsampling 16x

    #Pool5
    network_5 = conv_2d(pool4, 16 * Ni, 3, activation='relu')
    network_5 = conv_2d(network_5, 16 * Ni, 3, activation='relu')

    Unpool1 = conv_2d_transpose(network_5,
                                8 * Ni,
                                3,
                                strides=2,
                                output_shape=[HEIGHT // 8, WIDTH // 8, 8 * Ni])
    merge1 = merge([Unpool1, network_4], mode='concat', axis=3)  # merge
    merge1 = conv_2d(merge1, 8 * Ni, 3, activation='relu')
    merge1 = conv_2d(merge1, 8 * Ni, 3, activation='relu')

    Unpool2 = conv_2d_transpose(merge1,
                                4 * Ni,
                                3,
                                strides=2,
                                output_shape=[HEIGHT // 4, WIDTH // 4, 4 * Ni])
    merge1 = merge([Unpool2, network_3], mode='concat', axis=3)  # merge
    merge1 = conv_2d(merge1, 4 * Ni, 3, activation='relu')
    merge1 = conv_2d(merge1, 4 * Ni, 3, activation='relu')

    Unpool3 = conv_2d_transpose(merge1,
                                2 * Ni,
                                3,
                                strides=2,
                                output_shape=[HEIGHT // 2, WIDTH // 2, 2 * Ni])
    merge1 = merge([Unpool3, network_2], mode='concat', axis=3)  # merge
    merge1 = conv_2d(merge1, 2 * Ni, 3, activation='relu')
    merge1 = conv_2d(merge1, 2 * Ni, 3, activation='relu')

    Unpool4 = conv_2d_transpose(merge1,
                                Ni,
                                3,
                                strides=2,
                                output_shape=[HEIGHT, WIDTH, Ni])
    merge1 = merge([Unpool4, network_1], mode='concat', axis=3)  # merge
    merge1 = conv_2d(merge1, Ni, 3, activation='relu')
    merge1 = conv_2d(merge1, Ni, 3, activation='relu')

    merge1 = conv_2d(merge1, 3, 1, activation='relu')

    network = tflearn.regression(merge1, optimizer='adam', loss='mean_square')

    return network
Code Example #11
def build_segnet_half(network):
    #Pool1
    network_1 = conv_2d(network, 8, 3,
                        activation='relu')  #output 2x_downsampled
    network_1 = conv_2d(network_1, 8, 3,
                        activation='relu')  #output 2x_downsampled
    pool1 = max_pool_2d(network_1, 2)
    #Pool2
    network_2 = conv_2d(pool1, 16, 3,
                        activation='relu')  #output 4x_downsampled
    network_2 = conv_2d(network_2, 16, 3,
                        activation='relu')  #output 4x_downsampled
    pool2 = max_pool_2d(network_2, 2)
    #Pool3
    network_3 = conv_2d(pool2, 32, 3,
                        activation='relu')  #output 8x_downsampled
    network_3 = conv_2d(network_3, 32, 3,
                        activation='relu')  #output 8x_downsampled
    pool3 = max_pool_2d(network_3, 2)

    #Pool4
    network_4 = conv_2d(pool3, 64, 3,
                        activation='relu')  #output 16x_downsampled
    network_4 = conv_2d(network_4, 64, 3,
                        activation='relu')  #output 16x_downsampled
    pool4 = max_pool_2d(network_4, 2)

    # ----- decoder -----
    decoder = conv_2d_transpose(pool4,
                                64,
                                3,
                                strides=4,
                                output_shape=[
                                    HEIGHT // 4, WIDTH // 4, 64
                                ])  #  16x downsample to 4x downsample
    decoder = conv_2d(decoder, 64, 3, activation='relu')
    pool5 = conv_2d(decoder, 64, 3, activation='relu')

    decoder = conv_2d_transpose(pool3,
                                32,
                                3,
                                strides=2,
                                output_shape=[
                                    HEIGHT // 4, WIDTH // 4, 32
                                ])  # 8x downsample to 4x downsample
    decoder = conv_2d(decoder, 32, 3, activation='relu')
    pool6 = conv_2d(decoder, 32, 3, activation='relu')

    pool6 = merge([pool6, pool5, pool2], mode='concat',
                  axis=3)  #merge all 4x downsampled layers

    decoder = conv_2d_transpose(pool6,
                                16,
                                3,
                                strides=4,
                                output_shape=[HEIGHT, WIDTH, 16])
    decoder = conv_2d(decoder, 16, 3, activation='relu')
    pool6 = conv_2d(decoder, 16, 3, activation='relu')

    decoder = conv_2d(pool6, CHANNELS, 1)
    network = tflearn.regression(decoder, optimizer='adam', loss='mean_square')

    return network
Code Example #12
def build_autoencoder(network):
    # encoder
    encoder = conv_2d(network, 16, 7, activation='relu')
    encoder = conv_2d(encoder, 16, 7, activation='relu')
    encoder = max_pool_2d(encoder, 2)

    encoder = conv_2d(encoder, 32, 5, activation='relu')
    encoder = conv_2d(encoder, 32, 5, activation='relu')
    encoder = max_pool_2d(encoder, 2)

    encoder = conv_2d(encoder, 64, 3, activation='relu')
    encoder = conv_2d(encoder, 64, 3, activation='relu')
    encoder = max_pool_2d(encoder, 2)

    # decoder
    decoder = conv_2d_transpose(encoder,
                                64,
                                3,
                                strides=2,
                                output_shape=[HEIGHT // 4, WIDTH // 4, 64])
    decoder = conv_2d(decoder, 64, 3, activation='relu')
    decoder = conv_2d(decoder, 64, 3, activation='relu')

    decoder = conv_2d_transpose(decoder,
                                32,
                                5,
                                strides=2,
                                output_shape=[HEIGHT // 2, WIDTH // 2, 32])
    decoder = conv_2d(decoder, 32, 5, activation='relu')
    decoder = conv_2d(decoder, 32, 5, activation='relu')

    decoder = conv_2d_transpose(decoder,
                                16,
                                7,
                                strides=2,
                                output_shape=[HEIGHT, WIDTH, 16])
    decoder = conv_2d(decoder, 16, 7, activation='relu')
    decoder = conv_2d(decoder, 16, 7, activation='relu')

    # decoder = upsample_2d(decoder, 2)
    decoder = conv_2d(decoder, CHANNELS, 1)

    def my_loss(y_pred, y_true):
        return tflearn.objectives.weak_cross_entropy_2d(y_pred,
                                                        y_true,
                                                        num_classes=3)

    def my_metric(y_pred, y_true):
        return tflearn.metrics.Top_k(k=3)

    network = regression(
        decoder,
        optimizer='adam',
        #loss='mean_square',
        loss='categorical_crossentropy',
        #loss='weak_cross_entropy_2d',
        #loss=my_loss,
        #learning_rate=0.00005,
        #learning_rate=0.0005,
        #metric=my_metric
    )

    return network
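Like the other builders on this page, build_autoencoder relies on HEIGHT, WIDTH, and CHANNELS globals; a sketch of wiring it into a trainable tflearn.DNN, with 256x256x3 assumed for those values:

# Assumed globals; the project defines its own HEIGHT/WIDTH/CHANNELS.
HEIGHT, WIDTH, CHANNELS = 256, 256, 3
inp = input_data(shape=[None, HEIGHT, WIDTH, CHANNELS], name='input')
model = tflearn.DNN(build_autoencoder(inp), tensorboard_dir='log')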