def mask_generator(x):
    mask = sequential([
        Dense(16),
        Reshape((1, 4, 4)),
        UpSampling2D((16, 16)),
    ])(x)
    depth_map = sequential([
        Dense(16),
        Reshape((1, 4, 4)),
        UpSampling2D((4, 4)),
    ])(x)
    return mask, depth_map
def get_offset_back(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    back_feature_map = sequential([
        UpSampling2D(),  # 64x64
        conv(n, 3, 3),
        conv(n, 3, 3),
    ], ns='offset.back')(input)
    return back_feature_map, sequential([
        Convolution2D(1, 3, 3, border_mode='same'),
        LinearInBounds(-1, 1, clip=True),
    ], ns='offset.back_out')(back_feature_map)
def get_decoder_model(input, nb_units,
                      nb_output=NUM_MIDDLE_CELLS + NUM_CONFIGS,
                      depth=1, dense=[]):
    def dense_bn(n):
        return [
            Dense(n),
            BatchNormalization(),
            Activation('relu'),
        ]

    n = nb_units
    d = depth
    return sequential([
        conv(n, depth=d),
        MaxPooling2D(),  # 32x32
        conv(2*n, depth=d),
        MaxPooling2D(),  # 16x16
        conv(4*n, depth=d),
        MaxPooling2D(),  # 8x8
        conv(8*n, depth=d),
        MaxPooling2D(),  # 4x4
        conv(16*n, depth=d),
        Flatten(),
        [dense_bn(nb) for nb in dense],
        Dense(nb_output),
    ])(input)
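A hedged usage sketch for get_decoder_model: assuming channel-first 1x64x64 tag images (consistent with the pooling comments running 32x32 down to 4x4) and the module-level conv helper plus NUM_MIDDLE_CELLS/NUM_CONFIGS constants being in scope, the decoder maps an image to the flat label vector. The input shape and hyperparameters below are assumptions, not values taken from the source.

from keras.layers import Input
from keras.models import Model

tag = Input(shape=(1, 64, 64))  # assumed channel-first 64x64 grayscale tag image
decoded = get_decoder_model(tag, nb_units=16, depth=2, dense=[256])
decoder = Model([tag], [decoded])
decoder.compile('adam', 'mse')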
def mask_generator(input, nb_units=64, dense_factor=3, nb_dense_layers=2,
                   trainable=True):
    n = nb_units

    def conv(n):
        return [
            Convolution2D(n, 3, 3, border_mode='same', init='he_normal'),
            Activation('relu'),
        ]

    dense_layers = [Dense(dense_factor*nb_units, activation='relu')
                    for _ in range(nb_dense_layers)]
    return sequential(dense_layers + [
        Dense(8*n*4*4),
        Activation('relu'),
        Reshape((8*n, 4, 4)),
        UpSampling2D(),  # 8x8
        conv(4*n),
        conv(4*n),
        UpSampling2D(),  # 16x16
        conv(2*n),
        conv(2*n),
        UpSampling2D(),  # 32x32
        conv(n),
        UpSampling2D(),  # 64x64
        conv(n),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
        Activation('linear'),
    ], ns='mask_gen', trainable=trainable)(input)
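A minimal usage sketch for the single-output mask_generator above. The 22-dimensional driver vector is an assumption (it matches the Reshape((22, 1, 1)) in mask_generator_all_conv further down); any 1-D input tensor would work with the dense front end.

from keras.layers import Input
from keras.models import Model

labels = Input(shape=(22,))           # assumed driver/label vector size
mask = mask_generator(labels)         # 1x64x64 mask tensor (channel-first)
mask_model = Model([labels], [mask])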
def mask_blending_discriminator(x, n=32, conv_repeat=1, dense=[],
                                out_activation='sigmoid'):
    def conv(n):
        layers = [
            Convolution2D(n, 3, 3, subsample=(2, 2), border_mode='same'),
            BatchNormalization(axis=1),
            LeakyReLU(0.2),
        ]
        return layers + [[
            Convolution2D(n, 3, 3, border_mode='same'),
            BatchNormalization(axis=1),
            LeakyReLU(0.2),
        ] for _ in range(conv_repeat - 1)]

    def get_dense(nb):
        return [
            Dense(nb),
            BatchNormalization(axis=1),
            LeakyReLU(0.2),
        ]

    return sequential([
        Convolution2D(n, 5, 5, subsample=(2, 2), border_mode='same'),
        LeakyReLU(0.2),
        conv(2*n),
        conv(4*n),
        conv(8*n),
        Flatten(),
        [get_dense(nb) for nb in dense],
        Dense(1, activation=out_activation),
    ], ns='dis')(concat(x, axis=0, name='concat_fake_real'))
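A hedged usage sketch for the discriminator above: it concatenates its list of inputs along the batch axis (fake batch first, then real), so every input must share one image shape. The 1x64x64 channel-first shape is an assumption inferred from the 64x64 outputs of the generators and the BatchNormalization(axis=1) calls.

from keras.layers import Input

fake = Input(shape=(1, 64, 64))   # assumed generator output shape
real = Input(shape=(1, 64, 64))
d_score = mask_blending_discriminator([fake, real], n=32, conv_repeat=2,
                                      dense=[256])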
def test_sequential_flatten():
    x = Input(shape=(20,))
    seq = sequential([Dense(20), [Dense(10)], [[Dense(1)], Dense(1)]])
    out = seq(x)
    model = Model([x], [out])
    model.compile('adam', 'mse')
    model.predict_on_batch(np.random.sample((64, 20)))
def get_mask_postprocess(inputs, nb_units):
    n = nb_units
    return sequential([
        conv(n, 3, 3),
        conv(n, 3, 3),
        Convolution2D(1, 5, 5, border_mode='same', init='normal'),
    ], ns='mask_post')(concat(inputs))
def get_mask_weight_blending(inputs, min=0, max=2):
    input = concat(inputs)
    return sequential([
        Convolution2D(1, 3, 3),
        Flatten(),
        Dense(1),
        LinearInBounds(min, max, clip=True),
    ], ns='mask_weight_blending')(input)
def get_mask_postprocess(inputs, nb_units):
    n = nb_units
    return sequential([
        conv(n, 3, 3),
        conv(n, 3, 3),
        Deconvolution2D(1, 5, 5, border_mode=(2, 2)),
        LinearInBounds(-1, 1, clip=True),
    ], ns='mask_post')(concat(inputs))
def d():
    x = Input(shape=(input_size + output_size, nb_chars))
    d_realness = sequential([
        LSTM(100),
        Dense(1, activation='sigmoid'),
    ])(x)
    d = Model([x], [d_realness])
    return d
def get_offset_middle(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    return sequential([
        UpSampling2D(),  # 32x32
        conv(2*n, 3, 3),
        conv(2*n, 3, 3),
        conv(2*n, 3, 3),
    ], ns='offset.middle')(input)
def simple_gan():
    z = Input(batch_shape=simple_gan_z_shape, name='z')
    generator = sequential([
        Dense(simple_gan_nb_z, activation='relu', name='g1'),
        Dense(simple_gan_nb_z, activation='relu', name='g2'),
        Dense(simple_gan_nb_out, activation='sigmoid', name='g3'),
    ])(z)
    fake = Input(batch_shape=simple_gan_real_shape, name='fake')
    real = Input(batch_shape=simple_gan_real_shape, name='real')
    discriminator = sequential([
        Dense(20, activation='relu', input_dim=2, name='d1'),
        Dense(1, activation='sigmoid', name='d2'),
    ])(concat([fake, real], axis=0))
    return GAN(Container(z, generator),
               Container([fake, real], gan_outputs(discriminator)),
               simple_gan_z_shape[1:], simple_gan_real_shape[1:])
def m():
    x = Input(shape=(input_size + output_size, nb_chars))
    m_realness = sequential([
        LSTM(14),
        Dense(1, activation='sigmoid'),
    ])(x)
    m = Model([x], [m_realness])
    m.compile(Adam(), 'mse')
    return m
def get_mask_weight_blending(inputs, min=0, max=2):
    input = concat(inputs)
    return sequential([
        Convolution2D(1, 3, 3),
        Flatten(),
        Dense(1),
        # use the max argument instead of the hard-coded literal 2
        LinearInBounds(K.variable(min), K.variable(max), clip=True),
    ], ns='mask_weight_blending')(input)
def mask_generator(input, nb_units=64, dense_factor=3, nb_dense_layers=2,
                   depth=2, nb_output_channels=1, trainable=True):
    n = nb_units

    def conv(n, repeats=1):
        return [[
            Convolution2D(n, 3, 3, border_mode='same', init='he_normal'),
            Activation('relu'),
        ] for _ in range(repeats)]

    dense_layers = [Dense(dense_factor*nb_units, activation='relu')
                    for _ in range(nb_dense_layers)]
    base = sequential(dense_layers + [
        Dense(8*n*4*4),
        Activation('relu'),
        Reshape((8*n, 4, 4)),
        conv(8*n),
        UpSampling2D(),  # 8x8
        conv(4*n, depth),
        UpSampling2D(),  # 16x16
        conv(2*n),
    ], ns='mask_gen.base', trainable=trainable)(input)
    mask = sequential([
        conv(2*n, depth),
        UpSampling2D(),  # 32x32
        conv(n, 2),
        UpSampling2D(),  # 64x64
        conv(n, 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='mask_gen.mask', trainable=trainable)(base)
    depth_map = sequential([
        conv(n // 2, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='mask_gen.depth_map', trainable=trainable)(base)
    return mask, depth_map
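A hedged usage sketch for the two-output variant above: the mask branch upsamples the shared 16x16 base to 64x64, while the depth-map branch stays at the base resolution, so the model gets two outputs of different sizes. The 22-dimensional driver input is again an assumption.

from keras.layers import Input
from keras.models import Model

labels = Input(shape=(22,))               # assumed driver/label vector size
mask, depth_map = mask_generator(labels)  # 64x64 mask, 16x16 depth map
gen = Model([labels], [mask, depth_map])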
def mask_generator_extra(input, nb_units=64, nb_dense=[256, 1024], depth=2,
                         project_factor=1, filter_size=3):
    n = nb_units

    def conv(n, repeats=1):
        return [[
            Convolution2D(n, filter_size, filter_size, border_mode='same',
                          init='he_normal'),
            Activation('relu'),
        ] for _ in range(repeats)]

    dense_layers = [Dense(nb, init='he_normal', activation='relu')
                    for nb in nb_dense]
    base = sequential(dense_layers + [
        Dense(8*n*4*4*project_factor),
        Activation('relu'),
        Reshape((8*n*project_factor, 4, 4)),
        conv(8*n, depth),
        UpSampling2D(),  # 8x8
        conv(4*n, depth),
        UpSampling2D(),  # 16x16
        conv(2*n),
    ], ns='mask_gen.base')(input)
    mask = sequential([
        conv(2*n, depth),
        UpSampling2D(),  # 32x32
        conv(n, depth),
        UpSampling2D(),  # 64x64
        conv(n, depth - 1),
        Convolution2D(1, 5, 5, border_mode='same', init='he_normal'),
    ], ns='mask_gen.mask')(base)
    depth_map = sequential([
        conv(n // 2, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='mask_gen.depth_map')(base)
    return mask, depth_map
def test_sequential():
    x = Input(shape=(20,))
    seq = sequential([
        Dense(20),
        Dense(10),
        Dense(1),
    ])
    out = seq(x)
    model = Model([x], [out])
    model.compile('adam', 'mse')
    model.predict_on_batch(np.random.sample((64, 20)))
def mask_generator_all_conv(input, nb_units=64, depth=2, filter_size=3):
    n = nb_units

    def conv(n, repeats=1, f=None):
        if f is None:
            f = filter_size
        return [[
            Convolution2D(n, f, f, border_mode='same', init='he_normal'),
            Activation('relu'),
        ] for _ in range(repeats)]

    base = sequential([
        Reshape((22, 1, 1)),
        conv(8*n, depth, f=1),
        UpSampling2D(),  # 2x2
        conv(8*n, depth, f=2),
        UpSampling2D(),  # 4x4
        conv(8*n, depth),
        UpSampling2D(),  # 8x8
        conv(4*n, depth),
        UpSampling2D(),  # 16x16
        conv(2*n),
    ], ns='mask_gen.base')(input)
    mask = sequential([
        conv(2*n, depth),
        UpSampling2D(),  # 32x32
        conv(n, depth),
        UpSampling2D(),  # 64x64
        conv(n, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='mask_gen.mask')(base)
    depth_map = sequential([
        conv(n // 2, depth - 1),
        Convolution2D(1, 3, 3, border_mode='same', init='he_normal'),
    ], ns='mask_gen.depth_map')(base)
    return mask, depth_map
def test_sequential_namespace():
    x = Input(shape=(20,))
    dense1 = Dense(20)
    dense2 = Dense(10)
    dense3 = Dense(1)
    seq = sequential([
        dense1,
        dense2,
        dense3,
    ], ns='hello')
    seq(x)
    assert dense1.name.startswith('hello.')
    assert dense2.name.startswith('hello.')
    assert dense3.name.startswith('hello.')
def test_sequential_enumerate():
    x = Input(shape=(20,))
    dense1 = Dense(20)
    dense2 = Dense(10)
    dense3 = Dense(1)
    seq = sequential([
        dense1,
        dense2,
        dense3,
    ], ns='hello')
    seq(x)
    assert dense1.name.endswith('hello.00_dense')
    assert dense2.name.endswith('hello.01_dense')
    assert dense3.name.endswith('hello.02_dense')
def g():
    seq = Input(shape=(input_size, nb_chars))
    z = Input(shape=(z_size,))
    z_rep = RepeatVector(input_size)(z)
    seq_and_z = merge([seq, z_rep], mode='concat', concat_axis=-1)
    fake_prob = sequential([
        LSTM(8),
        RepeatVector(output_size),
        LSTM(8, return_sequences=True),
        TimeDistributed(Dense(nb_chars, activation='softmax')),
    ])(seq_and_z)
    g = Model([z, seq], [fake_prob])
    return g
def test_sequential_trainable():
    x = Input(shape=(20,))
    dense1 = Dense(20)
    dense2 = Dense(10)
    dense3 = Dense(1)
    seq = sequential([
        dense1,
        dense2,
        dense3,
    ], trainable=False)
    seq(x)
    assert collect_trainable_weights(dense1) == []
    assert collect_trainable_weights(dense2) == []
    assert collect_trainable_weights(dense3) == []
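Taken together, the tests above pin down the contract of the sequential helper: it flattens arbitrarily nested layer lists, optionally prefixes layer names with an ns namespace plus a two-digit index, can mark every layer non-trainable, and returns a callable that chains the layers. The sketch below is only an assumption of how such a helper could look, not the library's actual implementation; the naming scheme is inferred from the assertions in the tests.

def sequential_sketch(layers, ns=None, trainable=True):
    # Flatten arbitrarily nested lists/tuples of layers into one flat list.
    def flatten(items):
        for item in items:
            if isinstance(item, (list, tuple)):
                for sub in flatten(item):
                    yield sub
            else:
                yield item

    flat_layers = list(flatten(layers))
    for i, layer in enumerate(flat_layers):
        if ns is not None:
            # e.g. 'hello.00_dense' -- prefix plus index plus layer type,
            # derived from the default Keras name such as 'dense_1'.
            base = layer.name.rsplit('_', 1)[0]
            layer.name = '{}.{:02d}_{}'.format(ns, i, base)
        layer.trainable = trainable

    def call(x):
        # Chain the flattened layers like a Sequential model.
        for layer in flat_layers:
            x = layer(x)
        return x
    return call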
def get_offset_front(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    return sequential([
        Dense(8*n*4*4),
        BatchNormalization(),
        Activation('relu'),
        Reshape((8*n, 4, 4)),
        UpSampling2D(),  # 8x8
        conv(4*n, 3, 3),
        conv(4*n, 3, 3),
        UpSampling2D(),  # 16x16
        conv(2*n, 3, 3),
        conv(2*n, 3, 3),
    ], ns='offset.front')(input)
def get_mask_driver(x, nb_units, nb_output_units):
    n = nb_units
    driver = sequential([
        Dense(n),
        BatchNormalization(),
        Dropout(0.25),
        Activation('relu'),
        Dense(n),
        BatchNormalization(),
        Dropout(0.25),
        Activation('relu'),
        Dense(nb_output_units),
        BatchNormalization(gamma_init=constant_init(0.25)),
        LinearInBounds(-1, 1, clip=True),
    ], ns='driver')
    return driver(x)
def mask_blending_discriminator(x, n=32, out_activation='sigmoid'):
    def conv(n):
        return [
            Convolution2D(n, 5, 5, subsample=(2, 2), border_mode='same'),
            BatchNormalization(),
            LeakyReLU(0.2),
        ]

    return sequential([
        Convolution2D(n, 5, 5, subsample=(2, 2), border_mode='same'),
        LeakyReLU(0.2),
        conv(2*n),
        conv(4*n),
        conv(8*n),
        Flatten(),
        Dense(1, activation=out_activation),
    ], ns='dis')(concat(x, axis=0, name='concat_fake_real'))
def get_mask_driver(x, nb_units, nb_output_units):
    n = nb_units
    driver = sequential([
        Dense(n),
        BatchNormalization(),
        Dropout(0.25),
        Activation('relu'),
        Dense(n),
        BatchNormalization(),
        Dropout(0.25),
        Activation('relu'),
        Dense(nb_output_units),
        BatchNormalization(),
        LinearInBounds(-1, 1),
    ], ns='driver')
    return driver(x)
def get_offset_merge_mask(input, nb_units, nb_conv_layers, poolings=None,
                          ns=None):
    def conv_layers(units, pooling):
        layers = [Convolution2D(units, 3, 3, border_mode='same')]
        if pooling:
            layers.append(MaxPooling2D())
        layers.extend([
            BatchNormalization(axis=1),
            Activation('relu'),
        ])
        return layers

    if poolings is None:
        poolings = [False] * nb_conv_layers
    if type(nb_units) == int:
        nb_units = [nb_units] * nb_conv_layers

    layers = []
    for units, pooling in zip(nb_units, poolings):
        layers.extend(conv_layers(units, pooling))
    return sequential(layers, ns=ns)(input)
def get_lighting_generator(inputs, nb_units):
    n = nb_units
    input = concat(inputs)
    light_conv = sequential([
        conv(n, 5, 5),
        conv(n, 5, 5),
        conv(n, 3, 3),
        UpSampling2D(),  # 32x32
        conv(n, 5, 5),
        Convolution2D(2, 1, 1, border_mode='same'),
        UpSampling2D(),  # 64x64
        LinearInBounds(-1, 1, clip=True),
        GaussianBlur(sigma=4),
    ], ns='lighting')(input)
    shift = Split(0, 1, axis=1)(light_conv)
    scale = Split(1, 2, axis=1)(light_conv)
    return shift, scale
def discriminator_fn(x):
    return gan_outputs(
        sequential([
            Flatten(),
            Dense(1),
        ])(concat(x)),
        fake_for_gen=(0, 10),
        fake_for_dis=(0, 10),
        real=(10, 20))
def offset_front(x):
    return sequential([
        Dense(16),
        Reshape((1, 4, 4)),
        UpSampling2D((4, 4)),
    ])(concat(x))
def offset_back(x):
    feature_map = sequential([
        UpSampling2D(),
    ])(concat(x))
    return feature_map, Convolution2D(1, 3, 3,
                                      border_mode='same')(feature_map)
def mask_post(x):
    return sequential([
        Convolution2D(1, 3, 3, border_mode='same'),
    ])(concat(x))
def mask_weight_blending(x):
    return sequential([
        Flatten(),
        Dense(1),
    ])(x)