# Assumed imports shared by the examples below (an assumption; the original
# snippets rely on TF1-era Keras):
import numpy as np
import tensorflow as tf
import keras
from keras import regularizers
from keras.layers import (Activation, Add, AvgPool2D, BatchNormalization,
                          Conv2D, Conv2DTranspose, Dense, Dropout, Flatten,
                          Input, LeakyReLU, MaxPool2D, MaxPooling2D, Reshape,
                          concatenate)
from keras.models import Model, Sequential


def get_model():
    img_tensor = Input(shape=(64, 64, 3))
    x = Conv2D(filters=32, kernel_size=(5, 5), strides=1, padding='same')(img_tensor)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # AvgPool2D is used instead of MaxPool2D here: averaging aggregates
    # neighboring activations into a blurred response, which intuitively gives
    # a better estimate of the learned filters than max-pooling, whose sharp
    # outputs tend to be dominated by edge-driven maxima.
    x = AvgPool2D(pool_size=(2, 2), strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(units=32, activation='relu')(x)
    x = Dropout(0.25)(x)
    predicted_class = Dense(units=num_classes, activation='softmax')(x)
    model = Model(inputs=[img_tensor], outputs=[predicted_class])
    return model
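# Minimal usage sketch for get_model() above; `num_classes`, the optimizer,
# and the loss are illustrative assumptions, not taken from the original source.
num_classes = 10
model = get_model()
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()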
def add(self, pool_size=(2, 2), strides=None, padding='valid',
        data_format=None, **kwargs):
    return self._add_layer(
        AvgPool2D(pool_size=pool_size, strides=strides, padding=padding,
                  data_format=data_format, **kwargs))
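# Self-contained sketch of the builder pattern that add() above belongs to;
# the PoolBuilder class and its Sequential-backed _add_layer() are assumptions
# standing in for the surrounding code, not part of the original source.
class PoolBuilder:
    def __init__(self):
        self._model = Sequential()

    def _add_layer(self, layer):
        self._model.add(layer)
        return self  # returning self allows chained add() calls

    def add(self, pool_size=(2, 2), strides=None, padding='valid',
            data_format=None, **kwargs):
        return self._add_layer(
            AvgPool2D(pool_size=pool_size, strides=strides, padding=padding,
                      data_format=data_format, **kwargs))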
def keras_model():
    """Return a basic Keras model."""
    model = Sequential([
        Conv2D(8, (2, 2), input_shape=(16, 16, 3)),
        BatchNormalization(momentum=.3, epsilon=.65),
        AvgPool2D(),
        MaxPool2D(),
        BatchNormalization(momentum=.4, epsilon=.25),
        # tf.contrib is TF1-only; under TF2 use tf.keras.regularizers.l2 instead.
        Conv2D(4, (2, 2), activation=tf.nn.tanh,
               kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=.5)),
        Flatten(),
        Dense(2, activation='softmax', name="keras_model"),
    ])
    return model
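# Quick sanity check for keras_model() above (a TF1 environment is assumed
# because of tf.contrib); the random input and optimizer are illustrative:
m = keras_model()
m.compile(optimizer='adam', loss='categorical_crossentropy')
print(m.predict(np.random.rand(1, 16, 16, 3)).shape)  # -> (1, 2)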
def inception_a(self, inputs):
    # NOTE: a 1x1 average pool (strides default to the pool size) is
    # size-preserving; the Inception-v4 reference block pools 3x3 and
    # concatenates the branches, whereas this variant sums them with Add(),
    # which requires every branch to end at the same 96-filter width.
    unit_1 = AvgPool2D(padding='same', pool_size=(1, 1))(inputs)
    unit_1 = self.conv(unit_1, filters=96, kernel_size=1)
    unit_2 = self.conv(inputs, filters=96, kernel_size=1)
    unit_3 = self.conv(inputs, filters=64, kernel_size=1)
    unit_3 = self.conv(unit_3, filters=96, kernel_size=3)
    unit_4 = self.conv(inputs, filters=64, kernel_size=1)
    unit_4 = self.conv(unit_4, filters=96, kernel_size=3)
    unit_4 = self.conv(unit_4, filters=96, kernel_size=1)
    return Add()([unit_1, unit_2, unit_3, unit_4])
def build(self, input_shape):
    num_channel = self.num_channel
    num_initial_filter = num_channel * 32
    kernel_size = 5
    stride_size = 2
    init_multiplier = 0.5
    init_width = int(self.width * init_multiplier)
    init_height = int(self.height * init_multiplier)
    out_padding = 1
    kernel_init = keras.initializers.RandomNormal(stddev=0.02)
    bias_init = keras.initializers.zeros()
    self.dense = Dense(init_width * init_height * num_initial_filter,
                       input_shape=(input_shape[1], ),
                       kernel_initializer=kernel_init,
                       bias_initializer=bias_init,
                       activation=tf.nn.leaky_relu)
    self.reshape = Reshape([init_height, init_width, num_initial_filter])
    self.conv2d_tps = Sequential()
    # Each stride-2 transposed conv doubles the spatial side and halves the
    # filter count.
    for i in range(self.num_layer - 1):
        self.conv2d_tps.add(
            Conv2DTranspose(num_initial_filter // (2**(i + 1)),
                            kernel_size,
                            stride_size,
                            padding="same",
                            output_padding=out_padding,
                            activation=tf.nn.leaky_relu,
                            kernel_initializer=kernel_init,
                            bias_initializer=bias_init))
    self.conv2d_tps.add(
        Conv2DTranspose(num_channel,
                        kernel_size,
                        stride_size,
                        padding="same",
                        output_padding=out_padding,
                        kernel_initializer=kernel_init,
                        bias_initializer=bias_init))
    pool_size = int(init_multiplier * stride_size**self.num_layer)
    self.avgpool2d = AvgPool2D(pool_size)
    self.sigmoid = Activation(tf.nn.sigmoid)
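# Shape bookkeeping for build() above, with illustrative numbers (an
# assumption): width = height = 64 and num_layer = 3. The Dense + Reshape
# start at 32x32 (init_multiplier = 0.5); each of the 3 stride-2 transposed
# convolutions doubles the side, giving 256; pool_size = int(0.5 * 2**3) = 4,
# and AvgPool2D(4) (strides default to the pool size) divides by 4, bringing
# the output back to the original 64x64.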
def inception_c(self, inputs):
    unit_1 = AvgPool2D(padding='same', pool_size=(1, 1))(inputs)
    unit_1 = self.conv(unit_1, filters=256, kernel_size=1)
    unit_2 = self.conv(inputs, filters=256, kernel_size=1)
    unit_3 = self.conv(inputs, filters=384, kernel_size=1)
    unit_3_1 = self.conv(unit_3, filters=256, kernel_size=(1, 3))
    unit_3_2 = self.conv(unit_3, filters=256, kernel_size=(3, 1))
    unit_4 = self.conv(inputs, filters=384, kernel_size=1)
    unit_4 = self.conv(unit_4, filters=448, kernel_size=(1, 3))
    unit_4 = self.conv(unit_4, filters=512, kernel_size=(3, 1))
    unit_4_1 = self.conv(unit_4, filters=256, kernel_size=(3, 1))
    unit_4_2 = self.conv(unit_4, filters=256, kernel_size=(1, 3))
    return Add()([unit_1, unit_2, unit_3_1, unit_3_2, unit_4_1, unit_4_2])
def inception_b(self, inputs):
    unit_1 = AvgPool2D(padding='same', pool_size=(1, 1))(inputs)
    unit_1 = self.conv(unit_1, filters=256, kernel_size=1)
    unit_2 = self.conv(inputs, filters=256, kernel_size=1)
    unit_3 = self.conv(inputs, filters=192, kernel_size=1)
    unit_3 = self.conv(unit_3, filters=224, kernel_size=(1, 7))
    unit_3 = self.conv(unit_3, filters=256, kernel_size=(1, 7))
    unit_4 = self.conv(inputs, filters=192, kernel_size=1)
    unit_4 = self.conv(unit_4, filters=192, kernel_size=(1, 7))
    unit_4 = self.conv(unit_4, filters=224, kernel_size=(7, 1))
    unit_4 = self.conv(unit_4, filters=224, kernel_size=(1, 7))
    unit_4 = self.conv(unit_4, filters=256, kernel_size=(7, 1))
    return Add()([unit_1, unit_2, unit_3, unit_4])
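# Standalone sketch of how the inception blocks above compose; the conv()
# helper is an assumption standing in for self.conv, and the input shape is
# illustrative:
def conv(x, filters, kernel_size):
    return Conv2D(filters, kernel_size, padding='same', activation='relu')(x)

inp = Input(shape=(35, 35, 384))
pooled = AvgPool2D(padding='same', pool_size=(1, 1))(inp)  # size-preserving
branch_1 = conv(pooled, filters=96, kernel_size=1)
branch_2 = conv(inp, filters=96, kernel_size=1)
# Every branch ends at (35, 35, 96), so the element-wise Add() is shape-valid.
merged = Add()([branch_1, branch_2])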
img_tensor = Input(shape=(64, 64, 3))
x = Conv2D(filters=32, kernel_size=(5, 5), strides=2, padding='same')(img_tensor)
x = LeakyReLU()(x)
x = Conv2D(filters=64, kernel_size=(3, 3), strides=2, padding='same')(x)
x = LeakyReLU()(x)
x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
x = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
x = LeakyReLU()(x)
# As in get_model() above: average pooling blurs neighboring activations
# together, giving a better estimate of the learned filters than the sharp,
# edge-driven maxima that MaxPool2D would propagate.
x = AvgPool2D(pool_size=(2, 2), strides=1, padding='valid')(x)
x = Flatten()(x)
x = Dropout(0.25)(x)
predicted_class = Dense(units=num_classes, activation='softmax')(x)
model = Model(inputs=[img_tensor], outputs=[predicted_class])
model.summary()
'''
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
file_path = os.path.join(save_dir, model_name)
checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1,
                             save_best_only=True, mode='auto', period=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10,
                              verbose=0, mode='auto', min_delta=0.0001,
                              cooldown=0, min_lr=0)
csv_logger = CSVLogger(os.path.join(save_dir, 'Log-Sequential-v1.log'),
                       separator=',', append=False)
train_data_generator = Train_data_generator(batch_size)
'''
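# Hedged sketch of how the commented-out training setup above would be wired
# together; the epoch and step counts are assumptions, not from the original:
# model.fit_generator(train_data_generator, steps_per_epoch=100, epochs=50,
#                     callbacks=[checkpoint, reduce_lr, csv_logger])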
def MobilenetV2(inputs, alpha=1.0, num_classes=1000, include_top=True):
    """Implementation of MobilenetV2."""
    with tf.variable_scope('MobilenetV2'):
        with tf.variable_scope('Conv'):
            first_block = _make_divisible(32 * alpha, 8)
            x = Conv2D(first_block, 3, strides=2, padding='same',
                       use_bias=False, name='Conv2D')(inputs)
            x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                                   name='BatchNorm')(x)
            x = Activation(relu6)(x)

        x = inverted_residuals(x, 16, 3, stride=1, expansion=1, block_id=0, alpha=alpha, residual=False)
        x = inverted_residuals(x, 24, 3, stride=2, expansion=6, block_id=1, alpha=alpha, residual=False)
        x = inverted_residuals(x, 24, 3, stride=1, expansion=6, block_id=2, alpha=alpha)
        x = inverted_residuals(x, 32, 3, stride=2, expansion=6, block_id=3, alpha=alpha, residual=False)
        x = inverted_residuals(x, 32, 3, stride=1, expansion=6, block_id=4, alpha=alpha)
        x = inverted_residuals(x, 32, 3, stride=1, expansion=6, block_id=5, alpha=alpha)
        x = inverted_residuals(x, 64, 3, stride=2, expansion=6, block_id=6, alpha=alpha, residual=False)
        x = inverted_residuals(x, 64, 3, stride=1, expansion=6, block_id=7, alpha=alpha)
        x = inverted_residuals(x, 64, 3, stride=1, expansion=6, block_id=8, alpha=alpha)
        x = inverted_residuals(x, 64, 3, stride=1, expansion=6, block_id=9, alpha=alpha)
        x = inverted_residuals(x, 96, 3, stride=1, expansion=6, block_id=10, alpha=alpha, residual=False)
        x = inverted_residuals(x, 96, 3, stride=1, expansion=6, block_id=11, alpha=alpha)
        x = inverted_residuals(x, 96, 3, stride=1, expansion=6, block_id=12, alpha=alpha)
        x = inverted_residuals(x, 160, 3, stride=2, expansion=6, block_id=13, alpha=alpha, residual=False)
        x = inverted_residuals(x, 160, 3, stride=1, expansion=6, block_id=14, alpha=alpha)
        x = inverted_residuals(x, 160, 3, stride=1, expansion=6, block_id=15, alpha=alpha)
        x = inverted_residuals(x, 320, 3, stride=1, expansion=6, block_id=16, alpha=alpha, residual=False)

        x = Conv2D(_make_divisible(1280 * alpha, 8), 1, use_bias=False)(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
        x = Activation(relu6)(x)

        if include_top:
            with tf.variable_scope('Predictions'):
                x = AvgPool2D((7, 7))(x)
                x = Conv2D(num_classes, 1, activation='softmax', use_bias=True)(x)
                x = Reshape((num_classes, ), name='Reshape_1')(x)
    return x
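# Usage sketch for MobilenetV2() above, assuming the helper functions
# relu6, _make_divisible, and inverted_residuals from the surrounding module.
# The 224x224 input is the standard assumption: five stride-2 stages reduce
# it to the 7x7 map that AvgPool2D((7, 7)) collapses before the 1x1 softmax.
inputs = Input(shape=(224, 224, 3))
predictions = MobilenetV2(inputs, alpha=1.0, num_classes=1000)  # -> (None, 1000)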
def block1_module2(self, net, name):
    # 1x1 branch
    net_1x1 = Conv2D(filters=64, kernel_size=1, padding='same',
                     activation='relu', name=name + '_1x1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_1x1 = BatchNormalization()(net_1x1)
    # 5x5 branch
    net_5x5 = Conv2D(filters=48, kernel_size=1, padding='same',
                     activation='relu', name=name + '_5x5',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_5x5 = BatchNormalization()(net_5x5)
    net_5x5 = Conv2D(filters=64, kernel_size=5, padding='same',
                     activation='relu', name=name + '_5x5_2',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_5x5)
    net_5x5 = BatchNormalization()(net_5x5)
    # 3x3 branch
    net_3x3 = Conv2D(filters=64, kernel_size=1, padding='same',
                     activation='relu', name=name + '_3x3',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_3x3 = BatchNormalization()(net_3x3)
    net_3x3 = Conv2D(filters=96, kernel_size=3, activation='relu',
                     padding='same', name=name + '_3x3_2',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_3x3)
    net_3x3 = BatchNormalization()(net_3x3)
    net_3x3 = Conv2D(filters=96, kernel_size=3, activation='relu',
                     padding='same', name=name + '_3x3_3',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_3x3)
    net_3x3 = BatchNormalization()(net_3x3)
    # average-pool branch
    net_1x1_avg = AvgPool2D(pool_size=3, strides=1, padding='same',
                            name=name + '_net_1x1_avg')(net)
    net_1x1_avg = Conv2D(filters=64, kernel_size=1, activation='relu',
                         padding='same', name=name + '_net_1x1_avg_conv1',
                         kernel_regularizer=regularizers.l2(self.weight_decay))(net_1x1_avg)
    net = concatenate([net_1x1, net_5x5, net_3x3, net_1x1_avg], axis=-1,
                      name=name + '_mixed')
    return net
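# Channel bookkeeping for block1_module2(): the concatenate merges
# 64 (1x1) + 64 (5x5) + 96 (3x3) + 64 (avg-pool) = 288 output channels.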
def build_model(self):
    inputs = Input(shape=self.input_shape)
    net = inputs
    # block 1
    net = Conv2D(filters=32, kernel_size=3, strides=2, activation='relu',
                 padding='same', name='block1_conv1',
                 kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net = BatchNormalization()(net)
    net = Conv2D(filters=32, kernel_size=3, activation='relu', padding='same',
                 name='block1_conv2',
                 kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net = BatchNormalization()(net)
    net = Conv2D(filters=64, kernel_size=3, activation='relu', padding='same',
                 name='block1_conv3',
                 kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net = BatchNormalization()(net)
    net = MaxPooling2D(pool_size=3, strides=2, padding='same',
                       name='block1_pool')(net)
    # block 2
    net = Conv2D(filters=80, kernel_size=1, activation='relu', padding='same',
                 name='block2_conv1',
                 kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net = BatchNormalization()(net)
    net = Conv2D(filters=192, kernel_size=3, activation='relu', padding='same',
                 name='block2_conv2',
                 kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net = BatchNormalization()(net)
    net = MaxPooling2D(pool_size=3, strides=2, padding='same',
                       name='block2_pool')(net)

    net = self.block1_module1(net, 'block1_module1')
    net = self.block1_module2(net, 'block1_module2')
    net = self.block1_module2(net, 'block1_module2_1')
    net = self.block2_module1(net)

    # 1x1 branch
    net_1x1 = Conv2D(filters=128, kernel_size=1, padding='same',
                     activation='relu', name='block2_module2_1x1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_1x1 = BatchNormalization()(net_1x1)
    # 1x7 branch
    net_1x7 = Conv2D(filters=128, kernel_size=(1, 1), padding='same',
                     activation='relu', name='block2_module2_1x7_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_1x7 = BatchNormalization()(net_1x7)
    net_1x7 = Conv2D(filters=128, kernel_size=(1, 7), padding='same',
                     activation='relu', name='block2_module2_1x7_conv2',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_1x7)
    net_1x7 = BatchNormalization()(net_1x7)
    net_1x7 = Conv2D(filters=192, kernel_size=(7, 1), padding='same',
                     activation='relu', name='block2_module2_1x7_conv3',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_1x7)
    net_1x7 = BatchNormalization()(net_1x7)
    # 7x1 branch
    net_7x1 = Conv2D(filters=128, kernel_size=(1, 1), padding='same',
                     activation='relu', name='block2_module2_7x1_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_7x1 = Conv2D(filters=128, kernel_size=(7, 1), padding='same',
                     activation='relu', name='block2_module2_7x1_conv2',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_7x1)
    net_7x1 = Conv2D(filters=128, kernel_size=(7, 1), padding='same',
                     activation='relu', name='block2_module2_7x1_conv3',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_7x1)
    net_7x1 = Conv2D(filters=192, kernel_size=(1, 7), padding='same',
                     activation='relu', name='block2_module2_7x1_conv4',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_7x1)
    # average-pool branch
    net_avg = AvgPool2D(pool_size=3, strides=1, padding='same',
                        name='block2_module2_1x1_avg')(net)
    net_avg = Conv2D(filters=192, kernel_size=1, padding='same',
                     activation='relu', name='block2_module2_1x1_avg_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_avg)
    net = concatenate([net_1x1, net_1x7, net_7x1, net_avg], axis=-1)

    net = self.block2_modul3_4(net, 'block2_module3')
    net = self.block2_modul3_4(net, 'block2_module4')

    # 1x1 branch
    net_1x1 = Conv2D(filters=192, kernel_size=1, padding='same',
                     activation='relu', name='block2_module5_1x1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_1x1 = BatchNormalization()(net_1x1)
    # 1x7 branch
    net_1x7 = Conv2D(filters=192, kernel_size=(1, 1), padding='same',
                     activation='relu', name='block2_module5_1x7_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_1x7 = BatchNormalization()(net_1x7)
    net_1x7 = Conv2D(filters=192, kernel_size=(1, 7), padding='same',
                     activation='relu', name='block2_module5_1x7_conv2',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_1x7)
    net_1x7 = BatchNormalization()(net_1x7)
    net_1x7 = Conv2D(filters=192, kernel_size=(7, 1), padding='same',
                     activation='relu', name='block2_module5_1x7_conv3',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_1x7)
    net_1x7 = BatchNormalization()(net_1x7)
    # 7x1 branch
    net_7x1 = Conv2D(filters=192, kernel_size=(1, 1), padding='same',
                     activation='relu', name='block2_module5_7x1_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_7x1 = Conv2D(filters=192, kernel_size=(7, 1), padding='same',
                     activation='relu', name='block2_module5_7x1_conv2',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_7x1)
    net_7x1 = Conv2D(filters=192, kernel_size=(7, 1), padding='same',
                     activation='relu', name='block2_module5_7x1_conv3',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_7x1)
    net_7x1 = Conv2D(filters=192, kernel_size=(1, 7), padding='same',
                     activation='relu', name='block2_module5_7x1_conv4',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_7x1)
    # average-pool branch
    net_avg = AvgPool2D(pool_size=3, strides=1, padding='same',
                        name='block2_module5_1x1_avg')(net)
    net_avg = Conv2D(filters=192, kernel_size=1, padding='same',
                     activation='relu', name='block2_module5_1x1_avg_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_avg)
    net = concatenate([net_1x1, net_1x7, net_7x1, net_avg], axis=-1)
def block2_modul3_4(self, net, name):
    # 1x1 branch
    net_1x1 = Conv2D(filters=192, kernel_size=1, padding='same',
                     activation='relu', name=name + '_1x1_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_1x1 = BatchNormalization()(net_1x1)
    # 1x7 branch
    net_1x7 = Conv2D(filters=160, kernel_size=(1, 1), padding='same',
                     activation='relu', name=name + '_1x7_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_1x7 = BatchNormalization()(net_1x7)
    net_1x7 = Conv2D(filters=160, kernel_size=(1, 7), padding='same',
                     activation='relu', name=name + '_1x7_conv2',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_1x7)
    net_1x7 = BatchNormalization()(net_1x7)
    net_1x7 = Conv2D(filters=192, kernel_size=(7, 1), padding='same',
                     activation='relu', name=name + '_1x7_conv3',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_1x7)
    net_1x7 = BatchNormalization()(net_1x7)
    # 7x1 branch
    net_7x1 = Conv2D(filters=160, kernel_size=(1, 1), padding='same',
                     activation='relu', name=name + '_7x1_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net)
    net_7x1 = Conv2D(filters=160, kernel_size=(7, 1), padding='same',
                     activation='relu', name=name + '_7x1_conv2',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_7x1)
    net_7x1 = Conv2D(filters=160, kernel_size=(7, 1), padding='same',
                     activation='relu', name=name + '_7x1_conv3',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_7x1)
    net_7x1 = Conv2D(filters=192, kernel_size=(1, 7), padding='same',
                     activation='relu', name=name + '_7x1_conv4',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_7x1)
    # average-pool branch
    net_avg = AvgPool2D(pool_size=3, strides=1, padding='same',
                        name=name + '_1x1_avg')(net)
    net_avg = Conv2D(filters=192, kernel_size=1, padding='same',
                     activation='relu', name=name + '_1x1_avg_conv1',
                     kernel_regularizer=regularizers.l2(self.weight_decay))(net_avg)
    net = concatenate([net_1x1, net_1x7, net_7x1, net_avg], axis=-1)
    return net
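# Channel bookkeeping for block2_modul3_4(): each of the four branches ends
# with 192 filters, so the concatenate yields 192 * 4 = 768 channels, the
# width of the 17x17 Inception-v3 stage this module mirrors.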
def constrained_adversarial_autoencoder_Chen(z, x, dropout_rate, dropout, config):
    outputs = {}
    dim = 64
    with tf.variable_scope('Encoder'):
        encoder = Bunch({
            # Model definition
            'enc_conv': Conv2D(filters=dim, kernel_size=3, padding='same'),
            'enc_res1_conv1': Conv2D(filters=2 * dim, kernel_size=3, padding='same'),
            'enc_res1_layernorm1': LayerNormalization([1, 2]),
            'enc_res1_conv2': Conv2D(filters=2 * dim, kernel_size=3, strides=2, padding='same'),
            'enc_res1_layernorm2': LayerNormalization([1, 2]),
            'enc_res1_shortcut1': Conv2D(filters=2 * dim, kernel_size=1, padding='same'),
            'enc_res1_shortcut2': AvgPool2D(),
            'enc_res2_conv1': Conv2D(filters=4 * dim, kernel_size=3, padding='same'),
            'enc_res2_layernorm1': LayerNormalization([1, 2]),
            'enc_res2_conv2': Conv2D(filters=4 * dim, kernel_size=3, strides=2, padding='same'),
            'enc_res2_layernorm2': LayerNormalization([1, 2]),
            'enc_res2_shortcut1': Conv2D(filters=4 * dim, kernel_size=1, padding='same'),
            'enc_res2_shortcut2': AvgPool2D(),
            'enc_res3_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'enc_res3_layernorm1': LayerNormalization([1, 2]),
            'enc_res3_conv2': Conv2D(filters=8 * dim, kernel_size=3, strides=2, padding='same'),
            'enc_res3_layernorm2': LayerNormalization([1, 2]),
            'enc_res3_shortcut1': Conv2D(filters=8 * dim, kernel_size=1, padding='same'),
            'enc_res3_shortcut2': AvgPool2D(),
            'enc_res4_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'enc_res4_layernorm1': LayerNormalization([1, 2]),
            'enc_res4_conv2': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'enc_res4_layernorm2': LayerNormalization([1, 2]),
            'enc_flatten': Flatten(),
            'enc_dense': Dense(config.zDim),
        })
        features, z_ = evaluate_encoder(encoder, x)
        outputs['z_'] = z_

    with tf.variable_scope('Decoder'):
        decoder = Bunch({
            # Model definition
            'dec_1': Dense(np.prod(features.get_shape().as_list()[1:])),
            'dec_res1_conv1': Conv2D(filters=8 * dim, kernel_size=3, padding='same'),
            'dec_res1_layernorm1': LayerNormalization([1, 2]),
            'dec_res1_conv2': Conv2DTranspose(filters=8 * dim, kernel_size=3, padding='same'),
            'dec_res1_layernorm2': LayerNormalization([1, 2]),
            'dec_res2_conv1': Conv2D(filters=4 * dim, kernel_size=3, padding='same'),
            'dec_res2_layernorm1': LayerNormalization([1, 2]),
            'dec_res2_conv2': Conv2DTranspose(filters=4 * dim, kernel_size=3, strides=2, padding='same'),
            'dec_res2_layernorm2': LayerNormalization([1, 2]),
            'dec_res2_shortcut': Conv2DTranspose(filters=4 * dim, kernel_size=1, padding='same', strides=2),
            'dec_res3_conv1': Conv2D(filters=2 * dim, kernel_size=3, padding='same'),
            'dec_res3_layernorm1': LayerNormalization([1, 2]),
            'dec_res3_conv2': Conv2DTranspose(filters=2 * dim, kernel_size=3, strides=2, padding='same'),
            'dec_res3_layernorm2': LayerNormalization([1, 2]),
            'dec_res3_shortcut': Conv2DTranspose(filters=2 * dim, kernel_size=1, padding='same', strides=2),
            'dec_res4_conv1': Conv2D(filters=dim, kernel_size=3, padding='same'),
            'dec_res4_layernorm1': LayerNormalization([1, 2]),
            'dec_res4_conv2': Conv2DTranspose(filters=dim, kernel_size=3, strides=2, padding='same'),
            'dec_res4_layernorm2': LayerNormalization([1, 2]),
            'dec_res4_shortcut': Conv2DTranspose(filters=dim, kernel_size=1, padding='same', strides=2),
            # post-process
            'dec_layernorm': LayerNormalization([1, 2]),
            'dec_conv': Conv2D(1, 1, padding='same'),
        })
        outputs['x_hat'] = x_hat = evaluate_decoder(decoder, z_, features.get_shape().as_list()[1:])
        # project the reconstruction back to latent space for the constrained part
        outputs['z_rec'] = evaluate_encoder(encoder, x_hat)[1]
    # Discriminator
    with tf.variable_scope('Discriminator'):
        discriminator = [
            Dense(400, activation=leaky_relu),
            Dense(200, activation=leaky_relu),
            Dense(1)
        ]
        # fake codes
        temp_out = z_
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_'] = temp_out
        # real codes
        temp_out = z
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d'] = temp_out
        # random interpolation between real and fake codes (WGAN-GP style)
        epsilon = tf.random_uniform([], 0.0, 1.0)
        outputs['z_hat'] = z_hat = epsilon * z + (1 - epsilon) * z_
        temp_out = z_hat
        for layer in discriminator:
            temp_out = layer(temp_out)
        outputs['d_hat'] = temp_out
    return outputs
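# The epsilon interpolation above is the standard WGAN-GP construction; a
# typical follow-up (an assumption, not part of the original function) uses
# d_hat to penalize the critic's gradient norm at z_hat:
def gradient_penalty(d_hat, z_hat, weight=10.0):
    # Hypothetical helper: standard WGAN-GP gradient-penalty term.
    grads = tf.gradients(d_hat, [z_hat])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=1))
    return weight * tf.reduce_mean((slopes - 1.0) ** 2)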