def discriminator(self, input_, target, stride=2, layer_count=4):
    """PatchGAN discriminator: scores overlapping input patches as real or fake."""
    # Condition on the input by concatenating it with the (real or generated) target.
    input_ = tf.concat([input_, target], axis=3, name='Concat')
    layer_specs = self.num_discriminator_filters * np.array([1, 2, 4, 8])
    for i, output_channels in enumerate(layer_specs, 1):
        with tf.variable_scope('Layer' + str(i)) as scope:
            if i != 1:
                # No batch norm on the first layer, per the pix2pix convention.
                input_ = BatchNorm(input_, isTrain=self.isTrain)
            if i == layer_count:
                # The last layer before the output map drops to stride 1.
                stride = 1
            input_ = LeakyReLU(input_)
            input_ = Conv(input_, output_channels=output_channels, kernel_size=4,
                          stride=stride, padding='VALID', mode='discriminator')
    with tf.variable_scope('Final_Layer') as scope:
        # Single-channel patch map of real/fake scores.
        output = Conv(input_, output_channels=1, kernel_size=4, stride=1,
                      padding='VALID', mode='discriminator')
    return tf.sigmoid(output)
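# With the defaults above (layer_count=4, kernel size 4 everywhere, stride 2 on
# the first three layers, then stride 1, plus the final stride-1 conv), each
# unit of the output patch map sees a 70x70 input patch -- the classic 70x70
# PatchGAN. A quick back-of-the-envelope check, runnable on its own:
def receptive_field(kernels, strides):
    """Walks backward from one output unit to the input patch it covers."""
    rf = 1
    for k, s in reversed(list(zip(kernels, strides))):
        rf = (rf - 1) * s + k
    return rf

print(receptive_field(kernels=[4] * 5, strides=[2, 2, 2, 1, 1]))  # -> 70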
def generator_i(self, input_):
    """54-layer Tiramisu (FC-DenseNet) generator."""
    with tf.variable_scope("NEW", reuse=tf.AUTO_REUSE):
        with tf.variable_scope('InputConv') as scope:
            input_ = Conv(input_, kernel_size=3, stride=1,
                          output_channels=self.growth_rate * 4)
        # Encoder: five dense blocks, saving each output as a skip connection.
        collect_conv = []
        for i in range(1, 6):
            input_ = self.DenseBlock(input_, name='Encoder' + str(i),
                                     layers=self.layers)
            collect_conv.append(input_)
            input_ = self.TransitionDown(input_, name='TD' + str(i))
        input_ = self.DenseBlock(input_, name='BottleNeck', layers=15)
        # Decoder: upsample, then concatenate the matching encoder output
        # (decoder block 6 - i pairs with encoder block 6 - i).
        for i in range(1, 6):
            input_ = self.TransitionUp(input_, output_channels=self.growth_rate * 4,
                                       name='TU' + str(6 - i))
            input_ = tf.concat([input_, collect_conv[6 - i - 1]], axis=3,
                               name='Decoder' + str(6 - i) + '/Concat')
            input_ = self.DenseBlock(input_, name='Decoder' + str(6 - i),
                                     layers=self.layers)
        with tf.variable_scope('OutputConv') as scope:
            output = Conv(input_, kernel_size=1, stride=1, output_channels=3)
        return tf.nn.tanh(output)
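# For context, a minimal sketch of the usual pix2pix-style wiring and losses
# around these two networks. This is an assumption for illustration, not this
# repository's training code: `model`, `x` (conditioning input), and `y` (real
# target) are hypothetical stand-ins, the L1 weight of 100 is the pix2pix
# default, and the variable reuse needed between the two discriminator calls
# is elided.
def pix2pix_losses(model, x, y, l1_weight=100.0, eps=1e-12):
    """Standard conditional-GAN objectives; eps guards the logs."""
    fake = model.generator_i(x)                    # in [-1, 1] via tanh
    predict_real = model.discriminator(x, y)       # patch map -> 1 if real
    predict_fake = model.discriminator(x, fake)    # patch map -> 0 if fake
    d_loss = tf.reduce_mean(-(tf.log(predict_real + eps) +
                              tf.log(1 - predict_fake + eps)))
    g_loss = (tf.reduce_mean(-tf.log(predict_fake + eps)) +
              l1_weight * tf.reduce_mean(tf.abs(y - fake)))
    return d_loss, g_loss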
def Layer(self, input_):
    """Creates the components inside a composite layer of a Dense Block:
    BN -> ReLU -> 3x3 conv -> dropout."""
    with tf.variable_scope("Composite"):
        next_layer = BatchNorm(input_, isTrain=self.isTrain)
        next_layer = ReLU(next_layer)
        # Each composite layer emits growth_rate feature maps.
        next_layer = Conv(next_layer, kernel_size=3, stride=1,
                          output_channels=self.growth_rate)
        next_layer = DropOut(next_layer, isTrain=self.isTrain, rate=0.2)
        return next_layer
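# DenseBlock itself does not appear in this section. A minimal sketch of the
# standard DenseNet pattern it would follow, reusing Layer above -- an
# assumption about its shape, not the repository's implementation:
def DenseBlock(self, input_, name, layers):
    """Stacks `layers` composite layers, concatenating each new feature map."""
    with tf.variable_scope(name):
        for i in range(layers):
            with tf.variable_scope('Layer' + str(i + 1)):
                # Each composite layer adds growth_rate channels to the stack.
                output = self.Layer(input_)
                input_ = tf.concat([input_, output], axis=3)
        return input_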
def TransitionDown(self, input_, name):
    """Compresses the channel count by half, then halves the spatial resolution."""
    with tf.variable_scope(name):
        # DenseNet-style compression: keep reduction * current channel count.
        reduction = 0.5
        reduced_output_size = int(int(input_.get_shape()[-1]) * reduction)
        next_layer = BatchNorm(input_, isTrain=self.isTrain, decay=self.decay)
        next_layer = Conv(next_layer, kernel_size=1, stride=1,
                          output_channels=reduced_output_size)
        next_layer = DropOut(next_layer, isTrain=self.isTrain, rate=0.2)
        next_layer = AvgPool(next_layer)
        return next_layer
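# TransitionUp (called by generator_i above) is likewise absent from this
# section. The Tiramisu paper upsamples with a stride-2 transposed
# convolution; a minimal sketch under that assumption:
def TransitionUp(self, input_, output_channels, name):
    """Doubles the spatial resolution with a stride-2 transposed convolution."""
    with tf.variable_scope(name):
        return tf.layers.conv2d_transpose(input_, filters=output_channels,
                                          kernel_size=3, strides=2,
                                          padding='same')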
def op(node_id, op_id, x_shape, channels, strides):
    """Builds the branch operation selected by op_id for one edge of a cell.

    Returns the op, the resulting feature-map shape (x_shape tracks the
    feature maps as [H, W, C]), and an optional FactorizedReduce used to
    match shapes for strided identity branches.
    """
    br_op = None
    x_id_fact_reduce = None
    # Only ops that consume one of the cell's two inputs (node_id 0 or 1) are
    # strided: in a reduction cell the intermediate nodes already operate at
    # the reduced resolution, so their ops keep stride 1.
    x_stride = strides if node_id in [0, 1] else 1
    out_channels = channels

    if op_id in (0, 1):
        # Separable convolution, kernel size 3 or 5.
        br_op = SepConv(C_in=channels, C_out=channels,
                        kernel_size=3 if op_id == 0 else 5,
                        strides=x_stride, padding='same')
    elif op_id == 2:
        br_op = layers.AveragePooling2D(pool_size=3, strides=x_stride,
                                        padding='same')
        out_channels = x_shape[-1]  # pooling preserves the channel count
    elif op_id == 3:
        br_op = layers.MaxPool2D(pool_size=3, strides=x_stride, padding='same')
        out_channels = x_shape[-1]
    elif op_id in (4, 5):
        # Both ids map to an identity / skip connection. When strided, a
        # FactorizedReduce is required to halve the resolution and project to
        # `channels` feature maps, since Identity() can do neither.
        br_op = Identity()
        if x_stride > 1:
            assert x_stride == 2
            x_id_fact_reduce = FactorizedReduce(C_in=x_shape[-1], C_out=channels)
    elif op_id in (6, 7, 8, 9):
        # Plain convolutions: 1x1, 3x3, 1x3, and 1x7.
        kernel_size = {6: 1, 7: 3, 8: (1, 3), 9: (1, 7)}[op_id]
        br_op = Conv(C_in=channels, C_out=channels, kernel_size=kernel_size,
                     strides=x_stride, padding='same')
    elif op_id in (10, 11, 12):
        br_op = layers.MaxPool2D(pool_size={10: 2, 11: 3, 12: 5}[op_id],
                                 strides=x_stride, padding='same')
        out_channels = x_shape[-1]  # pooling cannot change the channel count
    elif op_id in (13, 14, 15):
        br_op = layers.AveragePooling2D(pool_size={13: 2, 14: 3, 15: 5}[op_id],
                                        strides=x_stride, padding='same')
        out_channels = x_shape[-1]
    else:
        raise ValueError('unknown op_id: ' + str(op_id))

    x_shape = [x_shape[0] // x_stride, x_shape[1] // x_stride, out_channels]
    return br_op, x_shape, x_id_fact_reduce
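# How one edge of a cell might consume the three return values -- `x` and the
# shapes here are hypothetical:
br_op, out_shape, fact_reduce = op(node_id=0, op_id=4,
                                   x_shape=[32, 32, 64], channels=64, strides=2)
y = br_op(x)              # Identity leaves x untouched at 32x32x64...
if fact_reduce is not None:
    y = fact_reduce(y)    # ...so FactorizedReduce halves H and W and projects
                          # to `channels` feature maps, matching out_shape.
assert out_shape == [16, 16, 64]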