Example #1
def generator_resnet(zs,
                     labels,
                     gf_dim,
                     num_classes,
                     kernel=(3, 3),
                     strides=(2, 2),
                     dilations=(1, 1),
                     pooling='avg',
                     scope_name='Generator',
                     reuse=False):
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        # Project the latent vector to a 3x3 feature map with gf_dim * 8 channels.
        act0 = ops.linear(zs, gf_dim * 3 * 3 * 8, 'g_h0')
        act0 = tf.reshape(act0, [-1, 3, 3, gf_dim * 8])
        act1 = block(act0, gf_dim * 8, 'g_block1')  # 6 * 6
        act2 = block(act1, gf_dim * 4, 'g_block2')  # 12 * 12
        tf.summary.histogram(act2.name, act2)
        # act3 = block(act2, target_class, gf_dim * 2, 'g_block3')  # 3 * 48
        # act4 = block(act3, target_class, gf_dim * 2, 'g_block4')  # 3 * 96
        act5 = block(act2, gf_dim, 'g_block5')  # 24 * 24
        # Final batch norm + ReLU, then a 3x3 convolution down to one channel.
        bn = ops.BatchNorm(name='g_bn')

        act5 = tf.nn.relu(bn(act5))
        act6 = ops.conv2d(act5, 1, 3, 3, 1, 1, name='g_conv_last')
        out = tf.nn.tanh(act6)
        tf.summary.histogram(out.name, out)
        return out
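
A minimal call sketch for the snippet above, assuming generator_resnet and the project-local ops/block helpers are importable (the import path, batch size, and latent size below are illustrative assumptions, not taken from the original):

import tensorflow as tf

# from gan.generator import generator_resnet  # hypothetical module path

batch_size, z_dim, gf_dim = 64, 128, 64

# TF1-style latent noise batch; labels are accepted by the signature but not
# used in this example's body, so None is passed here as an assumption.
zs = tf.random_normal([batch_size, z_dim])
fake = generator_resnet(zs, labels=None, gf_dim=gf_dim,
                        num_classes=None, reuse=False)
# The block stack upsamples 3x3 -> 6x6 -> 12x12 -> 24x24; the final 3x3
# convolution plus tanh produces a single-channel 24x24 output in [-1, 1].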
Example #2
    def __init__(self, config, shape, num_classes=None, scope_name=None):
        super(ResnetGenerator, self).__init__(config, shape, num_classes,
                                              scope_name)
        self.strides = self.get_strides()
        self.number_of_layers = len(self.strides)
        self.starting_dim = self.dim * (2**self.number_of_layers)
        self.get_initial_shape(config)
        self.final_bn = ops.BatchNorm(name='g_bn')
Example #3
def generator_resnet(zs,
                     labels,
                     gf_dim,
                     num_classes,
                     kernel=(3, 3),
                     strides=[(2, 2)],
                     dilations=(1, 1),
                     pooling='avg',
                     scope_name='Generator',
                     reuse=False,
                     output_shape=[8, 128, 1],
                     act=tf.nn.leaky_relu):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:

        height_d, width_d = get_dimentions_factors(strides)
        number_of_layers = len(strides)
        hidden_dim = gf_dim * (2**(number_of_layers - 1))

        c_h = int(output_shape[0] / height_d)
        c_w = int((output_shape[1] / width_d))
        h = ops.snlinear(zs, c_h * c_w * hidden_dim, name='noise_linear')
        h = tf.reshape(h, [-1, c_h, c_w, hidden_dim])
        print("COMPRESSED TO: ", h.shape)

        with tf.variable_scope("up", reuse=tf.AUTO_REUSE):
            for layer_id in range(number_of_layers):
                print(h.shape)
                block_name = 'up_block{}'.format(number_of_layers -
                                                 (layer_id + 1))
                dilation_rate = (dilations[0]**(number_of_layers - layer_id),
                                 dilations[1]**(number_of_layers - layer_id))
                h = sn_block(h, hidden_dim, block_name, get_kernel(h, kernel),
                             strides[layer_id], dilation_rate, act, pooling,
                             'VALID')
                tf.summary.histogram(block_name, h, family=scope_name)
                if layer_id == number_of_layers - 2:
                    h = attention(h, hidden_dim, sn=True, reuse=reuse)
                    tf.summary.histogram("up_attention", h, family=scope_name)
                # integer division keeps the channel count an int under Python 3
                hidden_dim = hidden_dim // strides[layer_id][1]

        bn = ops.BatchNorm(name='g_bn')
        h_act = tf.nn.leaky_relu(bn(h), name="h_act")
        if output_shape[2] == 1:
            out = tf.nn.tanh(ops.snconv2d(h_act,
                                          1, (output_shape[0], 1),
                                          name='last_conv'),
                             name="generated")
        else:
            out = tf.nn.tanh(ops.snconv2d(h_act, 3, (1, 1), name='last_conv'),
                             name="generated")
        tf.summary.histogram("Generated_results", out, family=scope_name)
        print("GENERATED SHAPE", out.shape)
        return out
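
The helper get_dimentions_factors is not shown in any of these snippets; from the way c_h and c_w are derived it evidently returns the cumulative height and width scaling factors of the stride schedule. A plausible reconstruction, offered purely as an assumption and not as the project's actual implementation:

def get_dimentions_factors(strides):
    """Cumulative (height, width) scaling factors of a stride schedule,
    e.g. [(1, 2), (1, 2), (2, 2)] -> (2, 8).  Reconstructed from usage."""
    height_d, width_d = 1, 1
    for stride_h, stride_w in strides:
        height_d *= stride_h
        width_d *= stride_w
    return height_d, width_d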
Example #4
    def network(self, z, labels, reuse):
        height_d, width_d = get_dimentions_factors(self.strides)
        number_of_layers = len(self.strides)
        hidden_dim = self.dim * (2**(number_of_layers - 1))
        c_h = int(self.height / height_d)
        c_w = int((self.length / width_d))
        h = ops.snlinear(z, c_h * c_w * hidden_dim, name='noise_linear')
        h = tf.reshape(h, [-1, c_h, c_w, hidden_dim])
        print("COMPRESSED TO: ", h.shape)

        with tf.variable_scope("up", reuse=reuse):
            for layer_id in range(number_of_layers):
                print(h.shape)
                block_name = 'up_block{}'.format(number_of_layers -
                                                 (layer_id + 1))
                dilation_rate = (1, 1)
                h = sn_block(h, hidden_dim, block_name,
                             get_kernel(h,
                                        self.kernel), self.strides[layer_id],
                             dilation_rate, self.act, self.pooling, 'VALID')
                tf.summary.histogram(block_name, h, family=self.scope_name)
                if layer_id == number_of_layers - 2:
                    h = attention(h, hidden_dim, sn=True, reuse=reuse)
                    tf.summary.histogram("up_attention",
                                         h,
                                         family=self.scope_name)
                # integer division keeps the channel count an int under Python 3
                hidden_dim = hidden_dim // self.strides[layer_id][1]

        bn = ops.BatchNorm(name='g_bn')
        h_act = leaky_relu(bn(h), name="h_act")
        if self.output_shape[2] == 1:
            out = tf.nn.tanh(ops.snconv2d(h_act,
                                          1, (self.output_shape[0], 1),
                                          name='last_conv'),
                             name="generated")
        else:
            out = tf.nn.tanh(ops.snconv2d(h_act, 21, (1, 1), name='last_conv'),
                             name="generated")
        tf.summary.histogram("Generated_results", out, family=self.scope_name)
        print("GENERATED SHAPE", out.shape)
        return out
Example #5
def generator_1d(zs,
                 labels,
                 gf_dim,
                 num_classes,
                 kernel=(3, 3),
                 strides=(2, 2),
                 dilations=(1, 1),
                 pooling='avg',
                 scope_name='Generator',
                 reuse=False,
                 output_shape=[8, 128, 1],
                 act=tf.nn.leaky_relu):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:

        # strides_schedule = get_strides_schedule(input_width)
        strides_schedule = [(1, 2), (1, 2), (1, 2), (1, 2)]
        height_d, width_d = get_dimentions_factors(strides_schedule)

        number_of_layers = len(strides_schedule)
        hidden_dim = gf_dim * (2**(number_of_layers - 1))
        kernel = (1, 5)
        c_h = 1  # int(embedding_height / height_d)
        c_w = int((output_shape[1] / width_d))
        h = ops.snlinear(zs, c_h * int(c_w) * hidden_dim, name='noise_linear')
        tf.summary.histogram("noise_snlinear", h, family=scope_name)
        h = tf.reshape(h, shape=[-1, c_h, int(c_w), hidden_dim])
        # new_shape = [h.get_shape().as_list()[0], h.get_shape().as_list()[1],
        #              h.get_shape().as_list()[2] * 2, hidden_dim]
        # h = ops.sndeconv2d(h, new_shape, (c_h, 3), name='noise_expand', strides=(1, 2))
        # h = act(h)
        tf.summary.histogram("before_blocks", h, family=scope_name)
        print("COMPRESSED TO: ", h.shape)

        with tf.variable_scope("up", reuse=tf.AUTO_REUSE):
            for layer_id in range(number_of_layers):
                block_name = 'up_block{}'.format(number_of_layers -
                                                 (layer_id + 1))
                # dilation_rate = (dilations[0] ** (number_of_layers - layer_id),
                #                  dilations[1] ** (number_of_layers - layer_id))
                dilation_rate = (1, 1)
                h = sn_block(h, hidden_dim, block_name, kernel,
                             strides_schedule[layer_id], dilation_rate, act,
                             pooling, 'VALID')
                tf.summary.histogram(block_name, h, family=scope_name)
                if layer_id == number_of_layers - 2:
                    h = attention(h, hidden_dim, sn=True, reuse=reuse)
                    tf.summary.histogram("up_attention", h, family=scope_name)
                # integer division keeps the channel count an int under Python 3
                hidden_dim = hidden_dim // 2

        bn = ops.BatchNorm(name='g_bn')
        h_act = tf.nn.leaky_relu(bn(h), name="h_act")
        h_act = tf.reshape(h_act, [
            -1, output_shape[0], output_shape[1],
            int(hidden_dim / output_shape[0]) * 2
        ])
        h_act = ops.snconv2d(h_act,
                             int(hidden_dim / output_shape[0]) * 2,
                             (output_shape[0], 1),
                             name='embedding_height')
        out = tf.nn.tanh(ops.snconv2d(h_act,
                                      1, (output_shape[0], 1),
                                      name='last_conv'),
                         name="generated")
        tf.summary.histogram("Generated_results", out, family=scope_name)
        print("GENERATED SHAPE", out.shape)
        return out
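
With the hard-coded schedule above, the shape bookkeeping works out as follows (a quick check, reusing the get_dimentions_factors reconstruction sketched earlier and an illustrative gf_dim of 64):

gf_dim, output_shape = 64, [8, 128, 1]
strides_schedule = [(1, 2), (1, 2), (1, 2), (1, 2)]

height_d, width_d = get_dimentions_factors(strides_schedule)   # (1, 16)
hidden_dim = gf_dim * (2 ** (len(strides_schedule) - 1))       # 512
c_h, c_w = 1, output_shape[1] // width_d                       # 1, 8
# The noise is first reshaped to [-1, 1, 8, 512]; four width-doubling blocks
# then widen it back to 128 columns while halving the channel count each time.
print(c_h, c_w, hidden_dim)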
Example #6
def _block(x,
           labels,
           out_channels,
           num_classes,
           name,
           conv=ops.snconv2d,
           kernel=(3, 3),
           strides=(1, 1),
           dilations=(1, 1),
           act=leaky_relu,
           pooling='avg',
           padding='SAME'):
    """Builds the residual blocks used in the generator.

    Compared with block, it supports class-conditional batch normalization when num_classes is provided.

    Args:
      x: The 4D input tensor.
      labels: The conditional labels used in the generation.
      out_channels: Number of features in the output layer.
      num_classes: Number of classes in the labels. If None, standard batch
        norm is used instead of conditional batch norm.
      name: Scope name.
      conv: Convolution function. Options: ops.conv2d or ops.snconv2d.
      kernel: The height and width of the convolution kernel filter (Default value = (3, 3))
      strides: The height and width of the convolution strides (Default value = (1, 1))
      dilations: The height and width of the convolution dilation (Default value = (1, 1))
      act: The activation function used in the block. (Default value = leaky_relu)
      pooling: Pooling strategy. (Default value = 'avg', i.e. average pooling)
      padding: Type of padding. (Default value = 'SAME')

    Returns:
      A tensor representing the output of the operation.

    """
    with tf.variable_scope(name):
        x_0 = x
        if num_classes is not None:
            bn0 = ops.ConditionalBatchNorm(num_classes, name='cbn_0')
            bn1 = ops.ConditionalBatchNorm(num_classes, name='cbn_1')
            x = bn0(x, labels)
        else:
            # Unconditional case: fall back to standard batch norm. (In the
            # original snippet bn0/bn1 were created here but never applied and
            # the line carried a TODO; applying them mirrors the conditional path.)
            bn0 = ops.BatchNorm(name='bn_0')
            bn1 = ops.BatchNorm(name='bn_1')
            x = bn0(x)
        x = act(x)
        x = up_sampling(x, pooling, out_channels, conv, kernel, strides,
                        'conv1', padding)
        if num_classes is not None:
            x = bn1(x, labels)
        else:
            x = bn1(x)
        x = act(x)
        x = conv(x,
                 out_channels,
                 kernel,
                 dilations=dilations,
                 name='conv2',
                 padding=padding)
        x_0 = up_sampling(x_0, pooling, out_channels, conv, kernel, strides,
                          'conv3', padding)
        return x_0 + x
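
A minimal usage sketch for this block, assuming the project's ops and up_sampling helpers (and the module-level leaky_relu) are available; the tensor shapes, class count, and label format below are illustrative assumptions:

import tensorflow as tf

x = tf.random_normal([16, 12, 12, 256])       # example feature map
# Integer class ids; the exact label format expected by
# ops.ConditionalBatchNorm is an assumption here.
labels = tf.zeros([16], dtype=tf.int32)

# Unconditional path: num_classes=None selects plain batch norm.
h_uncond = _block(x, labels=None, out_channels=128,
                  num_classes=None, name='g_block_uncond')

# Conditional path: per-class statistics via ops.ConditionalBatchNorm.
h_cond = _block(x, labels=labels, out_channels=128,
                num_classes=10, name='g_block_cond')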