Code example #1
    def _build_discriminator(self, tensor=None, training=False):

        with tf.variable_scope('discriminator') as scope:
            # set reuse if necessary
            if tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope=scope.name):
                scope.reuse_variables()

            # discriminator base
            tensor = self._build_discriminator_base(tensor, training)

            # final layer
            d_out = 2
            with tf.variable_scope('final.{0}-{1}'.format(
                    tensor.shape[-1], d_out)):
                out_logits = tf.reshape(
                    tf.layers.conv2d(tensor,
                                     d_out,
                                     4,
                                     2,
                                     'valid',
                                     kernel_initializer=init_normal(),
                                     name='conv'), [-1, d_out])

        return tf.nn.softmax(out_logits), out_logits
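
The builder above decides whether to share weights by checking whether the 'discriminator' scope already holds entries in GLOBAL_VARIABLES: the first call creates the variables, later calls reuse them. A minimal sketch of that pattern, assuming TensorFlow 1.x and substituting an illustrative dense layer for the conv stack:

    import tensorflow as tf

    def build(x):
        with tf.variable_scope('discriminator') as scope:
            # reuse if an earlier call already created the variables
            if tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope=scope.name):
                scope.reuse_variables()
            logits = tf.layers.dense(x, 2, name='fc')
        return tf.nn.softmax(logits), logits

    x_real = tf.placeholder(tf.float32, [None, 64])
    x_fake = tf.placeholder(tf.float32, [None, 64])
    p_real, _ = build(x_real)  # creates discriminator/fc
    p_fake, _ = build(x_fake)  # shares the same weights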
Code example #2
    def _build_generator(self, tensor=None, training=False, batch_norm=None):
        assert self.n_pixel % 16 == 0, "n_pixel has to be a multiple of 16"
        nfilt = 2000
        csize = 4
        if tensor is None:
            tensor = self.input_n
        if batch_norm is None:
            batch_norm = self.batch_norm_G
        if batch_norm:

            def bn(x, name=None):
                return tf.contrib.layers.batch_norm(x,
                                                    is_training=training,
                                                    renorm=BATCH_RENORM,
                                                    decay=BATCH_NORM_DECAY)
        else:
            bn = tf.identity

        with tf.variable_scope('generator') as scope:
            # set reuse if necessary
            if tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope=scope.name):
                scope.reuse_variables()

            # initial layer
            with tf.variable_scope('initial.{0}-{1}'.format(
                    self.n_noise, nfilt)):
                tensor = tf.nn.relu(
                    bn(tf.layers.conv2d_transpose(
                        tf.reshape(tensor, [-1, 1, 1, self.n_noise]),
                        nfilt,
                        4,
                        2,
                        'valid',
                        use_bias=not batch_norm,
                        kernel_initializer=init_normal(),
                        name='conv'),
                       name='bn'))

            # upscaling layers
            while csize < self.n_pixel // 2:
                with tf.variable_scope('pyramid.{0}-{1}'.format(
                        nfilt, nfilt // 2)):
                    tensor = tf.nn.relu(
                        bn(tf.layers.conv2d_transpose(
                            tensor,
                            nfilt // 2,
                            4,
                            2,
                            'same',
                            use_bias=not batch_norm,
                            kernel_initializer=init_normal(),
                            name='conv'),
                           name='bn'))
                csize *= 2
                nfilt //= 2

            # extra layers
            for it in range(self.n_extra_generator_layers):
                with tf.variable_scope('extra-{0}.{1}'.format(it, nfilt)):
                    tensor = tf.nn.relu(
                        bn(tf.layers.conv2d_transpose(
                            tensor,
                            nfilt,
                            3,
                            1,
                            'same',
                            use_bias=not batch_norm,
                            kernel_initializer=init_normal(),
                            name='conv'),
                           name='bn'))

            # final layer
            with tf.variable_scope('final.{0}-{1}'.format(
                    nfilt, self.n_channel)):
                tensor = tf.layers.conv2d_transpose(
                    tensor,
                    self.n_channel,
                    4,
                    2,
                    'same',
                    activation=tf.tanh,
                    kernel_initializer=init_normal(),
                    name='conv')

            return tensor
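
The generator grows the spatial size geometrically: the initial 'valid' transpose conv turns the 1x1 noise tensor into 4x4, each pyramid layer doubles it while halving nfilt, and the final layer doubles it once more to reach n_pixel. A plain-Python sketch of that schedule, with n_pixel = 64 as an illustrative value:

    n_pixel = 64                 # illustrative; must be a multiple of 16
    csize, trace = 4, [4]        # initial transpose conv: 1x1 -> 4x4
    while csize < n_pixel // 2:
        csize *= 2               # each 'pyramid' layer doubles the size
        trace.append(csize)
    trace.append(csize * 2)      # final stride-2 layer reaches n_pixel
    print(trace)                 # [4, 8, 16, 32, 64]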
Code example #3
    def _build_discriminator_base(self,
                                  tensor=None,
                                  training=False,
                                  batch_norm=None):
        nfilt = 500
        if tensor is None:
            tensor = self.input_x
        if batch_norm is None:
            batch_norm = self.batch_norm_D
        if batch_norm:

            def bn(tensor, name=None):
                return tf.contrib.layers.batch_norm(tensor,
                                                    is_training=training,
                                                    renorm=BATCH_RENORM,
                                                    decay=BATCH_NORM_DECAY)
        else:
            bn = tf.identity

        # initial layer
        with tf.variable_scope('initial.{0}-{1}'.format(self.n_channel,
                                                        nfilt)):
            tensor = lrelu(
                bn(tf.layers.conv2d(tensor,
                                    nfilt,
                                    4,
                                    2,
                                    'same',
                                    use_bias=not batch_norm,
                                    kernel_initializer=init_normal(),
                                    name='conv'),
                   name='bn'))
        nfilt //= 2
        csize = self.n_pixel // 2

        # extra layers
        for it in range(self.n_extra_discriminator_layers):
            with tf.variable_scope('extra-{0}.{1}'.format(it, nfilt)):
                tensor = lrelu(
                    bn(tf.layers.conv2d(tensor,
                                        nfilt,
                                        3,
                                        1,
                                        'same',
                                        use_bias=not batch_norm,
                                        kernel_initializer=init_normal(),
                                        name='conv'),
                       name='bn'))

        # downscaling layers
        while csize > 4:
            with tf.variable_scope('pyramid.{0}-{1}'.format(nfilt, nfilt * 2)):
                tensor = lrelu(
                    bn(tf.layers.conv2d(tensor,
                                        nfilt * 2,
                                        4,
                                        2,
                                        'same',
                                        use_bias=not batch_norm,
                                        kernel_initializer=init_normal(),
                                        name='conv'),
                       name='bn'))
            nfilt *= 2
            csize //= 2

        return tensor
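
This base is the mirror image of the generator's schedule: the initial strided conv halves n_pixel, and each pyramid layer halves it again until csize reaches 4, which is exactly the input size that the 4x4 'valid' conv in code example #1 collapses to 1x1. A quick check in plain Python, again with an illustrative n_pixel = 64:

    n_pixel = 64
    csize = n_pixel // 2         # after the initial strided conv
    while csize > 4:
        csize //= 2              # each 'pyramid' layer halves the size
    print(csize)                 # 4 -> a 4x4 'valid' conv yields 1x1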
Code example #4
File: TGAN.py (project: AchillesTurtle/TGAN)
    def _build_discriminator(self,
                             tensor=None,
                             label=None,
                             training=False,
                             batch_norm=None):
        with tf.variable_scope('discriminator') as scope:
            # set reuse if necessary
            if tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope=scope.name):
                scope.reuse_variables()
            nfilt = 512
            # no arguments given: labeled real data is used as input
            if tensor is None and label is None:
                # add labels to input
                x_shape = self.input_labeled_x.get_shape()
                # tf.tile broadcasts the labels across the spatial
                # dimensions instead of multiplying by a ones tensor
                tensor = tf.concat([
                    self.input_labeled_x,
                    tf.tile(
                        tf.reshape(self.input_labeled_y,
                                   [-1, 1, 1, self.n_class]),
                        [1, x_shape[1], x_shape[2], 1])
                ],
                                   axis=3)
            elif tensor is None or label is None:
                raise ValueError(
                    'tensor and label must both be None or both be given')
            else:
                x_shape = tensor.get_shape()
                label_copy = tf.tile(
                    tf.reshape(label, [-1, 1, 1, self.n_class]),
                    [1, x_shape[1], x_shape[2], 1])
                tensor = tf.concat([tensor, label_copy], axis=3)

            if batch_norm is None:
                batch_norm = self.batch_norm_D
            if batch_norm:

                def bn(tensor, name=None):
                    return tf.contrib.layers.batch_norm(tensor,
                                                        is_training=training,
                                                        renorm=BATCH_RENORM,
                                                        decay=BATCH_NORM_DECAY)
            else:
                bn = tf.identity

            # initial layer
            with tf.variable_scope('initial.{0}-{1}'.format(
                    self.n_channel + self.n_class, nfilt)):
                tensor = lrelu(
                    bn(tf.layers.conv2d(tensor,
                                        nfilt,
                                        4,
                                        2,
                                        'same',
                                        use_bias=not batch_norm,
                                        kernel_initializer=init_normal(),
                                        name='conv'),
                       name='bn'))
            nfilt //= 2
            csize = self.n_pixel // 2

            # extra layers
            for it in range(self.n_extra_discriminator_layers):
                with tf.variable_scope('extra-{0}.{1}'.format(it, nfilt)):
                    tensor = lrelu(
                        bn(tf.layers.conv2d(tensor,
                                            nfilt,
                                            3,
                                            1,
                                            'same',
                                            use_bias=not batch_norm,
                                            kernel_initializer=init_normal(),
                                            name='conv'),
                           name='bn'))

            # downscaling layers
            while csize > 4:
                with tf.variable_scope('pyramid.{0}-{1}'.format(
                        nfilt, nfilt * 2)):
                    tensor = lrelu(
                        bn(tf.layers.conv2d(tensor,
                                            nfilt * 2,
                                            4,
                                            2,
                                            'same',
                                            use_bias=not batch_norm,
                                            kernel_initializer=init_normal(),
                                            name='conv'),
                           name='bn'))
                nfilt *= 2
                csize //= 2

            # final layer
            d_out = 2
            with tf.variable_scope('final.{0}-{1}'.format(
                    tensor.shape[-1], d_out)):
                out_logits = tf.reshape(
                    tf.reduce_mean(tf.layers.conv2d(
                        tensor,
                        d_out,
                        4,
                        2,
                        'valid',
                        kernel_initializer=init_normal(),
                        name='conv'),
                                   axis=3), [-1, d_out])
                # TODO consider outputting the labels instead of softmax & out_logits
        return tf.nn.softmax(out_logits), out_logits
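
The conditioning trick used above turns the one-hot label into n_class constant feature maps and concatenates them to the image channels, so every spatial location sees the label. A minimal sketch with illustrative shapes, assuming TensorFlow 1.x:

    import tensorflow as tf

    n_class = 10
    x = tf.placeholder(tf.float32, [None, 32, 32, 3])    # images
    y = tf.placeholder(tf.float32, [None, n_class])      # one-hot labels

    y_maps = tf.tile(tf.reshape(y, [-1, 1, 1, n_class]),
                     [1, 32, 32, 1])                     # [N, 32, 32, n_class]
    xy = tf.concat([x, y_maps], axis=3)                  # [N, 32, 32, 3 + n_class]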
Code example #5
File: TGAN.py (project: AchillesTurtle/TGAN)
    def _build_generator(self,
                         tensor=None,
                         label=None,
                         training=False,
                         batch_norm=None):
        assert self.n_pixel % 16 == 0, "n_pixel has to be a multiple of 16"
        nfilt = 2048
        csize = 4
        if label is None:
            if self.input_y_g is None:
                # no label input defined: sample labels for the generated
                # images by cycling through the classes
                batch_size = self.input_z_g.shape[0]
                label = get_one_hot(np.repeat(np.arange(self.n_class),
                                              (batch_size // self.m_class) + 1),
                                    depth=self.n_class)
                label = label[0:batch_size, :]
            else:
                #get label from input
                label = self.input_y_g
        if tensor is None:
            # add label to noise
            tensor = tf.concat([self.input_z_g, label], 1)
        else:
            # assuming tensor is a specific noise
            tensor = tf.concat([tensor, label], 1)
            #tensor = tf.concat([tensor, tf.one_hot(label, self.n_class)], 1)
        if batch_norm is None:
            batch_norm = self.batch_norm_G
        if batch_norm:

            def bn(x, name=None):
                return tf.contrib.layers.batch_norm(x,
                                                    is_training=training,
                                                    renorm=BATCH_RENORM,
                                                    decay=BATCH_NORM_DECAY)
        else:
            # identity when batch norm is not activated
            bn = tf.identity

        with tf.variable_scope('generator') as scope:
            # set reuse if necessary
            if tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope=scope.name):
                scope.reuse_variables()

            # initial layer
            with tf.variable_scope('initial.{0}-{1}'.format(
                    self.n_noise + self.n_class, nfilt)):
                tensor = tf.nn.relu(
                    bn(tf.layers.conv2d_transpose(
                        tf.reshape(tensor,
                                   [-1, 1, 1, self.n_noise + self.n_class]),
                        nfilt,
                        4,
                        2,
                        'valid',
                        use_bias=not batch_norm,
                        kernel_initializer=init_normal(),
                        name='conv'),
                       name='bn'))

            # upscaling layers
            while csize < self.n_pixel // 2:
                with tf.variable_scope('pyramid.{0}-{1}'.format(
                        nfilt, nfilt // 2)):
                    tensor = tf.nn.relu(
                        bn(tf.layers.conv2d_transpose(
                            tensor,
                            nfilt // 2,
                            4,
                            2,
                            'same',
                            use_bias=not batch_norm,
                            kernel_initializer=init_normal(),
                            name='conv'),
                           name='bn'))
                csize *= 2
                nfilt //= 2

            # extra layers
            for it in range(self.n_extra_generator_layers):
                with tf.variable_scope('extra-{0}.{1}'.format(it, nfilt)):
                    tensor = tf.nn.relu(
                        bn(tf.layers.conv2d_transpose(
                            tensor,
                            nfilt,
                            3,
                            1,
                            'same',
                            use_bias=not batch_norm,
                            kernel_initializer=init_normal(),
                            name='conv'),
                           name='bn'))
                    # TODO in the original DCGAN structure the result is flattened (3->1) rather than setting the filter count to 1 (self.n_channel)
            # final layer
            with tf.variable_scope('final.{0}-{1}'.format(
                    nfilt, self.n_channel)):
                tensor = tf.layers.conv2d_transpose(
                    tensor,
                    self.n_channel,
                    4,
                    2,
                    'same',
                    activation=tf.tanh,
                    kernel_initializer=init_normal(),
                    name='conv')

            # mask layer
            return tensor * self.mask, label
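
On the generator side, conditioning is simpler: the one-hot label is appended to the noise vector, and the result is reshaped to a 1x1 "image" with n_noise + n_class channels before the first transpose conv. A sketch with illustrative sizes, assuming TensorFlow 1.x:

    import tensorflow as tf

    n_noise, n_class = 100, 10
    z = tf.placeholder(tf.float32, [None, n_noise])      # noise
    y = tf.placeholder(tf.float32, [None, n_class])      # one-hot labels

    zy = tf.concat([z, y], axis=1)                       # [N, n_noise + n_class]
    zy = tf.reshape(zy, [-1, 1, 1, n_noise + n_class])   # 1x1 spatial input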
Code example #6
File: TGAN.py (project: AchillesTurtle/TGAN)
    def _build_classifier_base(self,
                               tensor=None,
                               training=False,
                               batch_norm=None):
        nfilt = 128
        if tensor is None:
            tensor = self.input_x_c
        if batch_norm is None:
            batch_norm = self.batch_norm_D
        if batch_norm:

            def bn(tensor, name=None):
                return tf.contrib.layers.batch_norm(tensor,
                                                    is_training=training,
                                                    renorm=BATCH_RENORM,
                                                    decay=BATCH_NORM_DECAY)
        else:
            bn = tf.identity

        # tf.layers.conv2d(inputs, filters, kernel_size, strides, padding, ...)
        def do(tensor, rate=0.5, name=None):
            # tf.contrib.layers.dropout takes a keep probability,
            # so convert the drop rate before passing it on
            return tf.contrib.layers.dropout(tensor,
                                             keep_prob=1.0 - rate,
                                             is_training=training)

        # dropout before layers
        with tf.variable_scope('initial_dropout{0}-{1}'.format(
                self.n_channel, nfilt)):
            tensor = tf.layers.dropout(tensor,
                                       rate=0.2,
                                       training=training,
                                       seed=self.seed,
                                       name='do')

        # initial layer
        for it in range(2):
            with tf.variable_scope('first_part-{0}.{1}-{2}'.format(
                    it, self.n_channel, nfilt)):
                tensor = lrelu(
                    bn(tf.layers.conv2d(tensor,
                                        nfilt,
                                        3,
                                        1,
                                        'same',
                                        use_bias=not batch_norm,
                                        kernel_initializer=init_normal(),
                                        name='conv'),
                       name='bn'))
        with tf.variable_scope('first_part-last{0}-{1}'.format(
                self.n_channel, nfilt)):
            tensor = do(lrelu(
                bn(tf.layers.conv2d(tensor,
                                    nfilt,
                                    3,
                                    2,
                                    'same',
                                    use_bias=not batch_norm,
                                    kernel_initializer=init_normal(),
                                    name='conv'),
                   name='bn')),
                        rate=0.5,
                        name='do')

        nfilt = 256
        for it in range(2):
            with tf.variable_scope('second_part-{0}.{1}-{2}'.format(
                    it, self.n_channel, nfilt)):
                tensor = lrelu(
                    bn(tf.layers.conv2d(tensor,
                                        nfilt,
                                        3,
                                        1,
                                        'same',
                                        use_bias=not batch_norm,
                                        kernel_initializer=init_normal(),
                                        name='conv'),
                       name='bn'))
        with tf.variable_scope('second_part-last{0}-{1}'.format(
                self.n_channel, nfilt)):
            tensor = do(lrelu(
                bn(tf.layers.conv2d(tensor,
                                    nfilt,
                                    3,
                                    2,
                                    'same',
                                    use_bias=not batch_norm,
                                    kernel_initializer=init_normal(),
                                    name='conv'),
                   name='bn')),
                        rate=0.5,
                        name='do')
        nfilt = 512
        with tf.variable_scope('third_part{0}-{1}'.format(
                self.n_channel, nfilt)):
            tensor = lrelu(
                bn(tf.layers.conv2d(tensor,
                                    nfilt,
                                    3,
                                    1,
                                    'same',
                                    use_bias=not batch_norm,
                                    kernel_initializer=init_normal(),
                                    name='conv'),
                   name='bn'))
        nfilt = 256
        with tf.variable_scope('third_part{0}-{1}'.format(
                self.n_channel, nfilt)):
            tensor = lrelu(
                bn(tf.layers.conv2d(tensor,
                                    nfilt,
                                    1,
                                    1,
                                    'same',
                                    use_bias=not batch_norm,
                                    kernel_initializer=init_normal(),
                                    name='conv'),
                   name='bn'))
        nfilt = 128
        with tf.variable_scope('third_part{0}-{1}'.format(
                self.n_channel, nfilt)):
            tensor = lrelu(
                bn(tf.layers.conv2d(tensor,
                                    nfilt,
                                    1,
                                    1,
                                    'same',
                                    use_bias=not batch_norm,
                                    kernel_initializer=init_normal(),
                                    name='conv'),
                   name='bn'))

        with tf.variable_scope('last_layer{0}-{1}'.format(
                self.n_channel, nfilt)):
            tensor = tf.reduce_mean(tensor, [1, 2], name='rm')
            tensor = lrelu(tf.layers.dense(tensor, self.n_class))

        return tensor
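
The classifier ends with global average pooling over the spatial axes followed by a dense projection to n_class scores (wrapped in lrelu above). A minimal sketch of that head with illustrative shapes, assuming TensorFlow 1.x:

    import tensorflow as tf

    n_class = 10
    feat = tf.placeholder(tf.float32, [None, 8, 8, 128])  # conv features
    pooled = tf.reduce_mean(feat, [1, 2])                 # [N, 128]
    scores = tf.layers.dense(pooled, n_class)             # [N, n_class]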