    def __call__(self, x, is_training, reuse):
        # return only logits
        h = x
        with tf.variable_scope(self.name_scope, reuse=reuse):
            # hidden layers: every transition except the last one, which is
            # emitted below as the raw-logit output layer
            for i, (in_dim, out_dim) in enumerate(
                    zip(self.layer_list[:-2], self.layer_list[1:-1])):
                h = linear_layer(h, in_dim, out_dim, i)
                #h = batch_norm(h, i, is_training=is_training)
                h = lrelu(h)

            ret = linear_layer(h, self.layer_list[-2], self.layer_list[-1],
                               'output')
        return ret
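These snippets call `linear_layer` and `lrelu` without showing them. A minimal TF1-style sketch that is consistent with the call sites above (the initializer, the 0.2 slope, and the scope naming are assumptions, not the original code):

import tensorflow as tf

def linear_layer(x, in_dim, out_dim, l_id):
    # one fully connected layer; l_id (an int or a string) keeps
    # variable scopes unique per layer
    with tf.variable_scope('linear_{}'.format(l_id)):
        w = tf.get_variable(
            'w', [in_dim, out_dim],
            initializer=tf.random_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [out_dim],
                            initializer=tf.zeros_initializer())
        return tf.matmul(x, w) + b

def lrelu(x, alpha=0.2):
    # leaky ReLU with the common DCGAN slope of 0.2 (assumed)
    return tf.maximum(x, alpha * x)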
Example #2
    def __call__(self, x, is_training, reuse):
        h = x
        with tf.variable_scope(self.name_scope, reuse=reuse):
            for i, (in_dim, out_dim) in enumerate(
                    zip(self.layer_list[:-2], self.layer_list[1:-1])):
                h = linear_layer(h, in_dim, out_dim, i)
                h = batch_norm(h, i, is_training=is_training)
                h = lrelu(h)

            mu = linear_layer(h, self.layer_list[-2], self.layer_list[-1],
                              'mu')
            log_sigma = linear_layer(h, self.layer_list[-2],
                                     self.layer_list[-1], 'log_sigma')

            return mu, log_sigma
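Downstream these two heads are typically combined with the reparameterization trick to draw a differentiable latent sample; a sketch, assuming `log_sigma` is log σ rather than log σ²:

eps = tf.random_normal(tf.shape(mu))
z = mu + tf.exp(log_sigma) * eps  # z ~ N(mu, sigma^2); gradients flow to both heads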
Example #3
    def set_model(self, z, labels, batch_size, is_training, reuse=False):

        # reshape z
        with tf.variable_scope(self.name_scope_reshape, reuse=reuse):
            h = linear_layer(z, self.z_dim,
                             self.in_dim * self.in_dim * self.layer_chanels[0],
                             'reshape')
            h = batch_norm(h, 'reshape', is_training)
            h = lrelu(h)

        h_z = tf.reshape(h,
                         [-1, self.in_dim, self.in_dim, self.layer_chanels[0]])
        # reshape labels
        with tf.variable_scope(self.name_scope_label, reuse=reuse):
            # embed the conditioning labels (not z) into the same spatial
            # volume as the reshaped noise
            h = linear_layer(labels, get_dim(labels),
                             self.in_dim * self.in_dim * self.layer_chanels[0],
                             'label')
            h = batch_norm(h, 'label', is_training)
            h = lrelu(h)

        # concat
        h_label = tf.reshape(
            h, [-1, self.in_dim, self.in_dim, self.layer_chanels[0]])
        h = tf.concat([h_z, h_label], 3)

        # deconvolution
        layer_num = len(self.layer_chanels) - 1
        with tf.variable_scope(self.name_scope_deconv, reuse=reuse):
            for i, (in_chan, out_chan) in enumerate(
                    zip(self.layer_chanels, self.layer_chanels[1:])):
                deconved = deconv_layer(inputs=h,
                                        out_shape=[
                                            batch_size,
                                            self.in_dim * 2**(i + 1),
                                            self.in_dim * 2**(i + 1), out_chan
                                        ],
                                        filter_width=5,
                                        filter_hight=5,
                                        stride=2,
                                        l_id=i)
                if i == layer_num - 1:
                    h = tf.nn.tanh(deconved)
                else:
                    bn_deconved = batch_norm(deconved, i, is_training)
                    #h = tf.nn.relu(bn_deconved)
                    h = lrelu(bn_deconved)

        return h
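`deconv_layer` is not shown; a sketch of a `tf.nn.conv2d_transpose` wrapper matching the keyword arguments used above (the `filter_hight` spelling is kept to match the call sites; the initializer and the absence of a bias are assumptions):

def deconv_layer(inputs, out_shape, filter_width, filter_hight, stride, l_id):
    # transposed convolution; filter shape is [h, w, out_channels, in_channels]
    in_chan = inputs.get_shape().as_list()[-1]
    with tf.variable_scope('deconv_{}'.format(l_id)):
        w = tf.get_variable(
            'w', [filter_hight, filter_width, out_shape[-1], in_chan],
            initializer=tf.random_normal_initializer(stddev=0.02))
        return tf.nn.conv2d_transpose(inputs, w, output_shape=out_shape,
                                      strides=[1, stride, stride, 1],
                                      padding='SAME')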
Example #4
    def set_model(self, figs, is_training, reuse=False):
        # return logits (no sigmoid) together with the last conv feature map

        h = figs

        # convolution
        with tf.variable_scope(self.name_scope_conv, reuse=reuse):
            for i, (in_chan, out_chan) in enumerate(
                    zip(self.layer_chanels, self.layer_chanels[1:])):
                if i == 0:
                    conved = conv_layer(inputs=h,
                                        out_num=out_chan,
                                        filter_width=5,
                                        filter_hight=5,
                                        stride=1,
                                        l_id=i)

                    h = tf.nn.relu(conved)
                    #h = lrelu(conved)
                else:
                    conved = conv_layer(inputs=h,
                                        out_num=out_chan,
                                        filter_width=5,
                                        filter_hight=5,
                                        stride=2,
                                        l_id=i)

                    bn_conved = batch_norm(conved, i, is_training)
                    h = tf.nn.relu(bn_conved)
                    #h = lrelu(bn_conved)

        feature_image = h

        # full connect
        dim = get_dim(h)
        h = tf.reshape(h, [-1, dim])

        with tf.variable_scope(self.name_scope_fc, reuse=reuse):
            h = linear_layer(h, dim, self.fc_dim, 'fc')
            h = batch_norm(h, 'fc', is_training)
            h = tf.nn.relu(h)

            h = linear_layer(h, self.fc_dim, self.class_num, 'fc2')

        return h, feature_image
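`batch_norm` and `get_dim` are likewise assumed helpers; minimal sketches consistent with how they are called here:

import numpy as np

def batch_norm(x, name, is_training):
    # thin wrapper giving each call site its own variables; note that
    # tf.layers.batch_normalization puts its moving-average updates into
    # tf.GraphKeys.UPDATE_OPS, which must be run alongside the train op
    return tf.layers.batch_normalization(x, training=is_training,
                                         name='bn_{}'.format(name))

def get_dim(tensor):
    # number of features per example, ignoring the batch dimension
    return int(np.prod(tensor.get_shape().as_list()[1:]))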
Example #5
    def set_model(self, figs, is_training, reuse=False):

        u'''
        return mu and log_sigma of the latent Gaussian.
        '''

        h = figs

        # fully connected layers (figs is expected to be flattened already)
        with tf.variable_scope(self.name_scope, reuse=reuse):
            for i, (in_dim, out_dim) in enumerate(zip(self.layer_list, self.layer_list[1:])):
                h = linear_layer(h, in_dim, out_dim, i)
                h = batch_norm(h, i, is_training)
                h = tf.nn.relu(h)
            dim = get_dim(h)
            mu = linear_layer(h, dim, self.z_dim, 'mu')
            log_sigma = linear_layer(h, dim, self.z_dim, 'log_sigma')
        return mu, log_sigma
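These heads feed the usual VAE objective. Assuming `log_sigma` is log σ, the KL term against a standard normal prior would be:

kl = -0.5 * tf.reduce_sum(
    1.0 + 2.0 * log_sigma - tf.square(mu) - tf.exp(2.0 * log_sigma), axis=1)
kl_loss = tf.reduce_mean(kl)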
Example #6
    def set_model(self, z, y, is_training, reuse=False):
        # return only logits

        h = tf.concat([z, y], 1)

        with tf.variable_scope(self.name_scope, reuse=reuse):
            for i, (in_dim, out_dim) in enumerate(
                    zip(self.layer_list, self.layer_list[1:])):
                ret = linear_layer(h, in_dim, out_dim, i)
                h = batch_norm(ret, i, is_training)
                h = tf.nn.relu(h)
        # ret holds the raw output of the last linear layer; the batch norm
        # and ReLU computed on the final iteration go unused, so the caller
        # receives logits
        return ret
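A hypothetical caller treats the returned value as logits, e.g. with a sigmoid cross-entropy loss; `model`, `z_real`, `z_fake`, and `y` below are illustrative names, and the second call shows what the `reuse` flag is for:

logits_real = model.set_model(z_real, y, is_training=True)
logits_fake = model.set_model(z_fake, y, is_training=True, reuse=True)
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(logits_real), logits=logits_real)
    + tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(logits_fake), logits=logits_fake))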
Example #7
    def set_model(self, figs, labels, is_training, reuse=False):
        fig_shape = figs.get_shape().as_list()
        height, width = fig_shape[1:3]
        class_num = get_dim(labels)
        with tf.variable_scope(self.name_scope_label, reuse=reuse):
            tmp = linear_layer(labels, class_num, height * width, 'reshape')
            tmp = tf.reshape(tmp, [-1, height, width, 1])
        h = tf.concat((figs, tmp), 3)

        # convolution
        with tf.variable_scope(self.name_scope_conv, reuse=reuse):
            for i, (in_chan, out_chan) in enumerate(
                    zip(self.layer_chanels, self.layer_chanels[1:])):

                conved = conv_layer(inputs=h,
                                    out_num=out_chan,
                                    filter_width=5,
                                    filter_hight=5,
                                    stride=2,
                                    l_id=i)

                if i == 0:
                    h = tf.nn.relu(conved)
                    #h = lrelu(conved)
                else:
                    bn_conved = batch_norm(conved, i, is_training)
                    h = tf.nn.relu(bn_conved)
                    #h = lrelu(bn_conved)
        # full connect
        dim = get_dim(h)
        h = tf.reshape(h, [-1, dim])

        with tf.variable_scope(self.name_scope_fc, reuse=reuse):
            h = linear_layer(h, dim, self.fc_dim, 'fc')
            h = batch_norm(h, 'en_fc_bn', is_training)
            h = tf.nn.relu(h)

            mu = linear_layer(h, self.fc_dim, self.z_dim, 'mu')
            # the variable scope is named 'sigma', but this head produces log_sigma
            log_sigma = linear_layer(h, self.fc_dim, self.z_dim, 'sigma')

        return mu, log_sigma
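`conv_layer` is assumed to be the convolutional counterpart of the helpers above; a sketch matching its call sites:

def conv_layer(inputs, out_num, filter_width, filter_hight, stride, l_id):
    # strided convolution; filter shape is [h, w, in_channels, out_channels]
    in_chan = inputs.get_shape().as_list()[-1]
    with tf.variable_scope('conv_{}'.format(l_id)):
        w = tf.get_variable(
            'w', [filter_hight, filter_width, in_chan, out_num],
            initializer=tf.random_normal_initializer(stddev=0.02))
        return tf.nn.conv2d(inputs, w, strides=[1, stride, stride, 1],
                            padding='SAME')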
Example #8
    def set_model(self, z, is_training, reuse=False):
        u'''
        return only logits. not sigmoid(logits).
        '''
        h = z
        with tf.variable_scope(self.name_scope, reuse=reuse):
            for i, (in_dim, out_dim) in enumerate(
                    zip(self.layer_list, self.layer_list[1:])):
                ret = linear_layer(h, in_dim, out_dim, i)
                h = batch_norm(ret, i, is_training)
                h = tf.nn.relu(h)

        # as above, ret is the last linear layer's raw output, so the final
        # batch norm and ReLU never touch the returned logits
        return ret
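Since each network lives under its own variable scope, training typically collects just that scope's variables; a sketch (`model`, `loss`, and the learning rate are assumptions):

t_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                           scope=model.name_scope)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # batch-norm statistics
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss, var_list=t_vars)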