Example #1
    def encoder(self, x, training=True, reuse=None, name=None):

        # [None, 28, 28, 1]  -->  [None, 14, 14, 64]
        h = conv2d(x, 64, kernel_size=4, strides=2, activation=tf.nn.leaky_relu, reuse=reuse, name='e_conv_1')

        # [None, 14, 14, 64] -->  [None, 7, 7, 128]
        h = conv2d(h, 128, kernel_size=4, strides=2, reuse=reuse, name='e_conv_2')
        h = batch_norm(h, training=training, reuse=reuse, name='e_bn_1')
        h = tf.nn.leaky_relu(h)

        # [None, 7, 7, 128]  -->  [None, 7*7*128]
        h = tf.reshape(h, [-1, 7*7*128])

        # [None, 7*7*128] -->  [None, 1024]
        h = dense(h, 1024, reuse=reuse, name='e_dense_1')
        h = batch_norm(h, training=training, reuse=reuse, name='e_bn_2')
        h = tf.nn.leaky_relu(h)

        # [None, 1024] -->  [None, 2*self.z_dim]
        h = dense(h, 2*self.z_dim, reuse=reuse, name='e_dense_2')

        # Assign names to final outputs
        mean = tf.identity(h[:,:self.z_dim], name=name+"_mean")
        log_sigma = tf.identity(h[:,self.z_dim:], name=name+"_log_sigma")
        return mean, log_sigma
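
The final dense layer has 2*self.z_dim units because it packs the Gaussian mean and log-sigma into a single tensor before splitting them. A minimal sketch of how the two returned tensors could feed a reparameterized sampling step; the sampling code below is an assumption for illustration, and `vae` and `images` are hypothetical names from the surrounding training code:

# Hypothetical usage of the encoder outputs (reparameterization trick).
# `vae` and `images` are assumed to exist in the surrounding training code.
mean, log_sigma = vae.encoder(images, training=True, name='encoder')
eps = tf.random_normal(tf.shape(mean))       # unit Gaussian noise
z = mean + tf.exp(log_sigma) * eps           # z ~ N(mean, exp(log_sigma)^2)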
Example #2
	def make_single_graph(self, x, lbls):

		conv1 = layers.max_pool1d(
					layers.batch_norm(
						layers.conv1dLayer(x, filterSize = 19, outputDim = 300, stride = 1, name = "conv1"),
					self.is_train), size = 3, stride = 3, name = "pool1")

		conv2 = layers.max_pool1d(
					layers.batch_norm(
						layers.conv1dLayer(conv1, filterSize = 11, outputDim = 200, stride = 1, name = "conv2"),
					self.is_train), size = 4, stride = 4, name = "pool2")

		conv3 = layers.max_pool1d(
					layers.batch_norm(
						layers.conv1dLayer(conv2, filterSize = 7, outputDim = 200, stride = 1, name = "conv3"),
					self.is_train), size = 4, stride = 4, name = "pool3")

		fc1 = layers.batch_norm(layers.denseLayer(conv3, outputDim = 1000, name = "fc1"), self.is_train)
		fc2 = layers.batch_norm(layers.denseLayer(fc1, outputDim = 1000, name = "fc2"), self.is_train)

		y_conv = layers.readoutLayer(fc2, outputDim = 2, name = "readout")
		loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = lbls, logits = y_conv))
		# train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)

		predictions = tf.argmax(y_conv, 1)

		accuracy    = tf.metrics.accuracy(tf.argmax(lbls, 1), predictions)
		recall      = tf.metrics.recall(tf.argmax(lbls, 1), predictions)
		precision   = tf.metrics.precision(tf.argmax(lbls, 1), predictions)
		auc         = tf.metrics.auc(tf.argmax(lbls, 1), predictions)
		msqe        = tf.metrics.mean_squared_error(tf.argmax(lbls, 1), predictions)

		return accuracy, loss, recall, precision, auc, msqe
Example #3
def generator(n_samples, noise=None, dim=64):
    with tf.variable_scope('generator', reuse=tf.AUTO_REUSE):
        if noise is None:
            noise = tf.random_normal([n_samples, 128])

        x = linear('input', 128, 8 * 4 * 4 * dim, noise)
        x = tf.reshape(x, [-1, 8 * dim, 4, 4])
        x = batch_norm('bn1', x)
        x = tf.nn.relu(x)

        x = conv2d_transpose('c2', 8 * dim, 4 * dim, 5, x)
        x = batch_norm('bn2', x)
        x = tf.nn.relu(x)

        x = conv2d_transpose('c3', 4 * dim, 2 * dim, 5, x)
        x = batch_norm('bn3', x)
        x = tf.nn.relu(x)

        x = conv2d_transpose('c4', 2 * dim, dim, 5, x)
        x = batch_norm('bn4', x)
        x = tf.nn.relu(x)

        x = conv2d_transpose('c5', dim, 3, 5, x)
        x = tf.tanh(x)

        return tf.reshape(x, [-1, 3 * dim * dim])
Example #4
    def decoder(self, z, training=True, reuse=None, name=None):

        # [None, z_dim]  -->  [None, 1024]
        h = dense(z, 1024, reuse=reuse, name='d_dense_1')
        h = batch_norm(h, training=training, reuse=reuse, name='d_bn_1')
        h = tf.nn.relu(h)
        
        # [None, 1024]  -->  [None, 7*7*128]
        h = dense(h, self.min_res*self.min_res*self.min_chans, reuse=reuse, name='d_dense_2')
        h = batch_norm(h, training=training, reuse=reuse, name='d_bn_2')
        h = tf.nn.relu(h)

        # [None, 7*7*128]  -->  [None, 7, 7, 128]
        h = tf.reshape(h, [-1, self.min_res, self.min_res, self.min_chans])

        # [None, 7, 7, 128]  -->  [None, 14, 14, 64]
        h = conv2d_transpose(h, 64, kernel_size=4, strides=2, reuse=reuse, name='d_tconv_1')
        h = batch_norm(h, training=training, reuse=reuse, name='d_bn_3')
        h = tf.nn.relu(h)
                        
        # [None, 14, 14, 64]  -->  [None, 28, 28, 1]
        h = conv2d_transpose(h, 1, kernel_size=4, strides=2, activation=tf.nn.sigmoid, reuse=reuse, name='d_tconv_2')
                        
        # Assign name to final output
        return tf.identity(h, name=name)
Example #5
 def create_network(self, input, training, variable_scope):
     with tf.variable_scope(variable_scope):
         x = input
         with tf.variable_scope(variable_scope + "_bn0"):
             x = layers.batch_norm(x, training, variable_scope + "_bn0",
                                   tf.nn.relu)
         x = tf.layers.dense(
             x,
             LAYER1_SIZE,
             kernel_initializer=tf.random_normal_initializer(stddev=0.1))
         with tf.variable_scope(variable_scope + "_bn1"):
             x = layers.batch_norm(x, training, variable_scope + "_bn1",
                                   tf.nn.relu)
         x = tf.layers.dense(
             x,
             LAYER2_SIZE,
             kernel_initializer=tf.random_normal_initializer(stddev=0.1))
         with tf.variable_scope(variable_scope + "_bn2"):
             x = layers.batch_norm(x, training, variable_scope + "_bn2",
                                   tf.nn.relu)
         x = tf.layers.dense(
             x,
             self.action_dim,
             activation=tf.nn.tanh,
             kernel_initializer=tf.random_normal_initializer(stddev=0.1))
     return x
Example #6
def fc_network(x, pretrained=False, weights=None, biases=None, activation='swish', scope='fc_network', bn_phaze=False,
               keep_prob=0.5):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        if activation == 'swish':
            act_func = util.swish
        elif activation == 'relu':
            act_func = tf.nn.relu
        else:
            act_func = tf.nn.sigmoid

        # NOTE: g_fc_layer1_dim, g_fc_layer2_dim and g_fc_layer3_dim are assumed
        # to be module-level constants defined elsewhere in the source file.
        g_fc_layer1 = layers.fc(x, g_fc_layer1_dim, use_bias=False, scope='g_fc_layer1')
        g_fc_layer1 = layers.batch_norm(g_fc_layer1, bn_phaze, scope='g_fc_layer1_bn')
        g_fc_layer1 = act_func(g_fc_layer1)
        g_fc_layer1 = tf.nn.dropout(g_fc_layer1, keep_prob=keep_prob)

        g_fc_layer2 = layers.fc(g_fc_layer1, g_fc_layer2_dim, use_bias=False, scope='g_fc_layer2')
        g_fc_layer2 = layers.batch_norm(g_fc_layer2, bn_phaze, scope='g_fc_layer2_bn')
        g_fc_layer2 = act_func(g_fc_layer2)
        g_fc_layer2 = tf.nn.dropout(g_fc_layer2, keep_prob=keep_prob)

        g_fc_layer3 = layers.fc(g_fc_layer2, g_fc_layer3_dim, use_bias=False, scope='g_fc_layer3')
        g_fc_layer3 = layers.batch_norm(g_fc_layer3, bn_phaze, scope='g_fc_layer3_bn')
        g_fc_layer3 = act_func(g_fc_layer3)
        g_fc_layer3 = tf.nn.dropout(g_fc_layer3, keep_prob=keep_prob)

        return g_fc_layer3
Example #7
    def discriminator2(self, x, reuse=None):
        with tf.variable_scope("discriminator2", reuse=tf.AUTO_REUSE):
            # 2nd discriminator
            x_2 = lay.conv2d(x, f=64, name='d-conv2d-1')
            x_2 = lay.batch_norm(x_2)
            x_2 = tf.nn.leaky_relu(x_2, alpha=0.1)

            x_2 = lay.conv2d(x_2, f=128, name='d-conv2d-0')
            x_2 = lay.batch_norm(x_2)
            out = tf.nn.leaky_relu(x_2, alpha=0.1)

            return out
Example #8
 def dwconvLayer(self,
                 kernel,
                 multi,
                 stride=1,
                 pad='SAME',
                 activation=-1,
                 batch_norm=False,
                 weight=None):
     with tf.variable_scope('dwconv_' + str(self.layernum)):
         if not isinstance(kernel, list):
             kernel = [kernel, kernel]
         self.result = L.conv2Ddw(self.result,
                                  self.inpsize[3],
                                  kernel,
                                  multi,
                                  'dwconv_' + str(self.layernum),
                                  stride=stride,
                                  pad=pad,
                                  weight=weight)
         if batch_norm:
             self.result = L.batch_norm(self.result,
                                        'batch_norm_' + str(self.layernum))
         self.layernum += 1
         self.activate(activation)
         self.inpsize = self.result.get_shape().as_list()
     return [self.result, list(self.inpsize)]
Example #9
    def discriminator1(self, x, reuse=None):
        with tf.variable_scope("discriminator1", reuse=tf.AUTO_REUSE):
            x_1 = tf.layers.flatten(x)

            x_1 = tf.layers.dense(x_1, units=7 * 7 * 128, name='d-fc-2')
            x_1 = lay.batch_norm(x_1)
            x_1 = tf.nn.leaky_relu(x_1, alpha=0.1)

            x_1 = tf.layers.dense(x_1, units=1024, name='d-fc-1')
            x_1 = lay.batch_norm(x_1)
            x_1 = tf.nn.leaky_relu(x_1, alpha=0.1)

            d1 = tf.layers.dense(x_1, units=1, name='d-fc-0')
            prob = tf.nn.sigmoid(d1)

            return prob
Example #10
 def dwconvLayer(self,
                 kernel,
                 multi,
                 stride=1,
                 pad='SAME',
                 activation=-1,
                 batch_norm=False):
     with tf.variable_scope('dwconv_' + str(self.layernum)):
         if not isinstance(kernel, list):
             kernel = [kernel, kernel]
         self.result = L.conv2Ddw(self.result,
                                  self.inpsize[3],
                                  kernel,
                                  multi,
                                  'dwconv_' + str(self.layernum),
                                  stride=stride,
                                  pad=pad)
         if batch_norm:
             self.result = L.batch_norm(self.result,
                                        'batch_norm_' + str(self.layernum))
         self.layernum += 1
         if pad == 'VALID':
             self.inpsize[1] -= kernel[0] - stride
             self.inpsize[2] -= kernel[1] - stride
         self.inpsize[1] = self.inpsize[1] // stride
         self.inpsize[2] = self.inpsize[2] // stride
         self.inpsize[3] = self.inpsize[3] * multi
         self.activate(activation)
     return [self.result, list(self.inpsize)]
Example #11
def first_block(x,
                target_size,
                noise_dim,
                upsampling='deconv',
                normalization='batch',
                is_training=True):
    if upsampling == 'deconv':
        _x = reshape(x, (1, 1, noise_dim))
        _x = conv2d_transpose(_x,
                              1024,
                              target_size,
                              strides=(1, 1),
                              padding='valid')
    elif upsampling == 'dense':
        _x = dense(x, target_size[0] * target_size[1] * 1024)
        _x = reshape(_x, (target_size[1], target_size[0], 1024))
    else:
        raise ValueError

    if normalization == 'batch':
        _x = batch_norm(_x, is_training=is_training)
    elif normalization == 'layer':
        _x = layer_norm(_x, is_training=is_training)
    elif normalization is None:
        pass
    else:
        raise ValueError
    _x = activation(_x, 'relu')
    return _x
Example #12
 def dwconvLayer(self,
                 kernel,
                 multi,
                 stride=1,
                 pad='SAME',
                 activation=-1,
                 batch_norm=False,
                 weight=None,
                 usebias=True):
     with tf.variable_scope('dwconv_' + str(self.layernum)):
         if not isinstance(kernel, list):
             kernel = [kernel, kernel]
         self.result = L.conv2Ddw(self.result,
                                  self.inpsize[3],
                                  kernel,
                                  multi,
                                  'dwconv_' + str(self.layernum),
                                  stride=stride,
                                  pad=pad,
                                  weight_data=weight,
                                  usebias=usebias)
         if batch_norm:
             self.result = L.batch_norm(self.result,
                                        'batch_norm_' + str(self.layernum),
                                        training=self.bntraining,
                                        epsilon=self.epsilon)
         self.layernum += 1
         self.inpsize = self.result.get_shape().as_list()
         self.activate(activation)
     return self.result
Example #13
 def batch_norm(self):
     with tf.variable_scope('batch_norm' + str(self.layernum)):
         self.result = L.batch_norm(self.result,
                                    'batch_norm_' + str(self.layernum),
                                    training=self.bntraining,
                                    epsilon=self.epsilon)
     return self.result
Example #14
 def convLayer(self,
               size,
               outchn,
               stride=1,
               pad='SAME',
               activation=-1,
               batch_norm=False,
               layerin=None):
     with tf.variable_scope('conv_' + str(self.layernum)):
         if isinstance(size, list):
             kernel = size
         else:
             kernel = [size, size]
         if layerin is not None:
             self.result = layerin[0]
             self.inpsize = list(layerin[1])
         self.result = L.conv2D(self.result,
                                kernel,
                                outchn,
                                'conv_' + str(self.layernum),
                                stride=stride,
                                pad=pad)
         self.varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
         if batch_norm:
             self.result = L.batch_norm(self.result,
                                        'batch_norm_' + str(self.layernum))
         self.layernum += 1
         if pad == 'VALID':
             self.inpsize[1] -= kernel[0] - stride
             self.inpsize[2] -= kernel[1] - stride
         self.inpsize[1] = self.inpsize[1] // stride
         self.inpsize[2] = self.inpsize[2] // stride
         self.inpsize[3] = outchn
         self.activate(activation)
     return [self.result, list(self.inpsize)]
Example #15
    def discriminator(self,input, reuse = True):
        depth = [64,128,256,512,1]
        with tf.variable_scope("Discriminator", reuse = reuse):
            with tf.variable_scope("d_1", reuse= reuse):
                net = lrelu(layers.batch_norm(layers.dc_conv(input, depth[0],'d_w1')))
            with tf.variable_scope("d_2", reuse=reuse):
                net = lrelu(layers.batch_norm(layers.dc_conv(net, depth[1],'d_w2')))
            with tf.variable_scope("d_3",reuse=reuse):
                net = lrelu(layers.batch_norm(layers.dc_conv(net, depth[2],'d_w3')))
            with tf.variable_scope("d_4", reuse= reuse):
                net = lrelu(layers.batch_norm(layers.dc_conv(net, depth[3],'d_w4')))
            with tf.variable_scope("d_5", reuse = reuse):
                net = layers.flatten(net)
                net = layers.dc_dense(net, 1,name = "d_fc")

        return net
Example #16
def img_conv_group(input,
                   conv_num_filter,
                   pool_size,
                   conv_padding=1,
                   conv_filter_size=3,
                   conv_act=None,
                   param_attr=None,
                   conv_with_batchnorm=False,
                   conv_batchnorm_drop_rate=0.0,
                   pool_stride=1,
                   pool_type=None,
                   use_cudnn=True,
                   use_mkldnn=False):
    """
    Image Convolution Group, Used for vgg net.
    """
    tmp = input
    assert isinstance(conv_num_filter, list) or \
        isinstance(conv_num_filter, tuple)

    def __extend_list__(obj):
        if not hasattr(obj, '__len__'):
            return [obj] * len(conv_num_filter)
        else:
            return obj

    conv_padding = __extend_list__(conv_padding)
    conv_filter_size = __extend_list__(conv_filter_size)
    param_attr = __extend_list__(param_attr)
    conv_with_batchnorm = __extend_list__(conv_with_batchnorm)
    conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate)

    for i in range(len(conv_num_filter)):
        local_conv_act = conv_act
        if conv_with_batchnorm[i]:
            local_conv_act = None

        tmp = layers.conv2d(input=tmp,
                            num_filters=conv_num_filter[i],
                            filter_size=conv_filter_size[i],
                            padding=conv_padding[i],
                            param_attr=param_attr[i],
                            act=local_conv_act,
                            use_cudnn=use_cudnn,
                            use_mkldnn=use_mkldnn)

        if conv_with_batchnorm[i]:
            tmp = layers.batch_norm(input=tmp, act=conv_act, in_place=True)
            drop_rate = conv_batchnorm_drop_rate[i]
            if abs(drop_rate) > 1e-5:
                tmp = layers.dropout(x=tmp, dropout_prob=drop_rate)

    pool_out = layers.pool2d(input=tmp,
                             pool_size=pool_size,
                             pool_type=pool_type,
                             pool_stride=pool_stride,
                             use_cudnn=use_cudnn,
                             use_mkldnn=use_mkldnn)
    return pool_out
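
A hedged usage sketch for this helper, assuming `data` is an existing 4-D input variable; the layer sizes below are illustrative only and mirror a VGG-style block:

# Hypothetical call: two 3x3 convolutions with 64 filters each, batch norm
# after every convolution, then 2x2 max pooling. `data` is assumed to exist.
block = img_conv_group(input=data,
                       conv_num_filter=[64, 64],
                       pool_size=2,
                       pool_stride=2,
                       pool_type='max',
                       conv_act='relu',
                       conv_with_batchnorm=True)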
Example #17
	def deconvLayer(self,kernel,outchn,stride=1,pad='SAME',activation=-1,batch_norm=False):
		self.result = L.deconv2D(self.result,kernel,outchn,'deconv_'+str(self.layernum),stride=stride,pad=pad)
		if batch_norm:
			self.result = L.batch_norm(self.result,'batch_norm_'+str(self.layernum),training=self.bntraining,epsilon=self.epsilon)
		self.layernum+=1
		self.inpsize = self.result.get_shape().as_list()
		self.activate(activation)
		return self.result
Example #18
def generator(z, output_channel=1, reuse=False, training=True, kernel_size=4):
    """
    This function creates the generator.

    :param z: random noise, e.g. a tensor of shape [None, 100]
    :param output_channel: number of channels of the generated data, e.g. 3 for SVHN, 1 for MNIST
    :param reuse: ...
    :param training: ...
    :param kernel_size: transposed conv layer's kernel size

    :return: generated (fake) data tensor, e.g. of shape [None, 28, 28, 1] for MNIST
    """
    with tf.variable_scope(GENERATOR, reuse=reuse):
        with tf.name_scope('layer1'):
            projected_z = tf.layers.dense(z, 7 * 7 * 256)
            reshaped_z = tf.reshape(projected_z, [-1, 7, 7, 256])
            layer1 = batch_norm(reshaped_z, training=training)
            layer1 = tf.nn.relu(layer1)

        with tf.name_scope('layer2'):
            layer2 = tf.layers.conv2d_transpose(layer1,
                                                128,
                                                kernel_size,
                                                strides=2,
                                                padding='same')
            layer2 = batch_norm(layer2, training=training)
            layer2 = tf.nn.relu(layer2)

        with tf.name_scope('layer3'):
            layer3 = tf.layers.conv2d_transpose(layer2,
                                                64,
                                                kernel_size,
                                                strides=2,
                                                padding='same')
            layer3 = batch_norm(layer3, training=training)
            layer3 = tf.nn.relu(layer3)

        with tf.name_scope('output'):
            logits = tf.layers.conv2d_transpose(layer3,
                                                output_channel,
                                                kernel_size,
                                                strides=1,
                                                padding='same')
            output = tf.nn.tanh(logits)

        return output
Example #19
def deep_network_with_batchnorm(x,
                                y=None,
                                number_of_classes=2,
                                filters=(16, 32, 64, 128),
                                strides=(2, 1, 2, 1),
                                is_training=True):
    # TODO: Do the same as with deep_network, but this time add batchnorm before each convoulution.

    logits = None
    params = {}
    assert len(filters) == len(strides), \
        'The parameters filters and strides should have the same length, got lengths %d and %d' \
        % (len(filters), len(strides))

    update_ops = []  #Fill this with update_ops from batch_norm

    ###### YOUR CODE #######
    # Build your network and output logits
    out = x

    for i, (n_filters, stride) in enumerate(zip(filters, strides), start=1):
        bn_out, bn_params, update_op = batch_norm(out, is_training)
        conv, conv_params = conv2d(bn_out,
                                   number_of_features=n_filters,
                                   stride=stride,
                                   k_size=3)  # k_size given by assignment
        out = tf.nn.relu(conv)

        for key, value in list(conv_params.items()) + list(bn_params.items()):
            params['conv%d/%s' % (i, key)] = value

        update_ops.append(update_op)

    logits, dense_params = fully_connected_layer(out, number_of_classes)

    for key, value in dense_params.items():
        params['fc/%s' % key] = value

    # END OF YOUR CODE

    if y is None:
        return logits, params, update_ops

    # TODO: Calculate softmax cross-entropy
    #  without using any of the softmax or cross-entropy functions from Tensorflow
    loss = None

    ###### YOUR CODE #######
    # Calculate loss
    h = tf.exp(logits - tf.reduce_max(logits, axis=1, keep_dims=True))
    h /= tf.reduce_sum(h, axis=1, keep_dims=True)

    loss = -tf.reduce_sum(y * tf.log(h), axis=1, keep_dims=True)
    loss = tf.reduce_mean(loss)
    #loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y) # For comparison and debug
    # END OF YOUR CODE

    update_op = tf.group(*tuple(update_ops))
    return logits, loss, params, update_op
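
The manual loss above shifts the logits by their row-wise maximum before exponentiating, which keeps the softmax numerically stable. The commented-out built-in call suggests an obvious sanity check; a small sketch of it, assuming access to the `logits`, `y`, and `loss` tensors built in the function (the check is an assumption, not part of the original code):

# Hypothetical sanity check: the manual cross-entropy should match TensorFlow's
# built-in implementation up to floating-point error.
ref_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
abs_diff = tf.abs(loss - ref_loss)   # expected to be close to zero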
Example #20
	def fcLayer(self,outsize,activation=-1,nobias=False,batch_norm=False):
		with tf.variable_scope('fc_'+str(self.layernum)):
			self.result = L.Fcnn(self.result,self.inpsize[1],outsize,'fc_'+str(self.layernum),nobias=nobias)
			if batch_norm:
				self.result = L.batch_norm(self.result,'batch_norm_'+str(self.layernum),training=self.bntraining,epsilon=self.epsilon)
			self.inpsize[1] = outsize
			self.activate(activation)
			self.layernum+=1
		return self.result
Example #21
    def discriminator(self, x, reuse=None):
        with tf.variable_scope("discriminator", reuse=reuse):
            x = lay.conv2d(x, f=self.df_dim, name='d-conv2d-0')
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = lay.conv2d(x, f=self.df_dim * 2, name='d-conv2d-1')
            x = lay.batch_norm(x)
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = tf.layers.flatten(x)

            x = tf.layers.dense(x, units=self.fc_unit, name='d-fc-0')
            x = lay.batch_norm(x)
            x = tf.nn.leaky_relu(x, alpha=0.1)

            logits = tf.layers.dense(x, units=1, name='d-fc-1')
            prob = tf.nn.sigmoid(logits)

            return prob, logits, x
Example #22
    def generator(self,input,reuse= True):
        depth = [1024,512,256,128,3]

        with tf.variable_scope("Generator", reuse = reuse):
            with tf.variable_scope("g_1",reuse=reuse):
                net = layers.dc_dense(input , 4*4*depth[0],"g_w1")
                net = tf.reshape(net, [-1, 4,4,depth[0]])
                net = tf.nn.relu(net)
            with tf.variable_scope("g_2",reuse= reuse):
                net = tf.nn.relu(layers.batch_norm(layers.dc_deconv(net,depth[1],"g_w2")))
            with tf.variable_scope("g_3",reuse=reuse):
                net = tf.nn.relu(layers.batch_norm(layers.dc_deconv(net,depth[2],"g_w3")))
            with tf.variable_scope("g_4",reuse=reuse):
                net = tf.nn.relu(layers.batch_norm(layers.dc_deconv(net,depth[3],"g_w4")))
            with tf.variable_scope("g_5",reuse=reuse):
                net = layers.dc_deconv(net,depth[4],"g_w5")
                net = tf.nn.tanh(net)

            return net
Example #23
def discriminator(images, reuse=False, alpha=0.2):
    """
    Create Discriminator
    
    :param images: tensor of shape [None, 28, 28, 1]
    :param reuse: ...
    :param alpha: leaky relu alpha
    :return: discriminator output and logits
    """
    with tf.variable_scope(DISCRIMINATOR, reuse=reuse):
        with tf.name_scope('layer1'):
            layer1 = tf.layers.conv2d(images, 64, 4, strides=2, padding='same')
            layer1 = leaky_relu(layer1, alpha)
            # 14x14x64

        with tf.name_scope('layer2'):
            layer2 = tf.layers.conv2d(layer1,
                                      128,
                                      4,
                                      strides=2,
                                      padding='same')
            layer2 = batch_norm(layer2, training=True)
            layer2 = leaky_relu(layer2, alpha)
            # 7x7x128

        with tf.name_scope('layer3'):
            layer3 = tf.layers.conv2d(layer2,
                                      256,
                                      4,
                                      strides=2,
                                      padding='same')
            layer3 = batch_norm(layer3, training=True)
            layer3 = leaky_relu(layer3, alpha)
            # 4x4x256

        # TODO: Make robust to tensor shapes using tensor's get_shape method
        with tf.name_scope('output'):
            flatten = tf.reshape(layer3, [-1, 4 * 4 * 256])
            logits = tf.layers.dense(flatten, 1)
            output = tf.nn.sigmoid(logits)

        return output, logits
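
Examples #18 and #23 are the two halves of a DCGAN for MNIST-sized images. A minimal wiring sketch, assuming `z` and `real_images` placeholders already exist; the non-saturating GAN losses below are an assumption for illustration, not taken from the original code:

# Hypothetical wiring of the generator (Example #18) and this discriminator.
fake_images = generator(z, output_channel=1, training=True)
d_real, d_real_logits = discriminator(real_images)
d_fake, d_fake_logits = discriminator(fake_images, reuse=True)

# Assumed standard DCGAN losses.
d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real_logits,
                                            labels=tf.ones_like(d_real_logits)) +
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits,
                                            labels=tf.zeros_like(d_fake_logits)))
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits,
                                            labels=tf.ones_like(d_fake_logits)))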
Example #24
    def generator(self, z, image_Y, config):
        with tf.variable_scope("generator"):
            h0 = linear(z,
                        100,
                        config.image_size * config.image_size,
                        name="g_h0_lin")
            h0 = tf.reshape(h0, [-1, config.image_size, config.image_size, 1])
            h0 = tf.nn.relu(batch_norm(h0, name="g_bn0"))

            h1 = tf.concat([image_Y, h0], 3)
            h1 = layers.conv(h1, 2, 128, name="g_h1_conv")
            h1 = tf.nn.relu(batch_norm(h1, name="g_bn1"))

            h2 = tf.concat([image_Y, h1], 3)
            h2 = layers.conv(h2, 129, 64, name="g_h2_conv")
            h2 = tf.nn.relu(batch_norm(h2, name="g_bn2"))

            h3 = tf.concat([image_Y, h2], 3)
            h3 = layers.conv(h3, 65, 64, name="g_h3_conv")
            h3 = tf.nn.relu(batch_norm(h3, name="g_bn3"))

            h4 = tf.concat([image_Y, h3], 3)
            h4 = layers.conv(h4, 65, 64, name="g_h4_conv")
            h4 = tf.nn.relu(batch_norm(h4, name="g_bn4"))

            h5 = tf.concat([image_Y, h4], 3)
            h5 = layers.conv(h5, 65, 32, name="g_h5_conv")
            h5 = tf.nn.relu(batch_norm(h5, name="g_bn5"))

            h6 = tf.concat([image_Y, h5], 3)
            h6 = layers.conv(h6, 33, 2, name="g_h6_conv")
            return tf.nn.tanh(h6)
Example #25
    def generator(self, z, reuse=None):
        with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
            x = tf.layers.dense(z, units=self.fc_unit, name='g-fc-0')
            x = lay.batch_norm(x)
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = tf.layers.dense(x,
                                units=8 * 8 * self.gf_dim * 2,
                                name='g-fc-1')
            x = lay.batch_norm(x)
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = tf.reshape(x, shape=[-1, 8, 8, self.gf_dim * 2])

            x = lay.deconv2d(x, f=self.gf_dim, name='g-conv2d-0')
            x = lay.batch_norm(x)
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = lay.deconv2d(x, f=3, name='g-conv2d-1')
            x = tf.nn.tanh(x)

            return x
Example #26
    def discriminator(self, image, reuse=False, config=None):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            h0 = layers.lrelu(layers.conv(image, 3, 64, name='d_h0_conv'))
            h1 = layers.lrelu(
                batch_norm(layers.conv(h0, 64, 128, name='d_h1_conv'),
                           name='d_bn1'))
            h2 = layers.lrelu(
                batch_norm(layers.conv(h1, 128, 256, name='d_h2_conv'),
                           name='d_bn2'))
            h3 = layers.lrelu(
                batch_norm(layers.conv(h2, 256, 512, name='d_h3_conv'),
                           name='d_bn3'))

            h4 = linear(tf.reshape(h3, [config.batch_size, -1]),
                        524288,
                        64,
                        name="d_h4_lin")
            h5 = linear(h4, 64, 1, name="d_h5_lin")
            return h5
Example #27
    def resn1d(self, x1d, reuse=False):
        with tf.variable_scope('resn_1d', reuse=reuse) as scope:
            act = tf.nn.relu
            filters_1d = self.model_config['1d']['filters']
            kernel_size_1d = self.model_config['1d']['kernel_size']
            block_num_1d = self.model_config['1d']['block_num']

            kernel_initializer = tf.glorot_normal_initializer()
            bias_initializer = tf.zeros_initializer()
            kernel_regularizer = tf.contrib.layers.l1_l2_regularizer(
                scale_l1=self.train_config.l1_reg,
                scale_l2=self.train_config.l2_reg)
            bias_regularizer = tf.contrib.layers.l1_l2_regularizer(
                scale_l1=self.train_config.l1_reg,
                scale_l2=self.train_config.l2_reg)

            block = layers.resn1d_regular_block_v3
            for i in np.arange(block_num_1d):
                inputs = x1d if i == 0 else conv_1d
                conv_1d = block(inputs,
                                act,
                                filters_1d,
                                kernel_size_1d,
                                kernel_initializer,
                                bias_initializer,
                                kernel_regularizer,
                                bias_regularizer,
                                self.training,
                                names='conv_layer_{}'.format(i))

            conv_1d = layers.batch_norm(conv_1d, training=self.training)
            conv_1d = act(conv_1d)
            logits = tf.layers.conv1d(
                inputs=conv_1d,
                filters=self.model_config['1d_label_size'],
                kernel_size=kernel_size_1d,
                strides=1,
                padding='same',
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
                kernel_regularizer=kernel_regularizer,
                bias_regularizer=bias_regularizer,
                use_bias=True)
            '''
            lr_logits = tf.layers.conv1d(inputs=inputs, filters=self.model_config['1d_label_size'],
                    kernel_size=kernel_size_1d, strides=1, padding='same',
                    kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
                    kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, use_bias=True)
        return logits + lr_logits
            '''
        return logits
Example #28
    def make_core_graph(self):
        conv1 = layers.max_pool1d(layers.batch_norm(
            layers.conv1dLayer(self.x,
                               filterSize=19,
                               outputDim=300,
                               stride=1,
                               name="conv1"), self.is_train),
                                  size=3,
                                  stride=3,
                                  name="pool1")

        conv2 = layers.max_pool1d(layers.batch_norm(
            layers.conv1dLayer(conv1,
                               filterSize=11,
                               outputDim=200,
                               stride=1,
                               name="conv2"), self.is_train),
                                  size=4,
                                  stride=4,
                                  name="pool2")

        conv3 = layers.max_pool1d(layers.batch_norm(
            layers.conv1dLayer(conv2,
                               filterSize=7,
                               outputDim=200,
                               stride=1,
                               name="conv3"), self.is_train),
                                  size=4,
                                  stride=4,
                                  name="pool3")

        fc1 = layers.batch_norm(
            layers.denseLayer(conv3, outputDim=1000, name="fc1"),
            self.is_train)
        fc2 = layers.batch_norm(
            layers.denseLayer(fc1, outputDim=1000, name="fc2"), self.is_train)

        return fc2
Example #29
def generator(inputs, batch_size, training):
    with tf.name_scope('generator'):
        net = layers.fully_connected_layer(1, inputs, 4 * 4 * 512, None)
        net = tf.reshape(net, [batch_size, 4, 4, 512])
        net = layers.batch_norm(net, training, name='bn1')
        net = layers.conv2d_transpose_layer(1,
                                            net, [5, 5, 256],
                                            batch_size,
                                            stride=2)
        net = layers.batch_norm(net, training, name='bn2')
        net = layers.conv2d_transpose_layer(2,
                                            net, [5, 5, 128],
                                            batch_size,
                                            stride=2)
        net = layers.batch_norm(net, training, name='bn3')
        net = layers.conv2d_transpose_layer(3,
                                            net, [5, 5, 1],
                                            batch_size,
                                            tf.nn.sigmoid,
                                            stride=2,
                                            zero_biases=True)

        return net
Example #30
def discriminator(inputs, dim=64):
    with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
        x = tf.reshape(inputs, [-1, 3, dim, dim])

        x = conv2d('c1', 3, dim, 5, x, stride=2)
        x = tf.nn.leaky_relu(x)

        x = conv2d('c2', dim, 2 * dim, 5, x, stride=2)
        x = batch_norm('bn2', x)
        x = tf.nn.leaky_relu(x)

        x = conv2d('c3', 2 * dim, 4 * dim, 5, x, stride=2)
        x = batch_norm('bn3', x)
        x = tf.nn.leaky_relu(x)

        x = conv2d('c4', 4 * dim, 8 * dim, 5, x, stride=2)
        x = batch_norm('bn4', x)
        x = tf.nn.leaky_relu(x)

        x = tf.reshape(x, [-1, 8 * 4 * 4 * dim])
        x = linear('output', 8 * 4 * 4 * dim, 1, x)

        return tf.reshape(x, [-1])
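
This critic is the counterpart of the generator in Example #3: the flat [-1, 3*dim*dim] output is reshaped back into an image at the top of the function. A minimal pairing sketch with a WGAN-style critic loss, which is my assumption about how the two functions are meant to be used rather than something stated in the snippets (`real_data` and `batch_size` are assumed):

# Hypothetical pairing with the generator from Example #3 (assumed WGAN-style losses).
fake_data = generator(batch_size)             # shape [batch_size, 3*64*64] with dim=64
critic_fake = discriminator(fake_data)        # shape [batch_size]
critic_real = discriminator(real_data)        # shape [batch_size]

d_loss = tf.reduce_mean(critic_fake) - tf.reduce_mean(critic_real)
g_loss = -tf.reduce_mean(critic_fake)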
Example #31
def get_model(X, batch_size, image_dimension):

	input_shape = (batch_size, 3, image_dimension[0], image_dimension[1])
	all_parameters = []
	acc_parameters = []

	#############################################
	# a first convolution with 64 (3, 3) filters
	output, output_test, params, output_shape = convolutional(X, X, input_shape, 64, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# a second convolution with 128 (3, 3) filters
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 128, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')
	
	#############################################
	# 2 convolutional layers with 256 (3, 3) filters
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 256, (3, 3))
	all_parameters += params
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 256, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# Fully connected
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 1024, (1, 1))
	all_parameters += params
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 1024, (1, 1))
	all_parameters += params

	# avgpool with size=(4, 4) and fully connected
	output, output_test, params, output_shape = avgpool(output, output_test, output_shape, (4, 4))
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 10, (1, 1))
	all_parameters += params

	output, output_test, params, output_shape, cacc_parameters = batch_norm(output, output_test, output_shape)
	acc_parameters += cacc_parameters
	all_parameters += params

	# softmax
	output = multi_dim_softmax(output)
	output_test = multi_dim_softmax(output_test)

	#
	return output, output_test, all_parameters, acc_parameters
Example #32
def img_conv_group(input,
                   conv_num_filter,
                   pool_size,
                   conv_padding=1,
                   conv_filter_size=3,
                   conv_act=None,
                   conv_with_batchnorm=False,
                   conv_batchnorm_drop_rate=None,
                   pool_stride=1,
                   pool_type=None,
                   main_program=None,
                   startup_program=None):
    """
    Image Convolution Group, Used for vgg net.
    """
    tmp = input
    assert isinstance(conv_num_filter, list) or \
        isinstance(conv_num_filter, tuple)

    def __extend_list__(obj):
        if not hasattr(obj, '__len__'):
            return [obj] * len(conv_num_filter)
        else:
            return obj

    conv_padding = __extend_list__(conv_padding)
    conv_filter_size = __extend_list__(conv_filter_size)
    conv_with_batchnorm = __extend_list__(conv_with_batchnorm)
    conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate)

    for i in range(len(conv_num_filter)):
        local_conv_act = conv_act
        if conv_with_batchnorm[i]:
            local_conv_act = None

        tmp = layers.conv2d(
            input=tmp,
            num_filters=conv_num_filter[i],
            filter_size=conv_filter_size[i],
            padding=conv_padding[i],
            act=local_conv_act,
            main_program=main_program,
            startup_program=startup_program)

        if conv_with_batchnorm[i]:
            tmp = layers.batch_norm(
                input=tmp,
                act=conv_act,
                main_program=main_program,
                startup_program=startup_program)
            drop_rate = conv_batchnorm_drop_rate[i]
            if abs(drop_rate) > 1e-5:
                tmp = layers.dropout(
                    x=tmp,
                    dropout_prob=drop_rate,
                    main_program=main_program,
                    startup_program=startup_program)

    pool_out = layers.pool2d(
        input=tmp,
        pool_size=pool_size,
        pool_type=pool_type,
        pool_stride=pool_stride,
        main_program=main_program,
        startup_program=startup_program)
    return pool_out