def __init__(self,
               name,
               kernel_shape,
               bias_shape,
               padding,
               stride=1,
               activation_func=None):
        NetworkLayer.__init__(self, name)
        with tf.variable_scope(name):
            self.weights = tf.get_variable(
                'w',
                kernel_shape,
                initializer=layers.xavier_initializer_conv2d(uniform=False))
            self.bias = tf.get_variable(
                'b', bias_shape, initializer=tf.random_normal_initializer)
        assert self.weights.name == name + '/w:0' and self.bias.name == name + '/b:0'
        tf.add_to_collection(self.weights.name, self.weights)
        tf.add_to_collection(self.bias.name, self.bias)

        self.__activation_func = activation_func
        self.__padding = padding
        self.__stride = stride
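
The snippet above stops at the constructor, so the forward pass is not shown. A minimal sketch of how the stored tensors could be applied, assuming NHWC inputs and that the method sits in the same class (none of this is from the original source):

    def __call__(self, inputs):
        # hypothetical forward pass: convolve, add bias, then apply the optional activation
        conv = tf.nn.conv2d(inputs,
                            self.weights,
                            strides=[1, self.__stride, self.__stride, 1],
                            padding=self.__padding)
        out = tf.nn.bias_add(conv, self.bias)
        return self.__activation_func(out) if self.__activation_func else out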
Example No. 2
 def encoder(self, x):
     shapes = []
     n_hidden = [1] + self.n_hidden
     input = x
     for i, k_size in enumerate(self.kernel_size):
         w = tf.get_variable(
             'enc_w{}'.format(i),
             shape=[k_size, k_size, n_hidden[i], n_hidden[i + 1]],
             initializer=layers.xavier_initializer_conv2d(),
             regularizer=self.reg)
         b = tf.get_variable('enc_b{}'.format(i),
                             shape=[n_hidden[i + 1]],
                             initializer=tf.zeros_initializer())
         shapes.append(input.get_shape().as_list())
         enc_i = tf.nn.conv2d(input,
                              w,
                              strides=[1, 2, 2, 1],
                              padding='SAME')
         enc_i = tf.nn.bias_add(enc_i, b)
         enc_i = tf.nn.relu(enc_i)
         input = enc_i
     return input, shapes
Example No. 3
def conv2d(input, filter_shape, strides=2, padding='SAME', name='conv'):
    with tf.variable_scope(name):
        w = tf.get_variable(name='w',
                            shape=filter_shape,
                            dtype=tf.float32,
                            initializer=xavier_initializer_conv2d())

        tf.summary.histogram('w_' + name, w)

        b = tf.get_variable(name='b',
                            shape=[filter_shape[-1]],
                            dtype=tf.float32,
                            initializer=tf.zeros_initializer())

        tf.summary.histogram('b_' + name, b)

        conv = tf.nn.conv2d(input=input,
                            filter=w,
                            strides=[1, strides, strides, 1],
                            padding=padding,
                            use_cudnn_on_gpu=True)
        return lrelu(conv + b, 0.02)
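
A short usage sketch for this helper, assuming `xavier_initializer_conv2d` is imported from `tensorflow.contrib.layers` and that the `lrelu` helper referenced in the return statement is defined elsewhere in the module:

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
# 5x5 kernels, 3 input channels, 32 feature maps, spatial downsampling by 2
feat = conv2d(images, filter_shape=[5, 5, 3, 32], strides=2, name='conv1')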
Example No. 4
    def __init__(self, label_size, network_architecture, activation=tf.nn.relu, max_grad_norm=1,
                 learning_rate=0.001, batch_size=100, save_path=None, load_model=None):
        """
        :param dict network_architecture: dictionary with the following elements
            n_input: shape of input
            n_z: dimensionality of latent space

        :param activation: activation function (TensorFlow function)
        :param float learning_rate:
        :param int batch_size:
        """
        self.network_architecture = network_architecture
        self.max_grad_norm = max_grad_norm
        self.activation = activation
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.label_size = label_size

        # Initializer
        if "relu" in self.activation.__name__:
            self.ini_c, self.ini = variance_scaling_initializer(), variance_scaling_initializer()
        else:
            self.ini_c, self.ini = xavier_initializer_conv2d(), xavier_initializer()

        # Create network
        self._create_network()

        # Summary
        tf.summary.scalar("loss", self.loss)
        # Launch the session
        self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
        # Summary writer for tensor board
        self.summary = tf.summary.merge_all()
        if save_path:
            self.writer = tf.summary.FileWriter(save_path, self.sess.graph)
        # Load model
        if load_model:
            self.saver = tf.train.Saver()
            self.saver.restore(self.sess, load_model)
Example No. 5
def gen_nlayer_conv_decoder(X, decoder_para):
	'''
		generate an n-layer convolutional decoder

		@decoder_para: dict, parameters of the method, including:
			@@kernal_size: list, shape=[n:4], [[height, width, output_channel, input_channel], ...];
				note that the elements of kernal_size keep the same order as in the encoder,
				i.e. the input_channel of the encoder is the output_channel of the decoder.
			@@strides: list, shape=[n:4], [[batch_stride, height_stride, width_stride, channel_stride], ...]
			@@output_size: list, shape=[4:], [batch, height, width, channel]
	'''

	kernal_size = decoder_para['kernal_size']
	strides = decoder_para['strides']
	output_size = decoder_para['output_size']
	
	last_layer = X
	n_layers = len(kernal_size)
	for l_idx in range(n_layers):
		knl_w = tf.get_variable('dec_w_%d'%l_idx, shape=kernal_size[n_layers - l_idx - 1],\
		 initializer=layers.xavier_initializer_conv2d())
		knl_b = tf.Variable(tf.zeros([kernal_size[n_layers - l_idx - 1][-2]]))
		stride = strides[n_layers - l_idx - 1]

		#calculate the output shape
		last_ly_shape = last_layer.get_shape().as_list()
		out_height = last_ly_shape[1] * stride[1] if l_idx!=n_layers-1 else output_size[1]
		out_width = last_ly_shape[2] * stride[2] if l_idx!=n_layers-1 else output_size[2]
		out_channel = kernal_size[n_layers - l_idx - 1][-2] if l_idx!=n_layers-1 else output_size[3]

		#generate last layer
		last_layer = tf.nn.bias_add(tf.nn.conv2d_transpose(last_layer, knl_w, \
		tf.stack([output_size[0], out_height, out_width, out_channel]), strides=stride, padding='SAME'),\
		knl_b)
		last_layer = tf.nn.relu(last_layer)
	
	return last_layer
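
Because `kernal_size` keeps the encoder's ordering, the loop above walks the list backwards. A hedged usage sketch with made-up shapes (a batch of eight 7x7x32 codes decoded back to 28x28x1 images) illustrates that convention:

code = tf.placeholder(tf.float32, [8, 7, 7, 32])
decoder_para = {
    # encoder order: [height, width, output_channel, input_channel]
    'kernal_size': [[3, 3, 1, 16],     # first encoder layer == last decoder layer
                    [3, 3, 16, 32]],   # last encoder layer == first decoder layer
    'strides': [[1, 2, 2, 1], [1, 2, 2, 1]],
    'output_size': [8, 28, 28, 1],
}
reconstruction = gen_nlayer_conv_decoder(code, decoder_para)  # shape [8, 28, 28, 1]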
Example No. 6
def gen_conv_encoder(encoder_para):
    '''
        generate a one-layer convolutional encoder

        @encoder_para: dict, parameters of the method, including:
            @@img_size: np.ndarray, shape=[4:], the size of the input image
            @@kernal_size: np.ndarray, shape=[4:], [height, width, input_channel, output_channel]
            @@strides: np.ndarray, shape=[4:], [batch_stride, height_stride, width_stride, channel_stride]
    '''

    img_size = encoder_para['img_size']
    kernal_size = encoder_para['kernal_size']
    strides = encoder_para['strides']

    # generate X and weight
    X = tf.placeholder(tf.float32,
                       img_size)  #batch_size, img_width, img_height, channel
    enc_knl = tf.get_variable("enc_knl", shape=kernal_size, \
    initializer=layers.xavier_initializer_conv2d())
    enc_b = tf.Variable(tf.zeros([kernal_size[-1]], dtype=tf.float32))
    #generate conv layer
    conv_layer = tf.nn.conv2d(X, enc_knl, strides, padding='SAME')
    conv_layer = tf.nn.relu(tf.nn.bias_add(conv_layer, enc_b))
    return conv_layer, X
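
A matching usage sketch for the one-layer encoder, with illustrative shapes (a batch of eight 28x28 grayscale images, 16 feature maps, stride 2):

encoder_para = {
    'img_size': [8, 28, 28, 1],
    'kernal_size': [3, 3, 1, 16],
    'strides': [1, 2, 2, 1],
}
conv_layer, X = gen_conv_encoder(encoder_para)  # conv_layer shape: [8, 14, 14, 16]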
Example No. 7
 def conv2d(input_node,
            channels_n,
            level,
            is_training,
            stride=1,
            activation=True):
     input_shape = input_node.get_shape().as_list()
     assert len(input_shape) == 4, 'Tensor with rank 4 is expected.'
     in_channels_n = input_shape[3]
     with tf.variable_scope(f'conv_{level}'):
         Wconv1 = tf.get_variable(f"W_{level}",
                                  shape=[3, 3, in_channels_n, channels_n],
                                  initializer=xavier_initializer_conv2d())
         bconv1 = tf.get_variable(f"b_{level}",
                                  shape=[channels_n],
                                  initializer=tf.zeros_initializer)
         conv = tf.nn.conv2d(input_node,
                             Wconv1,
                             strides=[1, stride, stride, 1],
                             padding='SAME') + bconv1
         batch_norm = tf.layers.batch_normalization(conv,
                                                    training=is_training)
         output = tf.nn.relu(batch_norm) if activation else batch_norm
     return output
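
A possible call site for this helper, assuming a 4-D NHWC feature tensor and a boolean `is_training` placeholder (the names here are illustrative only):

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
is_training = tf.placeholder(tf.bool, name='is_training')
h1 = conv2d(x, channels_n=16, level=1, is_training=is_training)              # 32x32x16
h2 = conv2d(h1, channels_n=32, level=2, is_training=is_training, stride=2)   # 16x16x32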
Example No. 8
 def conv2d(self,
            inputs,
            filters=16,
            kernel_size=3,
            strides=1,
            padding='VALID',
            activation=None,
            kernel_initializer=layers.xavier_initializer_conv2d(),
            bias_initializer=tf.constant_initializer(0.2),
            kernel_regularizer=layers.l2_regularizer(2e-4),
            use_bias=True,
            name="conv"):
     return tf.layers.conv2d(inputs,
                             filters=filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding=padding,
                             data_format="channels_first",
                             activation=activation,
                             kernel_initializer=kernel_initializer,
                             kernel_regularizer=kernel_regularizer,
                             bias_initializer=bias_initializer,
                             use_bias=use_bias,
                             name=name)
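
Since this wrapper hard-codes data_format="channels_first", inputs must be laid out as NCHW. A hypothetical call, assuming the method lives on a model object called `net`:

x = tf.placeholder(tf.float32, [None, 3, 32, 32])  # NCHW: channels before height/width
y = net.conv2d(x, filters=32, kernel_size=3, strides=2, padding='SAME', name='conv1')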
Example No. 9
    def __init_model__(self, input_shape):
        self.states = tf.placeholder(tf.float32, [None] + list(input_shape),
                                     name='states')
        self.actions = tf.placeholder(tf.float32, [None, 1], name='actions')
        self.weights = tf.placeholder(tf.float32, [None, 1], name='weights')
        self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        self.beta = tf.placeholder(tf.float32, name='beta')

        conv1 = tf.layers.conv2d(
            inputs=self.states,
            filters=64,
            kernel_size=8,
            strides=4,
            data_format='channels_first',
            activation=tf.nn.tanh,
            kernel_initializer=xavier_initializer_conv2d())
        h1 = tf.layers.dense(inputs=tf.layers.flatten(conv1),
                             units=128,
                             activation=tf.nn.tanh,
                             kernel_initializer=xavier_initializer())
        self.prob_1 = tf.layers.dense(inputs=h1,
                                      units=1,
                                      activation=tf.sigmoid,
                                      kernel_initializer=xavier_initializer())
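        # clamp probabilities into (0.0001, 0.9999) so the log terms in the entropy below stay finite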
        self.prob_1 = self.prob_1 * 0.9998 + 0.0001
        prob_0 = 1 - self.prob_1
        entropy = self.prob_1 * tf.log(1 / self.prob_1) + prob_0 * tf.log(
            1 / prob_0)
        regularizer = tf.reduce_mean(entropy)

        loss = tf.losses.log_loss(labels=self.actions,
                                  predictions=self.prob_1,
                                  weights=self.weights)

        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        self.train_op = optimizer.minimize(loss - self.beta * regularizer)
Example No. 10
  def construct_conv_weights(self):
    weights = {}

    dtype = tf.float32
    conv_initializer = contrib_layers.xavier_initializer_conv2d(dtype=dtype)
    fc_initializer = contrib_layers.xavier_initializer(dtype=dtype)
    k = 3

    weights['conv1'] = tf.get_variable('conv1', [k, k, self.channels, self.dim_hidden], initializer=conv_initializer, dtype=dtype)
    weights['b1'] = tf.Variable(tf.zeros([self.dim_hidden]))
    weights['conv2'] = tf.get_variable('conv2', [k, k, self.dim_hidden, self.dim_hidden], initializer=conv_initializer, dtype=dtype)
    weights['b2'] = tf.Variable(tf.zeros([self.dim_hidden]))
    weights['conv3'] = tf.get_variable('conv3', [k, k, self.dim_hidden, self.dim_hidden], initializer=conv_initializer, dtype=dtype)
    weights['b3'] = tf.Variable(tf.zeros([self.dim_hidden]))
    weights['conv4'] = tf.get_variable('conv4', [k, k, self.dim_hidden, self.dim_hidden], initializer=conv_initializer, dtype=dtype)
    weights['b4'] = tf.Variable(tf.zeros([self.dim_hidden]))
    if FLAGS.datasource == 'miniimagenet' or FLAGS.datasource == 'dclaw':
      # assumes max pooling
      weights['w5'] = tf.get_variable('w5', [self.dim_hidden*5*5, self.dim_output], initializer=fc_initializer)
      weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')
    else:
      weights['w5'] = tf.Variable(tf.random_normal([self.dim_hidden, self.dim_output]), name='w5')
      weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')
    return weights
Example No. 11
    def encoder(self, imgs, activation, is_training, batch_size, img_shape,
                channels):
        with tf.variable_scope('encoder',
                               reuse=tf.AUTO_REUSE,
                               initializer=xavier_initializer_conv2d(),
                               regularizer=l2_regularizer(0.01)):
            # stn >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
            n_fc = 6
            initial = np.array([[1., 0, 0], [0, 1., 0]])
            initial = initial.astype('float32').flatten()

            W_fc1 = tf.Variable(
                tf.zeros(shape=[img_shape * img_shape * channels, n_fc]),
                name='W_fc1',
                validate_shape=False)
            b_fc1 = tf.Variable(initial_value=initial, name='b_fc1')
            h_fc1 = tf.matmul(
                tf.zeros([batch_size, img_shape * img_shape * channels]),
                W_fc1) + b_fc1

            h_trans = transformer(imgs, h_fc1)
            # stn <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

            e_conv = self.coord_conv(h_trans,
                                     48,
                                     3,
                                     padding='same',
                                     activation=None)
            e_conv = tf.layers.batch_normalization(e_conv,
                                                   training=is_training,
                                                   fused=True)
            e_conv = activation(e_conv)
            e_conv = tf.layers.max_pooling2d(e_conv, 2, 2)

            e_conv = self.coord_conv(e_conv,
                                     92,
                                     3,
                                     padding='same',
                                     activation=None)
            e_conv = tf.layers.batch_normalization(e_conv,
                                                   training=is_training,
                                                   fused=True)
            e_conv = activation(e_conv)
            e_conv = tf.layers.max_pooling2d(e_conv, 2, 2)

            e_conv = self.coord_conv(e_conv,
                                     256,
                                     3,
                                     padding='same',
                                     activation=None)
            e_conv = tf.layers.batch_normalization(e_conv,
                                                   training=is_training,
                                                   fused=True)
            e_conv = activation(e_conv)
            e_conv = tf.layers.max_pooling2d(e_conv, 2, 2)

            e_conv = self.coord_conv(e_conv,
                                     256,
                                     3,
                                     padding='same',
                                     activation=None)
            e_conv = tf.layers.batch_normalization(e_conv,
                                                   training=is_training,
                                                   fused=True)
            e_conv = activation(e_conv)
            e_conv = tf.layers.max_pooling2d(e_conv, 2, 2)

            e_conv = self.coord_conv(e_conv,
                                     256,
                                     3,
                                     padding='same',
                                     activation=None)
            e_conv = tf.layers.batch_normalization(e_conv,
                                                   training=is_training,
                                                   fused=True)
            e_conv = activation(e_conv)
            e_conv = tf.layers.max_pooling2d(e_conv, 2, 2)

            e_conv = self.coord_conv(e_conv,
                                     256,
                                     3,
                                     padding='same',
                                     activation=None)
            e_conv = tf.layers.batch_normalization(e_conv,
                                                   training=is_training,
                                                   fused=True)
            e_conv = activation(e_conv)
            e_conv = tf.layers.max_pooling2d(e_conv, 2, 2)

            lv = tf.layers.flatten(e_conv)
            return lv
Example No. 12
    def __init__(self, params, pretrained_model_path=""):
        self.params = params
        self.pretrained_model_path = pretrained_model_path
        dicts = helper.load_dictionaries(self.params.dicts_file)
        self.word2id = dicts["word2id"]
        self.id2word = dicts["id2word"]
        self.char2id = dicts["char2id"]
        self.id2char = dicts["id2char"]
        self.tag2id = dicts["tag2id"]
        self.id2tag = dicts["id2tag"]

        self.pretrained_emb = np.zeros(shape=(len(self.word2id),
                                              self.params.word_dim))
        if self.pretrained_model_path == "" and self.params.train != "" and self.params.pretrained_emb != "":
            self.pretrained_emb = helper.load_word_emb(
                self.word2id, self.pretrained_emb, self.params.pretrained_emb)

        # build model
        self.tf_word_ids = tf.placeholder(dtype=tf.int32,
                                          shape=[None, None],
                                          name="word_ids")
        self.tf_sentence_lengths = tf.placeholder(dtype=tf.int32,
                                                  shape=[None],
                                                  name="sentence_lengths")
        self.tf_labels = tf.placeholder(dtype=tf.int32,
                                        shape=[None, None],
                                        name="labels")
        self.tf_dropout = tf.placeholder(dtype=tf.float32,
                                         shape=[],
                                         name="drop_out")
        self.tf_learning_rate = tf.placeholder(dtype=tf.float32,
                                               shape=[],
                                               name="learning_rate")
        self.tf_char_ids = tf.placeholder(dtype=tf.int32,
                                          shape=[None, None, None],
                                          name="char_ids")
        self.tf_word_lengths = tf.placeholder(dtype=tf.int32,
                                              shape=[None, None],
                                              name="word_lengths")
        self.tf_raw_word = tf.placeholder(dtype=tf.string,
                                          shape=[None, None],
                                          name="raw_word")

        with tf.variable_scope("word_embedding"):
            tf_word_embeddings = tf.Variable(self.pretrained_emb,
                                             dtype=tf.float32,
                                             trainable=True,
                                             name="word_embedding")
            self.input = tf.nn.embedding_lookup(tf_word_embeddings,
                                                self.tf_word_ids,
                                                name="embedded_words")

        with tf.variable_scope("char_cnn"):
            tf_char_embeddings = tf.get_variable(
                name="char_embeddings",
                dtype=tf.float32,
                shape=[len(self.char2id), self.params.char_dim],
                trainable=True,
                initializer=xavier_initializer())
            embedded_cnn_chars = tf.nn.embedding_lookup(
                tf_char_embeddings,
                self.tf_char_ids,
                name="embedded_cnn_chars")
            conv1 = tf.layers.conv2d(
                inputs=embedded_cnn_chars,
                filters=self.params.nb_filters_1,
                kernel_size=(1, 3),
                strides=(1, 1),
                padding="same",
                name="conv1",
                kernel_initializer=xavier_initializer_conv2d())
            conv2 = tf.layers.conv2d(
                inputs=conv1,
                filters=self.params.nb_filters_2,
                kernel_size=(1, 3),
                strides=(1, 1),
                padding="same",
                name="conv2",
                kernel_initializer=xavier_initializer_conv2d())
            char_cnn = tf.reduce_max(conv2, axis=2)
            self.input = tf.concat([self.input, char_cnn], axis=-1)

        with tf.variable_scope("elmo_emb"):
            elmo = hub.Module("/elmo2", trainable=False)
            embeddings = \
                elmo(inputs={"tokens": self.tf_raw_word, "sequence_len": self.tf_sentence_lengths}, signature="tokens",
                     as_dict=True)["elmo"]  # num_sent, max_sent_len, 1024
            elmo_emb = tf.layers.dense(inputs=embeddings,
                                       units=self.params.elmo_dim,
                                       activation=None)
            self.input = tf.concat([self.input, elmo_emb], axis=-1)

        self.input = tf.nn.dropout(self.input, self.tf_dropout)

        with tf.variable_scope("bi_lstm_words"):
            cell_fw = tf.contrib.rnn.LSTMCell(self.params.word_hidden_size)
            cell_bw = tf.contrib.rnn.LSTMCell(self.params.word_hidden_size)
            (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
                cell_fw,
                cell_bw,
                self.input,
                sequence_length=self.tf_sentence_lengths,
                dtype=tf.float32)
            self.output = tf.concat([output_fw, output_bw], axis=-1)
            ntime_steps = tf.shape(self.output)[1]
            self.output = tf.reshape(self.output,
                                     [-1, 2 * params.word_hidden_size])
            layer1 = tf.nn.dropout(
                tf.layers.dense(inputs=self.output,
                                units=params.word_hidden_size,
                                activation=None,
                                kernel_initializer=xavier_initializer()),
                self.tf_dropout)
            pred = tf.layers.dense(inputs=layer1,
                                   units=len(self.tag2id),
                                   activation=None,
                                   kernel_initializer=xavier_initializer())
            self.logits = tf.reshape(pred, [-1, ntime_steps, len(self.tag2id)])

            # compute loss value using crf
            log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
                self.logits, self.tf_labels, self.tf_sentence_lengths)
        with tf.variable_scope("loss_and_opt"):
            self.tf_loss = tf.reduce_mean(-log_likelihood)
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.tf_learning_rate)
            self.tf_train_op = optimizer.minimize(self.tf_loss)
Example No. 13
    def __init__(self, params=None):
        self.params = params

        self.keep_prob = params["keep_prob"]
        self.nb_epochs = params["nb_epochs"]

        # load word and char dictionaries
        dicts = pickle.load(open(params["dicts_file"], "rb"))
        self.w2i = dicts["w2i"]
        self.i2w = dicts["i2w"]
        self.c2i = dicts["c2i"]
        self.i2c = dicts["i2c"]

        self.word_dim = params["word_dim"]
        self.word_vocab_size = len(self.w2i)
        self.char_dim = params["char_dim"]
        self.char_vocab_size = len(self.c2i)

        print("Sizes of word and char dictionaries: {}, {}".format(
            self.word_vocab_size, self.char_vocab_size))

        self.word_emb = np.zeros(shape=(self.word_vocab_size, self.word_dim))
        # load word embedding
        if "word_emb" in params:
            print("pre-trained word embedding {} is being loaded ...".format(
                params["word_emb"]))
            self.load_word_emb(params["word_emb"])

        tf.reset_default_graph()

        # [sent, word]
        self.tf_word_ids = tf.placeholder(dtype=tf.int32,
                                          shape=[None, None],
                                          name="word_ids")
        # real length sents
        self.tf_sentence_lengths = tf.placeholder(dtype=tf.int32,
                                                  shape=[None],
                                                  name="sentence_lengths")

        # [sent, word, char]
        self.tf_char_ids = tf.placeholder(dtype=tf.int32,
                                          shape=[None, None, None],
                                          name="char_ids")

        # binary matrix representing the relationship between sents: [sent, sent]
        self.tf_target_matrix = tf.placeholder(dtype=tf.int32,
                                               shape=[None],
                                               name="target_matrix")

        # keep_prob
        self.tf_keep_prob = tf.placeholder(dtype=tf.float32,
                                           shape=[],
                                           name="keep_prob")

        # learning rate
        self.tf_learning_rate = tf.placeholder(dtype=tf.float32,
                                               shape=[],
                                               name="learning_rate")

        # load word embedding
        with tf.variable_scope("word_embedding"):
            tf_word_embeddings = tf.Variable(self.word_emb,
                                             dtype=tf.float32,
                                             trainable=True,
                                             name="word_embedding")
            embedded_words = tf.nn.embedding_lookup(tf_word_embeddings,
                                                    self.tf_word_ids,
                                                    name="embedded_words")
            self.input = embedded_words  # sent, word, word_dim

        # CNN network to capture character-level features
        with tf.variable_scope("char_cnn"):
            tf_char_embeddings = tf.get_variable(
                name="char_embeddings",
                dtype=tf.float32,
                shape=[self.char_vocab_size, self.char_dim],
                trainable=True,
                initializer=xavier_initializer())

            conv = tf.nn.embedding_lookup(tf_char_embeddings,
                                          self.tf_char_ids,
                                          name="embedded_cnn_chars")
            for i, (ks, fil) in enumerate(self.params["conv"]):
                conv = tf.layers.conv2d(
                    inputs=conv,  # sent, word, char, feature
                    filters=fil,
                    kernel_size=(1, ks),
                    strides=(1, 1),
                    padding="same",
                    name="conv_{}".format(i),
                    kernel_initializer=xavier_initializer_conv2d())

            self.char_cnn = tf.reduce_max(conv,
                                          axis=2)  # sent, word, cnn_feature

            self.input = tf.nn.dropout(
                tf.concat([self.input, self.char_cnn], axis=-1),
                self.tf_keep_prob)  # [sents, words, word_dim + cnn_features]

        # Bi-LSTM to generate final input representation in combination with both left and right contexts
        with tf.variable_scope("bi_lstm_words"):
            cell_fw = tf.contrib.rnn.LSTMCell(self.params["word_lstm_units"])
            cell_bw = tf.contrib.rnn.LSTMCell(self.params["word_lstm_units"])
            (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
                cell_fw,
                cell_bw,
                self.input,
                sequence_length=self.tf_sentence_lengths,
                dtype=tf.float32)

            bilstm_output = tf.concat([output_fw, output_bw],
                                      axis=-1)  # [sents, words, 2*lstm_units]

            mask = tf.where(
                condition=tf.equal(self.tf_word_ids, self.w2i["<PAD>"]),
                x=-1e10 * tf.ones_like(self.tf_word_ids, dtype=tf.float32),
                y=tf.zeros_like(self.tf_word_ids, dtype=tf.float32))
            mask = tf.tile(tf.expand_dims(mask, -1),
                           (1, 1, 2 * self.params["word_lstm_units"]))
            bilstm_output = bilstm_output + mask

            bilstm_output = tf.reduce_max(bilstm_output,
                                          axis=1)  # [sents, 2*word_lstm_units]

        with tf.variable_scope("att"):
            c = bilstm_output[:-1, :]
            q = bilstm_output[-1:, :]
            de = c.get_shape()[-1]
            w = tf.get_variable(dtype=tf.float32,
                                shape=[1, de],
                                trainable=True,
                                name="w")
            w1 = tf.get_variable(dtype=tf.float32,
                                 shape=[de, de],
                                 trainable=True,
                                 name="w1")
            w2 = tf.get_variable(dtype=tf.float32,
                                 shape=[de, de],
                                 trainable=True,
                                 name="w2")
            b1 = tf.get_variable(dtype=tf.float32,
                                 shape=[de, 1],
                                 trainable=True,
                                 name="b1")
            b = tf.get_variable(dtype=tf.float32,
                                shape=[1],
                                trainable=True,
                                name="b")

            w1cT = tf.matmul(w1, tf.transpose(c))
            w2q = tf.matmul(w2, tf.transpose(q))

            s = tf.add(tf.add(w1cT, w2q), b1)

            f = tf.squeeze(tf.matmul(w, tf.tanh(s)) + b, axis=0)
            e = tf.nn.softmax(f)
            h = c * e[:, None]
            h = tf.add(h, tf.squeeze(q, axis=0))

        with tf.variable_scope("loss_and_opt"):
            self.logits = tf.nn.dropout(
                tf.layers.dense(inputs=h, units=2, activation=None),
                self.tf_keep_prob)

            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=tf.one_hot(self.tf_target_matrix, 2),
                    logits=self.logits,
                    name="loss_function"))

            self.pred = tf.argmax(self.logits, axis=-1)
            eq = tf.cast(
                tf.equal(tf.cast(self.pred, tf.int32), self.tf_target_matrix),
                tf.float32)
            self.acc = tf.reduce_mean(eq)

            self.opt = tf.train.AdamOptimizer(
                learning_rate=self.tf_learning_rate).minimize(self.loss)
Example No. 14
    def _initialize_weights(self):
        all_weights = dict()
        all_weights['enc_w0'] = tf.get_variable(
            "enc_w0",
            shape=[
                self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['enc_b0'] = tf.Variable(
            tf.zeros([self.n_hidden[0]], dtype=tf.float32))

        all_weights['enc_w1'] = tf.get_variable(
            "enc_w1",
            shape=[
                self.kernel_size[1], self.kernel_size[1], self.n_hidden[0],
                self.n_hidden[1]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['enc_b1'] = tf.Variable(
            tf.zeros([self.n_hidden[1]], dtype=tf.float32))

        all_weights['enc_w2'] = tf.get_variable(
            "enc_w2",
            shape=[
                self.kernel_size[2], self.kernel_size[2], self.n_hidden[1],
                self.n_hidden[2]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['enc_b2'] = tf.Variable(
            tf.zeros([self.n_hidden[2]], dtype=tf.float32))

        all_weights['dec_w0'] = tf.get_variable(
            "dec_w0",
            shape=[
                self.kernel_size[2], self.kernel_size[2], self.n_hidden[1],
                self.n_hidden[2]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['dec_b0'] = tf.Variable(
            tf.zeros([self.n_hidden[1]], dtype=tf.float32))

        all_weights['dec_w1'] = tf.get_variable(
            "dec_w1",
            shape=[
                self.kernel_size[1], self.kernel_size[1], self.n_hidden[0],
                self.n_hidden[1]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['dec_b1'] = tf.Variable(
            tf.zeros([self.n_hidden[0]], dtype=tf.float32))

        all_weights['dec_w2'] = tf.get_variable(
            "dec_w2",
            shape=[
                self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['dec_b2'] = tf.Variable(tf.zeros([1], dtype=tf.float32))
        return all_weights
Example No. 15
 def conv(X, in_ch, out_ch, name):
     with tf.variable_scope(name) as scope:
         W_conv = tf.get_variable(name='weights', shape=[3, 3, in_ch, out_ch], initializer=xavier_initializer_conv2d())
         h_bn = tf.layers.batch_normalization(tf.nn.conv2d(X, W_conv, strides=[1, 1, 1, 1], padding='SAME'), training =trainphase)
         h_conv = my_relu(h_bn)
     return h_conv
Example No. 16
    def _initialize_weights(self):
        all_weights = dict()
        all_weights['enc_w0'] = tf.get_variable(
            "enc_w0",
            shape=[
                self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['enc_b0'] = tf.Variable(
            tf.zeros([self.n_hidden[0]], dtype=tf.float32))

        all_weights['enc_w1'] = tf.get_variable(
            "enc_w1",
            shape=[
                self.kernel_size[1], self.kernel_size[1], self.n_hidden[0],
                self.n_hidden[1]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['enc_b1'] = tf.Variable(
            tf.zeros([self.n_hidden[1]], dtype=tf.float32))

        all_weights['enc_w2'] = tf.get_variable(
            "enc_w2",
            shape=[
                self.kernel_size[2], self.kernel_size[2], self.n_hidden[1],
                self.n_hidden[2]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['enc_b2'] = tf.Variable(
            tf.zeros([self.n_hidden[2]], dtype=tf.float32))

        all_weights['Coef'] = tf.Variable(
            1.0e-4 * tf.ones([self.batch_size, self.batch_size], tf.float32),
            name='Coef')

        all_weights['dec_w0'] = tf.get_variable(
            "dec_w0",
            shape=[
                self.kernel_size[2], self.kernel_size[2], self.n_hidden[1],
                self.n_hidden[2]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['dec_b0'] = tf.Variable(
            tf.zeros([self.n_hidden[1]], dtype=tf.float32))

        all_weights['dec_w1'] = tf.get_variable(
            "dec_w1",
            shape=[
                self.kernel_size[1], self.kernel_size[1], self.n_hidden[0],
                self.n_hidden[1]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['dec_b1'] = tf.Variable(
            tf.zeros([self.n_hidden[0]], dtype=tf.float32))

        all_weights['dec_w2'] = tf.get_variable(
            "dec_w2",
            shape=[
                self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['dec_b2'] = tf.Variable(tf.zeros([1], dtype=tf.float32))

        all_weights['oenc_w0'] = tf.get_variable(
            "oenc_w0",
            shape=[
                self.kernel_size[0], self.kernel_size[0], self.n_hidden[0], 1
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['oenc_b0'] = tf.Variable(tf.zeros([1], dtype=tf.float32))

        all_weights['oenc_w1'] = tf.get_variable(
            "oenc_w1",
            shape=[
                self.kernel_size[1], self.kernel_size[1], self.n_hidden[2],
                self.n_hidden[0]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['oenc_b1'] = tf.Variable(
            tf.zeros([self.n_hidden[2]], dtype=tf.float32))

        all_weights['oenc_w2'] = tf.get_variable(
            "oenc_w2",
            shape=[
                self.kernel_size[2], self.kernel_size[2], self.n_hidden[2],
                self.n_hidden[1]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['oenc_b2'] = tf.Variable(
            tf.zeros([self.n_hidden[2]], dtype=tf.float32))

        all_weights['oCoef'] = tf.Variable(
            1.0e-4 * tf.ones([self.batch_size, self.batch_size], tf.float32),
            name='Coef')

        all_weights['odec_w0'] = tf.get_variable(
            "odec_w0",
            shape=[
                self.kernel_size[2], self.kernel_size[2], self.n_hidden[1],
                self.n_hidden[2]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['odec_b0'] = tf.Variable(
            tf.zeros([self.n_hidden[2]], dtype=tf.float32))

        all_weights['odec_w1'] = tf.get_variable(
            "odec_w1",
            shape=[
                self.kernel_size[1], self.kernel_size[1], self.n_hidden[0],
                self.n_hidden[2]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['odec_b1'] = tf.Variable(
            tf.zeros([self.n_hidden[2]], dtype=tf.float32))

        all_weights['odec_w2'] = tf.get_variable(
            "odec_w2",
            shape=[self.kernel_size[0], self.kernel_size[0], 3, 3],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['odec_b2'] = tf.Variable(tf.zeros([3], dtype=tf.float32))

        all_weights['odec_w3'] = tf.get_variable(
            "odec_w3",
            shape=[self.kernel_size[0], self.kernel_size[0], 3, 3],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['odec_b3'] = tf.Variable(tf.zeros([3], dtype=tf.float32))
        return all_weights
Example No. 17
	def __conv_relu(self, input, kernel_shape, bias_shape, name):
		weights = tf.get_variable("W"+name, kernel_shape, initializer=xavier_initializer_conv2d())
		biases = tf.get_variable("b"+name, bias_shape, initializer=tf.constant_initializer(0.0))
		conv = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='SAME', name=name+"_conv")
		return tf.nn.relu(conv + biases)
Example No. 18
def siamese_net(input, reuse=False, is_training=True):  # without batch norm
    with tf.name_scope("model"):
        with tf.variable_scope("conv1_1") as scope:
            # ,normalizer_fn=slim.batch_norm,normalizer_params={'is_training':is_training}
            # tf.truncated_normal_initializer(stddev=0.001)
            conv1_1 = layers.conv2d(
                input,
                48, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv1_2") as scope:
            conv1_2 = layers.conv2d(
                conv1_1,
                48, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv2_1") as scope:
            conv2_1 = layers.avg_pool2d(conv1_2, [2, 2])
        with tf.variable_scope("conv2_2") as scope:
            conv2_2 = layers.conv2d(
                conv2_1,
                96, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv2_3") as scope:
            conv2_3 = layers.conv2d(
                conv2_2,
                96, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv3_1") as scope:
            conv3_1 = layers.avg_pool2d(conv2_3, [2, 2])
        with tf.variable_scope("conv3_2") as scope:
            conv3_2 = layers.conv2d(
                conv3_1,
                192, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv3_3") as scope:
            conv3_3 = layers.conv2d(
                conv3_2,
                192, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv4_1") as scope:
            conv4_1 = layers.avg_pool2d(conv3_3, [2, 2])
        with tf.variable_scope("conv4_2") as scope:
            conv4_2 = layers.conv2d(
                conv4_1,
                384, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv4_3") as scope:
            conv4_3 = layers.conv2d(
                conv4_2,
                384, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)

        with tf.variable_scope("conv43") as scope:
            conv43 = layers.conv2d_transpose(
                conv4_3,
                192, [2, 2],
                stride=2,
                activation_fn=None,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv3_4") as scope:
            conv3_4 = tf.concat([conv43, conv3_3], 3)
        with tf.variable_scope("conv3_5") as scope:
            conv3_5 = layers.conv2d(
                conv3_4,
                192, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv3_6") as scope:
            conv3_6 = layers.conv2d(
                conv3_5,
                192, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv32") as scope:
            conv32 = layers.conv2d_transpose(
                conv3_6,
                64, [2, 2],
                stride=2,
                activation_fn=None,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv2_4") as scope:
            conv2_4 = tf.concat([conv32, conv2_3], 3)
        with tf.variable_scope("conv2_5") as scope:
            conv2_5 = layers.conv2d(
                conv2_4,
                96, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv2_6") as scope:
            conv2_6 = layers.conv2d(
                conv2_5,
                96, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv21") as scope:
            conv21 = layers.conv2d_transpose(
                conv2_6,
                48, [2, 2],
                stride=2,
                activation_fn=None,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv1_3") as scope:
            conv1_3 = tf.concat([conv21, conv1_2], 3)
        with tf.variable_scope("conv1_4") as scope:
            conv1_4 = layers.conv2d(
                conv1_3,
                48, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv1_5") as scope:
            conv1_5 = layers.conv2d(
                conv1_4,
                24, [3, 3],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
        with tf.variable_scope("conv1_6") as scope:
            conv1_6 = layers.conv2d(
                conv1_5,
                1, [3, 3],
                activation_fn=None,
                weights_initializer=layers.xavier_initializer_conv2d(),
                weights_regularizer=slim.l2_regularizer(0.0001),
                scope=scope,
                reuse=reuse)
    return conv1_6
Example No. 19
def mru_conv_block_v3(inp, ht, filter_depth, sn,
                      stride, dilate=1,
                      activation_fn=tf.nn.relu,
                      normalizer_fn=None,
                      normalizer_params=None,
                      weights_initializer=ly.xavier_initializer_conv2d(),
                      biases_initializer_mask=tf.constant_initializer(
                          value=0.5),
                      biases_initializer_h=tf.constant_initializer(value=-1),
                      data_format='NCHW',
                      weight_decay_rate=1e-8,
                      norm_mask=False,
                      norm_input=True,
                      deconv=False):

    def norm_activ(tensor_in):
        if normalizer_fn is not None:
            _normalizer_params = normalizer_params or {}
            tensor_normed = normalizer_fn(tensor_in, **_normalizer_params)
        else:
            tensor_normed = tf.identity(tensor_in)
        if activation_fn is not None:
            tensor_normed = activation_fn(tensor_normed)

        return tensor_normed

    channel_index = 1 if data_format == 'NCHW' else 3
    reduce_dim = [2, 3] if data_format == 'NCHW' else [1, 2]
    hidden_depth = ht.get_shape().as_list()[channel_index]
    regularizer = ly.l2_regularizer(
        weight_decay_rate) if weight_decay_rate > 0 else None
    weights_initializer_mask = weights_initializer
    biases_initializer = tf.zeros_initializer()

    if norm_mask:
        mask_normalizer_fn = normalizer_fn
        mask_normalizer_params = normalizer_params
    else:
        mask_normalizer_fn = None
        mask_normalizer_params = None

    if deconv:
        if stride == 2:
            ht = upsample(ht, data_format=data_format)
        elif stride != 1:
            raise NotImplementedError

    ht_orig = tf.identity(ht)

    # Normalize hidden state
    with tf.variable_scope('norm_activation_in') as sc:
        if norm_input:
            full_inp = tf.concat([norm_activ(ht), inp], axis=channel_index)
        else:
            full_inp = tf.concat([ht, inp], axis=channel_index)

    # update gate
    rg = conv2d2(full_inp, hidden_depth, 3, sn=sn, stride=1, rate=dilate,
                 data_format=data_format, activation_fn=lrelu,
                 normalizer_fn=mask_normalizer_fn, normalizer_params=mask_normalizer_params,
                 weights_regularizer=regularizer,
                 weights_initializer=weights_initializer_mask,
                 biases_initializer=biases_initializer_mask,
                 scope='update_gate')
    rg = (rg - tf.reduce_min(rg, axis=reduce_dim, keep_dims=True)) / (
        tf.reduce_max(rg, axis=reduce_dim, keep_dims=True) - tf.reduce_min(rg, axis=reduce_dim, keep_dims=True))

    # Input Image conv
    img_new = conv2d2(inp, hidden_depth, 3, sn=sn, stride=1, rate=dilate,
                      data_format=data_format, activation_fn=None,
                      normalizer_fn=None, normalizer_params=None,
                      biases_initializer=biases_initializer,
                      weights_regularizer=regularizer,
                      weights_initializer=weights_initializer)

    ht_plus = ht + rg * img_new
    with tf.variable_scope('norm_activation_merge_1') as sc:
        ht_new_in = norm_activ(ht_plus)

    # new hidden state
    h_new = conv2d2(ht_new_in, filter_depth, 3, sn=sn, stride=1, rate=dilate,
                    data_format=data_format, activation_fn=activation_fn,
                    normalizer_fn=normalizer_fn, normalizer_params=normalizer_params,
                    biases_initializer=biases_initializer,
                    weights_regularizer=regularizer,
                    weights_initializer=weights_initializer)
    h_new = conv2d2(h_new, filter_depth, 3, sn=sn, stride=1, rate=dilate,
                    data_format=data_format, activation_fn=None,
                    normalizer_fn=None, normalizer_params=None,
                    biases_initializer=biases_initializer,
                    weights_regularizer=regularizer,
                    weights_initializer=weights_initializer)

    # new hidden state out
    # linear project for filter depth
    if ht.get_shape().as_list()[channel_index] != filter_depth:
        ht_orig = conv2d2(ht_orig, filter_depth, 1, sn=sn, stride=1,
                          data_format=data_format, activation_fn=None,
                          normalizer_fn=None, normalizer_params=None,
                          biases_initializer=biases_initializer,
                          weights_regularizer=regularizer,
                          weights_initializer=weights_initializer)
    ht_new = ht_orig + h_new

    if not deconv:
        if stride == 2:
            ht_new = mean_pool(ht_new, data_format=data_format)
        elif stride != 1:
            raise NotImplementedError

    return ht_new
Example No. 20
def model(x, n_classes, keep_prob):
    mu = 0
    sigma = 0.1

    depth = {
        'D_1': 16,
        'D_2': 32,
        'D_3': 512,
        'D_4': 8192,
        'D_5': 256,
    }

    weights = {
        #     'W_conv1':tf.Variable(tf.truncated_normal(shape=[5, 5, 1,            depth["D_1"]],  mean = mu, stddev = sigma, name = 'weight1')),
        #     'W_conv2':tf.Variable(tf.truncated_normal(shape=[5, 5, depth["D_1"], depth["D_2"]],  mean = mu, stddev = sigma, name = 'weight2')),
        #     'W_conv3':tf.Variable(tf.truncated_normal(shape=[5, 5, depth["D_2"], depth["D_3"]],  mean = mu, stddev = sigma, name = 'weight3')),

        #     'W_fc1':  tf.Variable(tf.truncated_normal(shape=[8192, depth["D_4"]], mean = mu, stddev = sigma, name = 'weight4')),
        #     'W_fc2':  tf.Variable(tf.truncated_normal(shape=[depth["D_4"],     depth["D_5"]], mean = mu, stddev = sigma, name = 'weight5')),
        #     'W_out':  tf.Variable(tf.truncated_normal(shape=[depth["D_5"],     n_classes],    mean = mu, stddev = sigma, name = 'weight_out')),
        'W_conv1':
        tf.Variable(xavier_initializer_conv2d()([5, 5, 1, depth["D_1"]]),
                    name='weight1'),
        'W_conv2':
        tf.Variable(
            xavier_initializer_conv2d()([5, 5, depth["D_1"], depth["D_2"]]),
            name='weight2'),
        'W_conv3':
        tf.Variable(
            xavier_initializer_conv2d()([5, 5, depth["D_2"], depth["D_3"]]),
            name='weight3'),
        'W_fc1':
        tf.Variable(xavier_initializer_conv2d()([8192, depth["D_4"]]),
                    name='weight4'),
        'W_fc2':
        tf.Variable(xavier_initializer_conv2d()([depth["D_4"], depth["D_5"]]),
                    name='weight5'),
        'W_out':
        tf.Variable(xavier_initializer_conv2d()([depth["D_5"], n_classes]),
                    name='weight_out'),
    }

    biases = {
        'B_1': tf.Variable(tf.zeros(depth['D_1']), name='bias_1'),
        'B_2': tf.Variable(tf.zeros(depth['D_2']), name='bias_2'),
        'B_3': tf.Variable(tf.zeros(depth['D_3']), name='bias_3'),
        'B_4': tf.Variable(tf.zeros(depth['D_4']), name='bias_4'),
        'B_5': tf.Variable(tf.zeros(depth['D_5']), name='bias_5'),
        'B_out': tf.Variable(tf.zeros(n_classes), name='bias_out')
    }

    # print(x)
    # Layer 1: Convolutional. Input = 32x32x1. Output = 32x32x16.
    conv1 = tf.nn.conv2d(
        x, weights['W_conv1'], strides=[1, 1, 1, 1
                                        ], padding='SAME') + biases['B_1']
    conv1 = tf.nn.relu(conv1)  # Activation.
    # Pooling. Input = 32x32x16. Output = 16x16x16.
    conv1 = tf.nn.max_pool(conv1,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')

    # Layer 2: Convolutional. Output = 16x16x32.
    conv2 = tf.nn.conv2d(
        conv1, weights['W_conv2'], strides=[1, 1, 1, 1
                                            ], padding='SAME') + biases['B_2']
    conv2 = tf.nn.relu(conv2)  # Activation.
    # Pooling. Input = 16x16x32. Output = 8x8x32.
    conv2 = tf.nn.max_pool(conv2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')

    # Layer 3: Convolutional. Output = 8x8x512.
    conv3 = tf.nn.conv2d(
        conv2, weights['W_conv3'], strides=[1, 1, 1, 1
                                            ], padding='SAME') + biases['B_3']
    conv3 = tf.nn.relu(conv3)  # Activation.
    # Pooling. Input = 8x8x512. Output = 4x4x512.
    conv3 = tf.nn.max_pool(conv3,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')

    # Flatten. Input = 4x4x512. Output = 8192.
    fc0 = flatten(conv3)

    # print("fc0", fc0, "weights:", weights["W_fc1"])
    # Layer 4: Fully Connected. Input = 8192. Output = 8192.
    fc1 = tf.matmul(fc0, weights['W_fc1']) + biases['B_4']
    fc1 = tf.nn.relu(fc1)  # Activation.

    # Layer 5: Fully Connected. Input = 8192. Output = 256.
    fc2 = tf.matmul(fc1, weights['W_fc2']) + biases['B_5']
    fc2 = tf.nn.relu(fc2)  # Activation.

    # Layer 6: Fully Connected. Input = 256. Output = n_classes.
    logits = tf.matmul(fc2, weights['W_out']) + biases['B_out']

    return logits
Example No. 21
    def apply_logit(self):

        with tf.variable_scope(self.modelname + "conv_1") as scope:
            self.W_conv1 = tf.get_variable(
                name='weights',
                shape=[1, 5, 1, 5],
                initializer=xavier_initializer_conv2d())
            self.h_bn1 = tf.layers.batch_normalization(
                tf.nn.conv2d(self.X,
                             self.W_conv1,
                             strides=[1, 1, 1, 1],
                             padding='VALID'),
                training=self.trainphase)
            self.h_conv1 = tf.nn.leaky_relu(self.h_bn1)

        with tf.variable_scope(self.modelname + "conv_2") as scope:
            self.W_conv2 = tf.get_variable(
                name='weights',
                shape=[2, 5, 1, 5],
                initializer=xavier_initializer_conv2d())
            self.h_bn2 = tf.layers.batch_normalization(
                tf.nn.conv2d(self.X,
                             self.W_conv2,
                             strides=[1, 1, 1, 1],
                             padding='VALID'),
                training=self.trainphase)
            self.h_conv2 = tf.nn.leaky_relu(self.h_bn2)

        with tf.variable_scope(self.modelname + "conv_3") as scope:
            self.W_conv3 = tf.get_variable(
                name='weights',
                shape=[3, 5, 1, 5],
                initializer=xavier_initializer_conv2d())
            self.h_bn3 = tf.layers.batch_normalization(
                tf.nn.conv2d(self.X,
                             self.W_conv3,
                             strides=[1, 1, 1, 1],
                             padding='VALID'),
                training=self.trainphase)
            self.h_conv3 = tf.nn.leaky_relu(self.h_bn3)

        with tf.variable_scope(self.modelname + "conv_4") as scope:
            self.W_conv4 = tf.get_variable(
                name='weights',
                shape=[4, 5, 1, 5],
                initializer=xavier_initializer_conv2d())
            self.h_bn4 = tf.layers.batch_normalization(
                tf.nn.conv2d(self.X,
                             self.W_conv4,
                             strides=[1, 1, 1, 1],
                             padding='VALID'),
                training=self.trainphase)
            self.h_conv4 = tf.nn.leaky_relu(self.h_bn4)

        self.pooled1 = tf.reshape(self.h_conv1,
                                  shape=[-1, int(self.duration), 5])
        self.pooled1 = tf.reduce_mean(self.pooled1, axis=2)
        self.pooled1 = tf.squeeze(self.pooled1)

        self.pooled2 = tf.reshape(self.h_conv2,
                                  shape=[-1, int(self.duration - 1), 5])
        self.pooled2 = tf.reduce_mean(self.pooled2, axis=2)
        self.pooled2 = tf.squeeze(self.pooled2)

        self.pooled3 = tf.reshape(self.h_conv3,
                                  shape=[-1, int(self.duration - 2), 5])
        self.pooled3 = tf.reduce_mean(self.pooled3, axis=2)
        self.pooled3 = tf.squeeze(self.pooled3)
        '''
        self.pooled4 = tf.reshape(self.h_conv4, shape=[-1, int(self.duration - 3), 5])
        self.pooled4 = tf.reduce_mean(self.pooled4, reduction_indices=2)
        self.pooled4 = tf.squeeze(self.pooled4)
        '''
        '''
        self.pooled1_flatten = tf.reshape(self.h_conv1,shape=[-1,int(self.duration)*5])
        self.pooled2_flatten = tf.reshape(self.h_conv2,shape=[-1,int(self.duration-1)*5])
        self.pooled3_flatten = tf.reshape(self.h_conv3, shape=[-1, int(self.duration-2)*5])
        self.pooled4_flatten = tf.reshape(self.h_conv4, shape=[-1, int(self.duration-3)*5])
        '''

        #self.pooled1_flatten = tf.reshape(self.pooled1,shape=[-1,self.duration])
        self.pooled2_flatten = tf.reshape(self.pooled2,
                                          shape=[-1, self.duration - 1])
        self.pooled3_flatten = tf.reshape(self.pooled3,
                                          shape=[-1, self.duration - 2])
        #self.pooled4_flatten = tf.reshape(self.pooled4, shape=[-1, self.duration - 3])

        #self.flatten = tf.concat([self.pooled1_flatten,self.pooled2_flatten,self.pooled3_flatten,self.pooled4_flatten],1)
        self.flatten = tf.concat([self.pooled2_flatten, self.pooled3_flatten],
                                 1)

        num_unit = 300
        H = self.linear(self.flatten, self.flatten.shape[1], num_unit, 'L1')
        H = self.linear(H, num_unit, num_unit, "L2")
        #H = self.linear(H,num_unit,num_unit,"L3")
        #H = self.linear(H, num_unit, num_unit, "L4")
        #H = self.linear(H, num_unit, num_unit, "L5")
        #H = self.linear(H, num_unit, num_unit, "L6")

        #H = self.linear(H,num_unit,num_unit,"L7")
        #H = self.linear(H,num_unit,num_unit,"L8")
        #H = self.linear(H,num_unit,num_unit,"L9")
        #H = self.linear(H,num_unit,num_unit,"L10")
        #H = self.linear(H,100,100,"L7")
        #H = self.linear(H,100,100,"L8")
        #H = self.linear(H,100,100,"L9")

        self.output_features = self.linear(H, num_unit, 128, "Llast")
        self.output = self.linear(self.output_features,
                                  128,
                                  1,
                                  "output",
                                  active_f=None)
        return self.output
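
The `linear` helper used above is not part of this snippet; a minimal sketch consistent with how it is called (the scope naming, Xavier initialization, and leaky-ReLU default are assumptions) might look like:

    def linear(self, x, in_dim, out_dim, name, active_f=tf.nn.leaky_relu):
        # Hypothetical fully connected layer matching the calls above.
        with tf.variable_scope(self.modelname + name):
            w = tf.get_variable('weights', shape=[in_dim, out_dim],
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.get_variable('bias', shape=[out_dim],
                                initializer=tf.zeros_initializer())
            h = tf.matmul(x, w) + b
        return active_f(h) if active_f is not None else h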
Exemplo n.º 22
0
    def __init__(self,
                 n_tags: int,
                 word_vocab,
                 word_dim: int,
                 word_emb_path: str,
                 word_emb_name: str = None,
                 char_vocab_size: int = None,
                 pos_vocab_size: int = None,
                 chunk_vocab_size: int = None,
                 char_dim: int = None,
                 elmo_dim: int = None,
                 pos_dim: int = None,
                 chunk_dim: int = None,
                 cap_dim: int = None,
                 cap_vocab_size: int = 5,
                 lstm_hidden_size: int = 256,
                 dropout_keep_prob: float = 0.5,
                 **kwargs) -> None:

        assert n_tags != 0, 'Number of classes is 0! It seems that the vocabularies are not loaded.' \
                            ' Check that all vocabulary files have been downloaded!'

        if 'learning_rate_drop_div' not in kwargs:
            kwargs['learning_rate_drop_div'] = 10.0
        if 'learning_rate_drop_patience' not in kwargs:
            kwargs['learning_rate_drop_patience'] = 5.0
        if 'clip_norm' not in kwargs:
            kwargs['clip_norm'] = 5.0
        super().__init__(**kwargs)

        word2id = word_vocab.t2i

        self._dropout_ph = tf.placeholder_with_default(dropout_keep_prob,
                                                       shape=[],
                                                       name='dropout')
        self.training_ph = tf.placeholder_with_default(False,
                                                       shape=[],
                                                       name='is_training')
        self._y_ph = tf.placeholder(tf.int32, [None, None], name='y_ph')

        self._xs_ph_list = []
        self._input_features = []

        # used by the word-level contextual bi-LSTM and by ELMo
        self.real_sent_lengths_ph = tf.placeholder(tf.int32, [None],
                                                   name="real_sent_lengths")
        self._xs_ph_list.append(self.real_sent_lengths_ph)

        # Word emb
        with tf.variable_scope("word_emb"):
            word_ids_ph = tf.placeholder(tf.int32, [None, None],
                                         name="word_ids")
            self._xs_ph_list.append(word_ids_ph)

            word_embeddings = self.load_pretrained_word_emb(
                word_emb_path, word_emb_name, word_dim, word2id)

            word_lookup_table = tf.Variable(word_embeddings,
                                            dtype=tf.float32,
                                            trainable=True,
                                            name="word_embeddings")
            word_emb = tf.nn.embedding_lookup(word_lookup_table,
                                              word_ids_ph,
                                              name="embedded_word")
            self._input_features.append(word_emb)

        # POS feature
        if pos_dim is not None:
            with tf.variable_scope("pos_emb"):
                pos_ph = tf.placeholder(tf.int32, [None, None], name="pos_ids")
                self._xs_ph_list.append(pos_ph)

                tf_pos_embeddings = tf.get_variable(
                    name="pos_embeddings",
                    dtype=tf.float32,
                    shape=[pos_vocab_size, pos_dim],
                    trainable=True,
                    initializer=xavier_initializer())

                embedded_pos = tf.nn.embedding_lookup(tf_pos_embeddings,
                                                      pos_ph,
                                                      name="embedded_pos")
                self._input_features.append(embedded_pos)

        # Chunk feature
        if chunk_dim is not None:
            with tf.variable_scope("chunk_emb"):
                chunk_ph = tf.placeholder(tf.int32, [None, None],
                                          name="chunk_ids")
                self._xs_ph_list.append(chunk_ph)

                tf_chunk_embeddings = tf.get_variable(
                    name="chunk_embeddings",
                    dtype=tf.float32,
                    shape=[chunk_vocab_size, chunk_dim],
                    trainable=True,
                    initializer=xavier_initializer())

                embedded_chunk = tf.nn.embedding_lookup(tf_chunk_embeddings,
                                                        chunk_ph,
                                                        name="embedded_chunk")
                self._input_features.append(embedded_chunk)

        # Capitalization feature
        if cap_dim is not None:
            with tf.variable_scope("cap_emb"):
                cap_ph = tf.placeholder(tf.int32, [None, None], name="cap_ids")
                self._xs_ph_list.append(cap_ph)

                tf_cap_embeddings = tf.get_variable(
                    name="cap_embeddings",
                    dtype=tf.float32,
                    shape=[cap_vocab_size, cap_dim],
                    trainable=True,
                    initializer=xavier_initializer())

                embedded_cap = tf.nn.embedding_lookup(tf_cap_embeddings,
                                                      cap_ph,
                                                      name="embedded_cap")
                self._input_features.append(embedded_cap)

        # Character feature
        if char_dim is not None:
            with tf.variable_scope("char_emb"):
                char_ids_ph = tf.placeholder(tf.int32, [None, None, None],
                                             name="char_ids")
                self._xs_ph_list.append(char_ids_ph)

                tf_char_embeddings = tf.get_variable(
                    name="char_embeddings",
                    dtype=tf.float32,
                    shape=[char_vocab_size, char_dim],
                    trainable=True,
                    initializer=xavier_initializer())
                embedded_cnn_chars = tf.nn.embedding_lookup(
                    tf_char_embeddings, char_ids_ph, name="embedded_cnn_chars")
                conv1 = tf.layers.conv2d(
                    inputs=embedded_cnn_chars,
                    filters=128,
                    kernel_size=(1, 3),
                    strides=(1, 1),
                    padding="same",
                    name="conv1",
                    kernel_initializer=xavier_initializer_conv2d())
                conv2 = tf.layers.conv2d(
                    inputs=conv1,
                    filters=128,
                    kernel_size=(1, 3),
                    strides=(1, 1),
                    padding="same",
                    name="conv2",
                    kernel_initializer=xavier_initializer_conv2d())
                char_cnn = tf.reduce_max(conv2, axis=2)

                self._input_features.append(char_cnn)

        # ELMo
        if elmo_dim is not None:
            with tf.variable_scope("elmo_emb"):
                padded_x_tokens_ph = tf.placeholder(tf.string, [None, None],
                                                    name="padded_x_tokens")
                self._xs_ph_list.append(padded_x_tokens_ph)

                elmo = hub.Module("https://tfhub.dev/google/elmo/2",
                                  trainable=True)
                emb = elmo(inputs={
                    "tokens": padded_x_tokens_ph,
                    "sequence_len": self.real_sent_lengths_ph
                },
                           signature="tokens",
                           as_dict=True)["elmo"]
                elmo_emb = tf.layers.dense(emb, elmo_dim, activation=None)
                self._input_features.append(elmo_emb)

        features = tf.nn.dropout(tf.concat(self._input_features, axis=2),
                                 self._dropout_ph)

        with tf.variable_scope("bi_lstm_words"):
            cell_fw = tf.contrib.rnn.LSTMCell(lstm_hidden_size)
            cell_bw = tf.contrib.rnn.LSTMCell(lstm_hidden_size)
            (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
                cell_fw,
                cell_bw,
                features,
                sequence_length=self.real_sent_lengths_ph,
                dtype=tf.float32)
            self.output = tf.concat([output_fw, output_bw], axis=-1)

            ntime_steps = tf.shape(self.output)[1]
            self.output = tf.reshape(self.output, [-1, 2 * lstm_hidden_size])
            layer1 = tf.nn.dropout(
                tf.layers.dense(inputs=self.output,
                                units=lstm_hidden_size,
                                activation=None,
                                kernel_initializer=xavier_initializer()),
                self._dropout_ph)
            pred = tf.layers.dense(inputs=layer1,
                                   units=n_tags,
                                   activation=None,
                                   kernel_initializer=xavier_initializer())
            self.logits = tf.reshape(pred, [-1, ntime_steps, n_tags])

            log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
                self.logits, self._y_ph, self.real_sent_lengths_ph)
        # loss and opt
        with tf.variable_scope("loss_and_opt"):
            self.loss = tf.reduce_mean(-log_likelihood)
            self.train_op = self.get_train_op(self.loss)

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.load()
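
A hedged sketch of decoding tags from this graph at inference time (the helper name, feed dict, and `sent_lengths` are assumptions; `viterbi_decode` is the usual companion of `crf_log_likelihood`):

    def _predict_tags(self, feed_dict, sent_lengths):
        # Hypothetical inference helper: run the graph, then Viterbi-decode each sentence.
        logits, trans_params = self.sess.run(
            [self.logits, self.transition_params], feed_dict=feed_dict)
        predictions = []
        for logit, length in zip(logits, sent_lengths):
            viterbi_seq, _ = tf.contrib.crf.viterbi_decode(logit[:length], trans_params)
            predictions.append(viterbi_seq)
        return predictions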
Exemplo n.º 23
0
def redResidua(input):
    out = tf.reshape(input, [-1, 128, 128, 1])

    out = conv2d(out,
                 64,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 64,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = max_pool2d(out, 3, stride=2)
    max1 = out

    out = conv2d(out,
                 64,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 64,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = out + max1
    r1 = out

    out = conv2d(out,
                 64,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 64,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = out + r1

    out = conv2d(out,
                 128,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)

    out = max_pool2d(out, 3, stride=2)
    max2 = out

    out = conv2d(out,
                 128,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 128,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = out + max2
    r3 = out

    out = conv2d(out,
                 128,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 128,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = out + r3

    out = conv2d(out,
                 256,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = max_pool2d(out, 3, stride=2)
    max3 = out

    out = conv2d(out,
                 256,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 256,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = out + max3
    r5 = out

    out = conv2d(out,
                 256,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 256,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = out + r5

    out = conv2d(out,
                 256,
                 3,
                 padding='same',
                 activation=tf.nn.relu,
                 kernel_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = max_pool2d(out, 3, stride=2)

    out = tf.contrib.layers.flatten(out)

    out = fully_connected(out,
                          1024,
                          activation_fn=tf.nn.relu,
                          weights_initializer=xavier_initializer())
    out = fully_connected(out,
                          100,
                          activation_fn=None,
                          weights_initializer=xavier_initializer())
    return out
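
A hedged usage sketch for the network above (the placeholder name and shape are assumptions; the function only needs an input reshapeable to [-1, 128, 128, 1]):

x = tf.placeholder(tf.float32, [None, 128, 128, 1], name='x')
scores = redResidua(x)  # [None, 100] unnormalized class scores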
Exemplo n.º 24
0
    def inference(self, img):
        self.padding = 'SAME'
        # initializer = tf.truncated_normal_initializer(stddev=0.01)
        self.initializer = layers.xavier_initializer_conv2d()
        self.initializer_b = layers.xavier_initializer()
        self.regularizer = slim.l2_regularizer(0.0005)
        # leaky ReLU was tried here but is immediately overridden by plain ReLU below
        # self.activation = slim.layers.nn.leaky_relu
        self.activation = slim.layers.nn.relu

        print(self.n_filters_first_conv)
        with tf.variable_scope("first-conv"):

            stack = slim.conv2d(img,
                                num_outputs=self.n_filters_first_conv,
                                kernel_size=[3, 3],
                                weights_initializer=self.initializer,
                                activation_fn=self.activation,
                                padding=self.padding,
                                weights_regularizer=self.regularizer,
                                biases_initializer=self.initializer_b)

        print(stack.get_shape())
        n_filters = self.n_filters_first_conv

        pool_heads = []
        for i in range(self.n_pool + 1):
            print("Dense Block ", i + 1)
            with tf.variable_scope("block" + str(i + 1)):
                for j in range(self.n_layers_per_block[i]):
                    with tf.variable_scope("layer" + str(j + 1)):

                        #with tf.variable_scope("1x1conv"):
                        #conv=self.conv(stack,self.growth_rate*4,kernel_size=1)

                        with tf.variable_scope("3x3conv"):
                            conv = self.conv(stack,
                                             self.growth_rate,
                                             kernel_size=3)

                    stack = tf.concat([stack, conv], axis=-1)

                    n_filters += self.growth_rate
                    print(stack.get_shape(), " n=", n_filters)

                pool_heads.append(stack)

            if self.n_pool == i:
                continue
            with tf.variable_scope("Downsample" + str(i + 1)):
                stack = self.downSample(stack, int(n_filters * 0.5))
                print(n_filters / 2)

        stack = pool_heads[len(pool_heads) - 1]

        print("Turning up:", stack.get_shape())

        for i in range(self.n_pool):
            print("Dense Block ", i + self.n_pool + 1)

            with tf.variable_scope("Upsample" + str(i + 1)):

                concat = pool_heads[len(pool_heads) - i - 2]
                n_filters_keep = self.growth_rate * self.n_layers_per_block[
                    self.n_pool + i]
                print(n_filters)
                stack = self.upsample(stack, concat,
                                      int(n_filters_keep * self.compression))

            with tf.variable_scope("unpool-block" + str(i + 1)):

                for j in range(self.n_layers_per_block[self.n_pool + i + 1]):
                    with tf.variable_scope("layer" + str(j + 1)):

                        #with tf.variable_scope("1x1conv" + str(j + 1)):
                        #conv=self.conv(stack,self.growth_rate*4,kernel_size=1)
                        with tf.variable_scope("3x3conv" + str(j + 1)):
                            conv = self.conv(stack, self.growth_rate)

                    stack = tf.concat([stack, conv], axis=-1)
                    print(stack.get_shape())

        with tf.variable_scope("last-conv"):

            batchnorm = tf.layers.batch_normalization(stack)

            logits = slim.conv2d(inputs=batchnorm,
                                 num_outputs=self.n_classes + 1,
                                 kernel_size=1,
                                 padding='SAME',
                                 weights_initializer=self.initializer,
                                 biases_initializer=self.initializer_b,
                                 activation_fn=None)
            print("logits=", logits.get_shape())

        with tf.variable_scope("softmax"):
            pred = tf.argmax(tf.nn.softmax(logits), axis=-1)
            print(pred.get_shape())

        p = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        #print(p)
        for i, a in enumerate(p):
            if (i % 8) == 3:
                #print(i)
                tf.summary.histogram(
                    a.name,
                    tf.get_default_graph().get_tensor_by_name(a.name))
        pred = tf.expand_dims(pred, axis=-1)
        print("pred==", pred.get_shape())
        tf.summary.image(
            "pred",
            tf.multiply(tf.constant(255, dtype=tf.uint8),
                        tf.cast(pred, tf.uint8)))

        # tf.summary.histogram("bias", kernel)

        return logits, pred
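
The `conv` helper called inside the dense blocks is not shown in this snippet; a minimal sketch consistent with its call sites (the BN placement and the reuse of the initializers set up in inference() are assumptions):

    def conv(self, stack, num_filters, kernel_size=3):
        # Hypothetical composite layer: batch norm followed by a slim conv2d.
        out = tf.layers.batch_normalization(stack)
        out = slim.conv2d(out,
                          num_outputs=num_filters,
                          kernel_size=[kernel_size, kernel_size],
                          weights_initializer=self.initializer,
                          biases_initializer=self.initializer_b,
                          weights_regularizer=self.regularizer,
                          activation_fn=self.activation,
                          padding=self.padding)
        return out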
Exemplo n.º 25
0
 def _build_model(self, inputs):
     self.inputs = inputs
     if self.data_format == 'NCHW':
         channel_axis = 1
         _inputs = tf.cast(tf.transpose(inputs, [0, 3, 1, 2]), tf.float32)
     else:
         channel_axis = 3
         _inputs = tf.cast(inputs, tf.float32)
     self.L = []
     with arg_scope([layers.avg_pool2d], \
             padding='VALID', data_format=self.data_format):
         with tf.variable_scope('SRM_preprocess'):
             W_SRM = tf.get_variable('W', initializer=SRM_Kernels, \
                         dtype=tf.float32, \
                         regularizer=None)
             b = tf.get_variable('b', shape=[30], dtype=tf.float32, \
                         initializer=tf.constant_initializer(0.))
             self.L.append(tf.nn.bias_add( \
                     tf.nn.conv2d(_inputs, \
                     W_SRM, [1,1,1,1], 'VALID', \
                     data_format=self.data_format), b, \
                     data_format=self.data_format, name='Layer1'))
             self.L.append(tf.clip_by_value(self.L[-1], \
                           -self.tlu_threshold, self.tlu_threshold, \
                           name='TLU'))
         with tf.variable_scope('ConvNetwork'):
             with arg_scope([my_layers.conv2d], num_outputs=30, \
                     kernel_size=3, stride=1, padding='VALID', \
                     data_format=self.data_format, \
                     activation_fn=tf.nn.relu, \
                     weights_initializer=layers.xavier_initializer_conv2d(), \
                     weights_regularizer=layers.l2_regularizer(5e-4), \
                     biases_initializer=tf.constant_initializer(0.2), \
                     biases_regularizer=None), arg_scope([layers.batch_norm], \
                     decay=0.9, center=True, scale=True, \
                     updates_collections=None, is_training=self.is_training, \
                     fused=True, data_format=self.data_format):
                 if self.with_bn:
                     self.L.append(layers.batch_norm(self.L[-1], \
                                   scope='Norm1'))
                 self.L.append(my_layers.conv2d(self.L[-1], \
                               scope='Layer2'))
                 if self.with_bn:
                     self.L.append(layers.batch_norm(self.L[-1], \
                                   scope='Norm2'))
                 self.L.append(my_layers.conv2d(self.L[-1], \
                               scope='Layer3'))
                 if self.with_bn:
                     self.L.append(layers.batch_norm(self.L[-1], \
                                   scope='Norm3'))
                 self.L.append(my_layers.conv2d(self.L[-1], \
                               scope='Layer4'))
                 if self.with_bn:
                     self.L.append(layers.batch_norm(self.L[-1], \
                                   scope='Norm4'))
                 self.L.append(layers.avg_pool2d(self.L[-1], \
                               kernel_size=[2,2], scope='Stride1'))
                 with arg_scope([my_layers.conv2d], kernel_size=5, \
                                num_outputs=32):
                     self.L.append(my_layers.conv2d(self.L[-1], \
                                   scope='Layer5'))
                     if self.with_bn:
                         self.L.append(layers.batch_norm(self.L[-1], \
                                       scope='Norm5'))
                     self.L.append(layers.avg_pool2d(self.L[-1], \
                                   kernel_size=[3,3], \
                                   scope='Stride2'))
                     self.L.append(my_layers.conv2d(self.L[-1], \
                                   scope='Layer6'))
                     if self.with_bn:
                         self.L.append(layers.batch_norm(self.L[-1], \
                                       scope='Norm6'))
                     self.L.append(layers.avg_pool2d(self.L[-1], \
                                   kernel_size=[3,3], \
                                   scope='Stride3'))
                     self.L.append(my_layers.conv2d(self.L[-1], \
                                   scope='Layer7'))
                     if self.with_bn:
                         self.L.append(layers.batch_norm(self.L[-1], \
                                       scope='Norm7'))
                 self.L.append(layers.avg_pool2d(self.L[-1], \
                               kernel_size=[3,3], \
                               scope='Stride4'))
                 self.L.append(my_layers.conv2d(self.L[-1], \
                               num_outputs=16, \
                               scope='Layer8'))
                 if self.with_bn:
                     self.L.append(layers.batch_norm(self.L[-1], \
                                   scope='Norm8'))
                 self.L.append(my_layers.conv2d(self.L[-1], \
                               num_outputs=16, stride=3, \
                               scope='Layer9'))
                 if self.with_bn:
                     self.L.append(layers.batch_norm(self.L[-1], \
                                   scope='Norm9'))
             self.L.append(layers.flatten(self.L[-1]))
             self.L.append(layers.fully_connected(self.L[-1], num_outputs=2, \
                     activation_fn=None, normalizer_fn=None, \
                     weights_initializer=tf.random_normal_initializer(mean=0., stddev=0.01), \
                     biases_initializer=tf.constant_initializer(0.), scope='ip'))
     self.outputs = self.L[-1]
     return self.outputs
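
A hedged sketch of attaching a loss to the two-way `ip` outputs above (the method, the `labels` tensor, and the use of the regularization collection are assumptions, not part of the original snippet):

 def _build_losses(self, labels):
     # Hypothetical loss head: sparse cross-entropy on the two logits plus the
     # L2 terms registered by the conv layers' weight regularizers.
     xent = tf.reduce_mean(
         tf.nn.sparse_softmax_cross_entropy_with_logits(
             labels=labels, logits=self.outputs))
     reg = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
     return xent + reg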
Exemplo n.º 26
0
    def _build_critic_network(self):
        """ Common shared critic. Take the observation(state) from all agencies
            First observation state_inputs[0] must be the observation of 'self' agent.

        Variable:
            num_assist (int) : number of assisting state
            critic_state_input (placeholder) : state placeholder for critic evaluation
            assist_states (list placeholder) : state placeholder for assists critic evaluation
            assist_mask (boolean placeholder) : mask of assist inputs (Block if flowpath is blocked)
        """

        num_assist = self.num_agent-1

        self.critic_state_input = tf.placeholder(shape=self.in_size, dtype=tf.float32, name='cr_state_hold')
        self.assist_states = [tf.placeholder(shape=self.in_size, dtype=tf.float32, name='assist_states')
                                for _ in range(num_assist)]
        self.mask = tf.placeholder(shape=[None, self.num_agent], dtype=tf.float32, name='mask')

        scope = 'critic'
        critic_evaluations = []
        with tf.variable_scope(scope):
            for input_tensor in [self.critic_state_input]+self.assist_states: 
                net = layers.conv2d(input_tensor,
                                    32,
                                    [3,3],
                                    activation_fn=tf.nn.relu,
                                    weights_initializer=layers.xavier_initializer_conv2d(),
                                    biases_initializer=tf.zeros_initializer(),
                                    padding='VALID',
                                    scope='conv1',
                                    reuse=tf.AUTO_REUSE)
                net = layers.max_pool2d(net, [2,2])
                net = layers.conv2d(net,
                                    64,
                                    [2,2],
                                    activation_fn=tf.nn.relu,
                                    weights_initializer=layers.xavier_initializer_conv2d(),
                                    biases_initializer=tf.zeros_initializer(),
                                    padding='VALID',
                                    scope='conv2',
                                    reuse=tf.AUTO_REUSE)
                net = layers.flatten(net)
                net = layers.fully_connected(net, 128, scope='dense1', reuse=tf.AUTO_REUSE)
                net = layers.fully_connected(net, 1, activation_fn=None, scope='dense2', reuse=tf.AUTO_REUSE)
                #if input_tensor in self.assist_states:
                #    net = tf.stop_gradient(net)
                critic_evaluations.append(tf.reshape(net,[-1,1]))
            with tf.name_scope('Concat'):
                net = tf.concat(critic_evaluations, 1) # [None, 4]
                reweight = tf.get_variable(name='critic_reweight',
                                           shape=[self.num_agent],
                                           dtype=tf.float32,
                                           initializer=tf.constant_initializer(value=1.0/self.num_agent)
                                           )
                shift = tf.get_variable(name='critic_shift',
                                        shape=[1],
                                        dtype=tf.float32,
                                        initializer=tf.zeros_initializer())
                net = tf.multiply(net, reweight) + shift
                net = tf.reduce_sum(tf.multiply(net, self.mask), axis=1)

        vars_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope+'/'+scope)

        return net, vars_list
Exemplo n.º 27
0
 def _create_weight(self, name, shape):
     init = layers.xavier_initializer_conv2d(dtype=tf.float32)
     return tf.Variable(init(shape=shape), name=name)
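
A hedged usage sketch for the helper above (the method, variable name, and kernel shape are illustrative assumptions, not from the original code):

 def _example_usage(self):
     # Hypothetical: a Xavier-initialized 3x3 kernel, 1 input channel -> 16 filters.
     return self._create_weight('w_conv1', shape=[3, 3, 1, 16])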
Exemplo n.º 28
0
    def _initialize_weights(self):
        all_weights = dict()
        n_layers = len(self.n_hidden)
        all_weights['Coef'] = tf.Variable(
            0 * tf.ones([self.batch_size, self.batch_size], tf.float32),
            name='Coef')

        all_weights['enc_w0'] = tf.get_variable(
            "enc_w0",
            shape=[
                self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        all_weights['enc_b0'] = tf.Variable(
            tf.zeros([self.n_hidden[0]],
                     dtype=tf.float32))  # , name = 'enc_b0'

        iter_i = 1
        while iter_i < n_layers:
            enc_name_wi = 'enc_w' + str(iter_i)
            all_weights[enc_name_wi] = tf.get_variable(
                enc_name_wi,
                shape=[
                    self.kernel_size[iter_i], self.kernel_size[iter_i],
                    self.n_hidden[iter_i - 1], self.n_hidden[iter_i]
                ],
                initializer=layers.xavier_initializer_conv2d(),
                regularizer=self.reg)
            enc_name_bi = 'enc_b' + str(iter_i)
            all_weights[enc_name_bi] = tf.Variable(
                tf.zeros([self.n_hidden[iter_i]],
                         dtype=tf.float32))  # , name = enc_name_bi
            iter_i = iter_i + 1

        iter_i = 1
        while iter_i < n_layers:
            dec_name_wi = 'dec_w' + str(iter_i - 1)
            all_weights[dec_name_wi] = tf.get_variable(
                dec_name_wi,
                shape=[
                    self.kernel_size[n_layers - iter_i],
                    self.kernel_size[n_layers - iter_i],
                    self.n_hidden[n_layers - iter_i - 1],
                    self.n_hidden[n_layers - iter_i]
                ],
                initializer=layers.xavier_initializer_conv2d(),
                regularizer=self.reg)
            dec_name_bi = 'dec_b' + str(iter_i - 1)
            all_weights[dec_name_bi] = tf.Variable(
                tf.zeros([self.n_hidden[n_layers - iter_i - 1]],
                         dtype=tf.float32))  # , name = dec_name_bi
            iter_i = iter_i + 1

        dec_name_wi = 'dec_w' + str(iter_i - 1)
        all_weights[dec_name_wi] = tf.get_variable(
            dec_name_wi,
            shape=[
                self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]
            ],
            initializer=layers.xavier_initializer_conv2d(),
            regularizer=self.reg)
        dec_name_bi = 'dec_b' + str(iter_i - 1)
        all_weights[dec_name_bi] = tf.Variable(tf.zeros(
            [1], dtype=tf.float32))  # , name = dec_name_bi

        return all_weights
Exemplo n.º 29
0
	def __init__(self, params=None):
		self.log_dis = {}
		self.log_dis[0] = [0, 0]
		for i in range(1, 10):
			self.log_dis[i] = [self.log_dis[i-1][1] + 1, self.log_dis[i-1][1] + 2**i]		

		self.params = params

		self.keep_prob = params["keep_prob"]
		self.nb_epochs = params["nb_epochs"]

		# load word and char dictionaries
		dicts = pickle.load(open(params["dicts_file"], "rb"))
		self.w2i = dicts["w2i"]
		self.i2w = dicts["i2w"]
		self.c2i = dicts["c2i"]
		self.i2c = dicts["i2c"]		

		self.word_dim = params["word_dim"]
		self.word_vocab_size = len(self.w2i)
		self.char_dim = params["char_dim"]
		self.char_vocab_size = len(self.c2i)

		print("Sizes of word and char dictionaries: {}, {}".format(self.word_vocab_size, self.char_vocab_size))	

		self.word_emb = np.zeros(shape=(self.word_vocab_size, self.word_dim))		
		# load word embedding		
		if "word_emb" in params:
			print("pre-trained word embedding {} is being loaded ...".format(params["word_emb"]))
			self.load_word_emb(params["word_emb"])

		tf.reset_default_graph()
		
		# [sent, word]
		self.tf_word_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name="word_ids")
		# real length sents
		self.tf_sentence_lengths= tf.placeholder(dtype=tf.int32, shape=[None], name="sentence_lengths")

		# [sent, word, char]
		self.tf_char_ids = tf.placeholder(dtype=tf.int32, shape=[None, None, None], name="char_ids")

		# binary matrix representing the relationship between sents: [sent, sent]		
		self.tf_target_matrix = tf.placeholder(dtype=tf.int32, shape=[None, None], name="target_matrix")

		# keep_prob
		self.tf_keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name="keep_prob")

		# learning rate
		self.tf_learning_rate= tf.placeholder(dtype=tf.float32, shape=[], name="learning_rate")

		# distant sentences
		self.tf_distant_sents = tf.placeholder(dtype=tf.int32, shape=[None, None], name="tf_distant_sents")
		
		# load word embedding
		with tf.variable_scope("word_embedding"):
			tf_word_embeddings = tf.Variable(self.word_emb, dtype=tf.float32,
				trainable=True, name="word_embedding")
			embedded_words = tf.nn.embedding_lookup(tf_word_embeddings, self.tf_word_ids, name="embedded_words")
			self.input = embedded_words # sent, word, word_dim


		# CNN network to capture character-level features
		with tf.variable_scope("char_cnn"):
			tf_char_embeddings = tf.get_variable(name="char_embeddings",
												 dtype=tf.float32,
												 shape=[self.char_vocab_size, self.char_dim],
												 trainable=True,
												 initializer=xavier_initializer())

			conv = tf.nn.embedding_lookup(tf_char_embeddings,
										  self.tf_char_ids,
										  name="embedded_cnn_chars")
			for i, (ks, fil) in enumerate(self.params["conv"]):
				conv = tf.layers.conv2d(inputs=conv, # sent, word, char, feature
										filters=fil,
										kernel_size=(1, ks),
										strides=(1, 1),
										padding="same",
										name="conv_{}".format(i),
										kernel_initializer=xavier_initializer_conv2d())
			# sent, word, char, cnn_feature
			# shape = tf.shape(conv)
			# conv = tf.nn.dropout(x=conv, keep_prob=self.tf_keep_prob, noise_shape=[shape[0], 1, shape[2], shape[3]])
			# conv = tf.nn.dropout(x=conv, keep_prob=self.tf_keep_prob)
			self.char_cnn = tf.reduce_max(conv, axis=2) # sent, word, cnn_feature
			self.input = tf.nn.dropout(tf.concat([self.input, self.char_cnn], axis=-1), self.tf_keep_prob) # [sents, words, word_dim + cnn_features]

		# Bi-LSTM to generate final input representation in combination with both left and right contexts
		with tf.variable_scope("bi_lstm_words"):
			cell_fw = tf.contrib.rnn.LSTMCell(self.params["word_lstm_units"])
			cell_bw = tf.contrib.rnn.LSTMCell(self.params["word_lstm_units"])
			(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, self.input,
																		sequence_length=self.tf_sentence_lengths,
																		dtype=tf.float32)			

			bilstm_output = tf.concat([output_fw, output_bw], axis=-1) # [sents, words, 2*lstm_units]

			mask = tf.where(condition=tf.equal(self.tf_word_ids, self.w2i["<PAD>"]), x=-1e10*tf.ones_like(self.tf_word_ids, dtype=tf.float32), y=tf.zeros_like(self.tf_word_ids, dtype=tf.float32))
			mask = tf.tile(tf.expand_dims(mask, -1), (1, 1, 2*self.params["word_lstm_units"]))

			bilstm_output = bilstm_output + mask
			# bilstm_output = tf.nn.dropout(tf.reduce_max(bilstm_output, axis=1), self.tf_keep_prob) # [sents, 2*word_lstm_units]
			bilstm_output = tf.reduce_max(bilstm_output, axis=1) # [sents, 2*word_lstm_units]

		# represent sents in their contexts 
		with tf.variable_scope("sent_representation"):			
			# sent_rep = tf.layers.conv1d(inputs=sent_rep[None, :, :], filters=2*self.lstm_num_units, kernel_size=3, padding='same')			
			# sent_rep = tf.squeeze(sent_rep, axis=0)			
			sent_fw = tf.contrib.rnn.LSTMCell(self.params["sent_lstm_units"])
			sent_bw = tf.contrib.rnn.LSTMCell(self.params["sent_lstm_units"])
			(output_sent_fw, output_sent_bw), _ = tf.nn.bidirectional_dynamic_rnn(sent_fw, sent_bw, bilstm_output[None, :, :],
																		# sequence_length=self.tf_sentence_lengths,
																		dtype=tf.float32)
			sent_bilstm = tf.concat([output_sent_fw, output_sent_bw], axis=-1)

			sent_rep = tf.squeeze(sent_bilstm, axis=0) # [sents, 2*sent_lstm_units]

		with tf.variable_scope("self_att"):
			distant_embeddings = tf.get_variable(name="distant_embeddings",
												 dtype=tf.float32,
												 shape=[10, 30],
												 trainable=True,
												 initializer=xavier_initializer()
												 )
			embedded_distant_sent = tf.nn.embedding_lookup(distant_embeddings, self.tf_distant_sents, name="embedded_distant_sent") # [sent, sent, 30]

			x = sent_rep
			de = x.get_shape()[-1]
			w = tf.get_variable(dtype=tf.float32, shape=[de], trainable=True, name="w")
			w1 = tf.get_variable(dtype=tf.float32, shape=[de, de], trainable=True, name="W1")
			w2 = tf.get_variable(dtype=tf.float32, shape=[de, de], trainable=True, name="W2")
			b1 = tf.get_variable(dtype=tf.float32, shape=[de], trainable=True, name="b1")
			b = tf.get_variable(dtype=tf.float32, shape=[], trainable=True, name="b")

			x_w1 = tf.matmul(x, w1) # n, de
			s = tf.map_fn(lambda xj: x_w1 + tf.tensordot(w2, xj, axes=1) + b1, x) # n, n, de
			s = s + tf.layers.dense(embedded_distant_sent, de, use_bias=False, reuse=tf.AUTO_REUSE)
			f = tf.tensordot(tf.tanh(s), w, axes=1) + b # n, n
			weight = tf.nn.softmax(f, axis=1) # n, n
			h = tf.transpose(tf.map_fn(lambda weight_j: tf.transpose(x)*weight_j, weight), [0, 2, 1]) # [sents, sents, 2*sent_lstm_units]
						
		with tf.variable_scope("loss_and_opt"):
			self.logits = tf.nn.dropout(tf.layers.dense(inputs=h, units=2, activation=None), self.tf_keep_prob)

			self.logits = 0.5*(self.logits + tf.transpose(self.logits, [1, 0, 2]))

			self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(self.tf_target_matrix, 2),
														   logits=self.logits,
														   name="loss_function"))

			self.pred = tf.argmax(self.logits, axis=-1)
			eq = tf.cast(tf.equal(tf.cast(self.pred, tf.int32), self.tf_target_matrix), tf.float32)
			self.acc = tf.reduce_mean(eq)

			self.opt = tf.train.AdamOptimizer(learning_rate=self.tf_learning_rate).minimize(self.loss)
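
A hedged reading of the self_att block above (our interpretation of the code, not stated in the original): with sentence vectors x and the bias-free projection d of the distance embeddings, the pairwise score is

    f[j, i] = w . tanh(x[i] @ w1 + w2 @ x[j] + d[j, i] + b1) + b

the softmax over the second index turns each row of f into attention weights, and h[j, i] = weight[j, i] * x[i] rescales the sentence vectors that feed the final pairwise classifier.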
Exemplo n.º 30
0
def redSimple(input):
    out = tf.reshape(input, [-1, 128, 128, 1])
    out = conv2d(out,
                 64,
                 3,
                 activation_fn=tf.nn.relu,
                 weights_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)

    out = conv2d(out,
                 64,
                 3,
                 activation_fn=tf.nn.relu,
                 weights_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = max_pool2d(out, 3, stride=2)
    out = conv2d(out,
                 128,
                 3,
                 activation_fn=tf.nn.relu,
                 weights_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 128,
                 3,
                 activation_fn=tf.nn.relu,
                 weights_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = max_pool2d(out, 3, stride=2)

    out = conv2d(out,
                 128,
                 3,
                 activation_fn=tf.nn.relu,
                 weights_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 128,
                 3,
                 activation_fn=tf.nn.relu,
                 weights_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = max_pool2d(out, 3, stride=2)

    out = conv2d(out,
                 256,
                 3,
                 activation_fn=tf.nn.relu,
                 weights_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = conv2d(out,
                 256,
                 3,
                 activation_fn=tf.nn.relu,
                 weights_initializer=xavier_initializer_conv2d())
    out = batch_normalization(out)
    out = max_pool2d(out, 3, stride=2)

    out = tf.contrib.layers.flatten(out)

    out = fully_connected(out,
                          1024,
                          activation_fn=tf.nn.relu,
                          weights_initializer=xavier_initializer())
    out = fully_connected(out,
                          100,
                          activation_fn=None,
                          weights_initializer=xavier_initializer())
    return out
Exemplo n.º 31
0
    def generator_real(self, emg_data, z, reuse=False):
        # (None, 300, 16) -> (1, 100)
        # input = self.lstm(emg_data)
        _ = self.lstm(emg_data, reuse)
        input = z
        print(input.shape)
        # input = tf.concat([input, z],  1)
        # print(input.shape)

        test = True

        # Generator
        with tf.variable_scope('generator_real', reuse=reuse):
            net = slim.fully_connected(
                input,
                64 * 8 * 8,
                activation_fn=tf.nn.relu,
                weights_initializer=tflayers.xavier_initializer(),
                reuse=reuse)
            print(net)
            net = tf.reshape(net, [-1, 8, 8, 64])
            net = slim.conv2d(net, num_outputs=128, kernel_size=1, stride=1)
            print(net)

            if not test:
                with slim.arg_scope(
                    [slim.conv2d_transpose],
                        kernel_size=3,
                        stride=2,
                        weights_initializer=tflayers.xavier_initializer()):
                    with slim.arg_scope([slim.batch_norm],
                                        activation_fn=tf.nn.relu,
                                        is_training=(self.mode == 'train')):
                        net = slim.conv2d_transpose(net, num_outputs=128)
                        net = slim.batch_norm(net)
                        print(net)
                        net = slim.conv2d_transpose(
                            net, num_outputs=256)  # output : 32 x 32
                        net = slim.batch_norm(net)
                        print(net)
                        net = slim.conv2d_transpose(
                            net, num_outputs=512)  # output : 64 x 64
                        net = slim.batch_norm(net)
                        print(net)
                        net = slim.conv2d_transpose(
                            net, num_outputs=256)  # output : 128 x 128
                        net = slim.batch_norm(net)
                        print(net)

                net = slim.conv2d_transpose(
                    net,
                    num_outputs=3,
                    kernel_size=1,
                    stride=1,
                    weights_initializer=tflayers.xavier_initializer())
                print(net)

            else:
                with slim.arg_scope([slim.conv2d_transpose],
                                    kernel_size=3,
                                    stride=1,
                                    padding='SAME',
                                    weights_initializer=tflayers.xavier_initializer_conv2d()):
                    with slim.arg_scope([slim.batch_norm],
                                        activation_fn=tf.nn.relu):
                        # 2x Upsampling -> Conv2D
                        # Xavier -> Truncated normal
                        # 16 x 16
                        net = tf.image.resize_images(net, [16, 16])
                        net = slim.conv2d_transpose(net, num_outputs=128)
                        net = slim.batch_norm(net)
                        print(net)

                        # 32 x 32
                        net = tf.image.resize_images(net, [32, 32])
                        net = slim.conv2d_transpose(net, num_outputs=256)
                        net = slim.batch_norm(net)
                        print(net)

                        # 64 x 64
                        net = tf.image.resize_images(net, [64, 64])
                        net = slim.conv2d_transpose(net, num_outputs=512)
                        net = slim.batch_norm(net)
                        print(net)

                        # 128 x 128
                        net = tf.image.resize_images(net, [128, 128])
                        net = slim.conv2d_transpose(net, num_outputs=256)
                        net = slim.batch_norm(net)
                        print(net)

                net = slim.conv2d_transpose(
                    net,
                    num_outputs=1,
                    kernel_size=1,
                    stride=1,
                    padding='SAME',
                    activation_fn=tf.nn.tanh,
                    weights_initializer=tflayers.xavier_initializer_conv2d())
            print(net)

            return net