def model(self):
        """
        Build the model graph.

        :return: The predicted win probability for each of the (at most) 14 horses in a race
        """
        with tf.variable_scope(name_or_scope='race_predictor'):
            fc_0 = fc_layer(tf.layers.flatten(self._input),
                            256,
                            training=self.training,
                            name='fc_0')

            bi_0 = bilinear_layer(fc_0,
                                  256,
                                  training=self.training,
                                  name='bi_0')
            bi_1 = bilinear_layer(bi_0,
                                  256,
                                  training=self.training,
                                  name='bi_1')

            fc_1 = fc_layer(bi_1, 128, training=self.training, name='fc_1')

            win_output = tf.nn.softmax(tf.layers.dense(fc_1,
                                                       units=14,
                                                       activation=None),
                                       name='win_output')

            return win_output
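
A hedged sketch of training this head, assuming win_output is the tensor returned by model(); the winner placeholder and the optimizer are illustrative assumptions, not part of the original model. Since win_output is already softmax-normalized, a plain cross-entropy (rather than a *_with_logits op) fits here.

# Hypothetical one-hot labels marking the winning horse among the 14 slots.
winner = tf.placeholder(tf.float32, [None, 14])
# Cross-entropy on the already-softmaxed output, clipped for numerical stability.
loss = -tf.reduce_mean(
    tf.reduce_sum(winner * tf.log(tf.clip_by_value(win_output, 1e-10, 1.0)),
                  axis=1))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)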
Example #2
def VGG16(x, n_classes, is_pretrain=True):
    x = utils.conv_layer('conv1_1', x, 64, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv1_2', x, 64, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool1', x, filter=[1, 2, 2, 1], strides=[1, 2, 2, 1], is_max_pool=True)

    x = utils.conv_layer('conv2_1', x, 128, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv2_2', x, 128, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool2', x, filter=[1, 2, 2, 1], strides=[1, 2, 2, 1], is_max_pool=True)

    x = utils.conv_layer('conv3_1', x, 256, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv3_2', x, 256, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv3_3', x, 256, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool3', x, filter=[1, 2, 2, 1], strides=[1, 2, 2, 1], is_max_pool=True)

    x = utils.conv_layer('conv4_1', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv4_2', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv4_3', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool4', x, filter=[1, 2, 2, 1], strides=[1, 2, 2, 1], is_max_pool=True)

    x = utils.conv_layer('conv5_1', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv5_2', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv5_3', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool5', x, filter=[1, 2, 2, 1], strides=[1, 2, 2, 1], is_max_pool=True)

    x = utils.fc_layer('fc6', x, num_output=4096)
    x = utils.batch_normalization(x)
    x = utils.fc_layer('fc7', x, num_output=4096)
    x = utils.batch_normalization(x)
    x = utils.fc_layer('fc8', x, num_output=n_classes)

    return x
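
A minimal sketch of wiring this network to a classification loss, assuming fc8 returns raw logits and the same utils helpers as above; the input shape and class count below are illustrative assumptions.

# Hypothetical input pipeline: 224x224 RGB images with integer class labels.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
labels = tf.placeholder(tf.int32, [None])

logits = VGG16(images, n_classes=10, is_pretrain=True)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                   labels=labels))
train_op = tf.train.MomentumOptimizer(1e-4, 0.9).minimize(loss)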
Example #3
def critic(h):
    critic_h1 = utils.fc_layer(h, n_hidden[-1], 10, layer_name='critic_h1')
    out = utils.fc_layer(critic_h1,
                         10,
                         1,
                         layer_name='critic_h2',
                         act=tf.identity)
    return out
Example #4
    def forward(self, x, keep_prob):
        # DNN
        # first fully-connected layer
        hidden1 = fc_layer(x, self.input_dim, self.hidden_dim, 'layer1')
        # dropout
        hidden1 = dropout_layer(hidden1, keep_prob, 'layer1_dropout')

        # second fully-connected layer
        y = fc_layer(hidden1,
                     self.hidden_dim,
                     self.output_dim,
                     'layer2',
                     act=tf.identity)

        return y
Example #5
    def forward(self, x):
        # Forward direction cell
        with tf.variable_scope("lstm1"):
            lstm_fw_cell = get_lstm(20, 1 - 0.3, name="lstm_fw")
        # Backward direction cell
        with tf.variable_scope("lstm2"):
            lstm_bw_cell = get_lstm(20, 1 - 0.3, name="lstm_bw")

        outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell,
                                                          cell_bw=lstm_bw_cell,
                                                          dtype=tf.float32,
                                                          inputs=x)

        output_fw, output_bw = outputs
        # Concatenate the forward and backward outputs along the feature axis,
        # giving shape (batch, 8, 40) for 8 time steps and 2 * 20 hidden units.
        lstm_output = tf.concat([output_fw, output_bw], 2)
        lstm_output = tf.layers.flatten(lstm_output)
        # output layer
        y = fc_layer(lstm_output,
                     8 * 40,
                     10,
                     'layer_after_lstm',
                     act=tf.nn.sigmoid)

        return y
Example #6
    def forward(self, x):
        hidden = conv_layer(x, 5, 3, 6, self.regularizer)
        hidden = conv_layer(hidden, 3, 6, 12, self.regularizer)
        hidden = tf.reshape(hidden, [-1, 20 * 40 * 12])
        y = fc_layer(hidden, 20 * 40 * 12, 10, 'layer_after_conv')

        return y
Example #7
    def model(self):
        """
        Build the model graph.

        :return: The velocity and alpha estimates from which a single horse's race finish time (in centiseconds) is derived
        """
        with tf.variable_scope(name_or_scope='race_predictor'):
            fc_0 = fc_layer(tf.layers.flatten(self._input),
                            512,
                            training=self.training,
                            name='fc_0')

            bi_0 = bilinear_layer(fc_0,
                                  512,
                                  training=self.training,
                                  name='bi_0')
            bi_1 = bilinear_layer(bi_0,
                                  512,
                                  training=self.training,
                                  name='bi_1')

            velocity_output = tf.layers.dense(bi_1,
                                              units=1,
                                              activation=None,
                                              use_bias=False,
                                              name='velocity_output')
            alpha_output = tf.layers.dense(bi_1,
                                           units=1,
                                           activation=None,
                                           use_bias=False,
                                           name='alpha_output')

            return velocity_output, alpha_output
Example #8
    def naive_model(self):
        with tf.variable_scope(name_or_scope='game_rating'):
            # pre-processing layer
            fc_0 = fc_layer(self._features,
                            128,
                            training=self.training,
                            name='fc_0')
            # bi-linear layer
            bi_0 = bilinear_layer(fc_0,
                                  128,
                                  training=self.training,
                                  name='bi_0')
            # output layer
            playtime_output = fc_layer(bi_0,
                                       1,
                                       training=self.training,
                                       name='playtime_output')
            return playtime_output
Example #9
    def model(self):
        with tf.variable_scope(name_or_scope='game_rating'):
            # pre-processing layers
            semantic_fc_0 = fc_layer(tf.layers.flatten(self._semantic),
                                     512,
                                     training=self.training,
                                     name='semantic_fc_0')
            features_fc_0 = fc_layer(self._features,
                                     128,
                                     training=self.training,
                                     name='features_fc_0')
            # bi-linear layers
            semantic_bi_0 = bilinear_layer(semantic_fc_0,
                                           512,
                                           training=self.training,
                                           name='semantic_bi_0')
            features_bi_0 = bilinear_layer(features_fc_0,
                                           128,
                                           training=self.training,
                                           name='features_bi_0')
            # post-processing layers
            semantic_fc_1 = fc_layer(semantic_bi_0,
                                     128,
                                     training=self.training,
                                     name='semantic_fc_1')
            features_fc_1 = fc_layer(features_bi_0,
                                     128,
                                     training=self.training,
                                     name='features_fc_1')
            # merge the semantic and game-feature representations element-wise,
            # followed by a fully-connected output layer
            merge = tf.multiply(semantic_fc_1, features_fc_1, name='merge')
            # merge = tf.add_n([semantic_fc_1, features_fc_1], name='merge')
            # output layer
            playtime_output = fc_layer(merge,
                                       1,
                                       training=self.training,
                                       name='playtime_output')
            return playtime_output
Example #10
l2_param = 1e-5
lr = 1e-4
batch_size = 64
num_steps = 1200
num_class = 10
n_input = 4096
n_hidden = [500, 100]

with tf.name_scope('input'):
    X = tf.placeholder(dtype=tf.float32)
    y_true = tf.placeholder(dtype=tf.int32)
    train_flag = tf.placeholder(dtype=tf.bool)
    y_true_one_hot = tf.one_hot(y_true, num_class)

with tf.name_scope('generator'):
    h1 = utils.fc_layer(X, n_input, n_hidden[0], layer_name='hidden1')
    h2 = utils.fc_layer(h1, n_hidden[0], n_hidden[1], layer_name='hidden2')

with tf.name_scope('slice_data'):
    h2_s = tf.cond(train_flag, lambda: tf.slice(h2, [0, 0], [batch_size // 2, -1]), lambda: h2)
    h2_t = tf.cond(train_flag, lambda: tf.slice(h2, [batch_size // 2, 0], [batch_size // 2, -1]), lambda: h2)
    ys_true = tf.cond(train_flag, lambda: tf.slice(y_true_one_hot, [0, 0], [batch_size // 2, -1]), lambda: y_true_one_hot)

with tf.name_scope('classifier'):
    W_clf = tf.Variable(tf.truncated_normal([n_hidden[-1], num_class], stddev=1. / tf.sqrt(n_hidden[-1] / 2.)), name='clf_weight')
    b_clf = tf.Variable(tf.constant(0.1, shape=[num_class]), name='clf_bias')
    pred_logit = tf.matmul(h2_s, W_clf) + b_clf
    pred_softmax = tf.nn.softmax(pred_logit)
    clf_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_logit, labels=ys_true))
    clf_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(ys_true, 1), tf.argmax(pred_softmax, 1)), tf.float32))
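
l2_param and lr are declared above but unused in the visible part of this example. A hedged continuation in the spirit of Example #13 (weight decay over non-bias variables added to the classification loss) might look like:

l2_loss = l2_param * tf.add_n(
    [tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(clf_loss + l2_loss)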
Example #11
l2_param = 1e-5
lr = 1e-4
batch_size = 64
num_steps = 1200
num_class = 10
n_input = 4096
n_hidden = [500, 100]

with tf.name_scope('input'):
    X = tf.placeholder(dtype=tf.float32)
    y_true = tf.placeholder(dtype=tf.int32)
    train_flag = tf.placeholder(dtype=tf.bool)
    y_true_one_hot = tf.one_hot(y_true, num_class)

h1 = utils.fc_layer(X, n_input, n_hidden[0], layer_name='hidden1')
h2 = utils.fc_layer(h1, n_hidden[0], n_hidden[1], layer_name='hidden2')

with tf.name_scope('slice_data'):
    h2_s = tf.cond(train_flag,
                   lambda: tf.slice(h2, [0, 0], [batch_size // 2, -1]),
                   lambda: h2)
    h2_t = tf.cond(
        train_flag,
        lambda: tf.slice(h2, [batch_size // 2, 0], [batch_size // 2, -1]),
        lambda: h2)
    ys_true = tf.cond(
        train_flag,
        lambda: tf.slice(y_true_one_hot, [0, 0], [batch_size // 2, -1]),
        lambda: y_true_one_hot)
Example #12
    b_fc2 = utils.bias_variable([10], 'fc2_bias')
    pred_logit = tf.matmul(h_s, W_fc2) + b_fc2
    clf_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred_logit,
                                                labels=ys_true))
    clf_acc = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(ys_true, 1), tf.argmax(pred_logit, 1)),
                tf.float32))

# Random interpolation points between source and target features,
# used below for the critic's gradient penalty.
alpha = tf.random_uniform(shape=[batch_size // 2, 1], minval=0., maxval=1.)
differences = h_s - h_t
interpolates = h_t + (alpha * differences)
h2_whole = tf.concat([h_fc1, interpolates], 0)

with tf.name_scope('critic'):
    critic_h1 = utils.fc_layer(h2_whole, 1024, 100, layer_name='critic_h1')
    critic_out = utils.fc_layer(critic_h1,
                                100,
                                1,
                                layer_name='critic_h2',
                                act=tf.identity)

critic_s = tf.cond(train_flag,
                   lambda: tf.slice(critic_out, [0, 0], [batch_size // 2, -1]),
                   lambda: critic_out)
critic_t = tf.cond(
    train_flag,
    lambda: tf.slice(critic_out, [batch_size // 2, 0], [batch_size // 2, -1]),
    lambda: critic_out)
wd_loss = (tf.reduce_mean(critic_s) - tf.reduce_mean(critic_t))
gradients = tf.gradients(critic_out, [h2_whole])[0]
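
The example stops right after taking the critic's gradients at the interpolated points. A hedged completion in the standard WGAN-GP style would penalize gradient norms away from 1; gp_param below is a hypothetical penalty weight.

# Gradient penalty: push the critic's gradient norm at the interpolates toward 1.
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=1))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
# The critic maximizes wd_loss, i.e. minimizes its negation plus the penalty.
critic_loss = -wd_loss + gp_param * gradient_penalty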
Example #13
    def __init__(self, n_input, n_hidden, n_emb, num_class, clf_type, l2_w,
                 net_pro_w, batch_size):

        # each node's own attributes
        self.X = tf.sparse_placeholder(dtype=tf.float32)
        # each node's weighted neighbors' attributes
        self.X_nei = tf.sparse_placeholder(dtype=tf.float32)
        self.y_true = tf.placeholder(dtype=tf.float32)
        # domain label: source network [1, 0] or target network [0, 1]
        self.d_label = tf.placeholder(dtype=tf.float32)
        # GRL lambda: gradient-reversal scale
        self.Ada_lambda = tf.placeholder(dtype=tf.float32)
        self.dropout = tf.placeholder(tf.float32)
        # network proximity matrix of the source network
        self.A_s = tf.sparse_placeholder(dtype=tf.float32)
        # network proximity matrix of the target network
        self.A_t = tf.sparse_placeholder(dtype=tf.float32)
        # 1 if a node's label is observable, 0 otherwise
        self.mask = tf.placeholder(dtype=tf.float32)
        self.learning_rate = tf.placeholder(dtype=tf.float32)

        with tf.name_scope('Network_Embedding'):
            ## feature extractor 1
            h1_self = utils.fc_layer(self.X,
                                     n_input,
                                     n_hidden[0],
                                     layer_name='hidden1_self',
                                     input_type='sparse',
                                     drop=self.dropout)
            h2_self = utils.fc_layer(h1_self,
                                     n_hidden[0],
                                     n_hidden[1],
                                     layer_name='hidden2_self')

            ## feature extractor 2
            h1_nei = utils.fc_layer(self.X_nei,
                                    n_input,
                                    n_hidden[0],
                                    layer_name='hidden1_nei',
                                    input_type='sparse',
                                    drop=self.dropout)
            h2_nei = utils.fc_layer(h1_nei,
                                    n_hidden[0],
                                    n_hidden[1],
                                    layer_name='hidden2_nei')

            ##concatenation layer, final embedding vector representation
            self.emb = utils.fc_layer(tf.concat([h2_self, h2_nei], 1),
                                      n_hidden[-1] * 2,
                                      n_emb,
                                      layer_name='concat')

            ## pairwise constraint
            emb_s = tf.slice(self.emb, [0, 0], [int(batch_size / 2), -1])
            emb_t = tf.slice(self.emb, [int(batch_size / 2), 0],
                             [int(batch_size / 2), -1])
            # pairwise squared L2 distances between source nodes:
            # Dis[i, j] = ||e_i||^2 - 2 e_i . e_j + ||e_j||^2
            r_s = tf.reduce_sum(emb_s * emb_s, 1)
            r_s = tf.reshape(r_s, [-1, 1])
            Dis_s = r_s - 2 * tf.matmul(
                emb_s, tf.transpose(emb_s)) + tf.transpose(r_s)
            net_pro_loss_s = tf.reduce_mean(
                tf.sparse.reduce_sum(self.A_s * Dis_s, axis=1))

            # pairwise squared L2 distances between target nodes
            r_t = tf.reduce_sum(emb_t * emb_t, 1)
            r_t = tf.reshape(r_t, [-1, 1])
            Dis_t = r_t - 2 * tf.matmul(
                emb_t, tf.transpose(emb_t)) + tf.transpose(r_t)
            net_pro_loss_t = tf.reduce_mean(
                tf.sparse.reduce_sum(self.A_t * Dis_t, axis=1))

            self.net_pro_loss = net_pro_w * (net_pro_loss_s + net_pro_loss_t)

        with tf.name_scope('Node_Classifier'):
            ##node classification
            W_clf = tf.Variable(tf.truncated_normal([n_emb, num_class],
                                                    stddev=1. /
                                                    tf.sqrt(n_emb / 2.)),
                                name='clf_weight')
            b_clf = tf.Variable(tf.constant(0.1, shape=[num_class]),
                                name='clf_bias')
            pred_logit = tf.matmul(self.emb, W_clf) + b_clf

            if clf_type == 'multi-class':
                ### multi-class: softmax output
                loss = tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=pred_logit, labels=self.y_true)
                # count the loss only over labeled nodes
                loss = loss * self.mask
                self.clf_loss = tf.reduce_sum(loss) / tf.reduce_sum(self.mask)
                self.pred_prob = tf.nn.softmax(pred_logit)

            elif clf_type == 'multi-label':
                ### multi-label: sigmoid output
                loss = tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=pred_logit, labels=self.y_true)
                # count the loss only over labeled nodes
                # (each column is multiplied by the mask)
                loss = loss * self.mask[:, None]
                self.clf_loss = tf.reduce_sum(loss) / tf.reduce_sum(self.mask)
                self.pred_prob = tf.sigmoid(pred_logit)

        with tf.name_scope('Domain_Discriminator'):
            h_grl = flip_gradient(self.emb, self.Ada_lambda)
            ##MLP for domain classification
            h_dann_1 = utils.fc_layer(h_grl,
                                      n_emb,
                                      128,
                                      layer_name='dann_fc_1')
            h_dann_2 = utils.fc_layer(h_dann_1,
                                      128,
                                      128,
                                      layer_name='dann_fc_2')
            W_domain = tf.Variable(tf.truncated_normal([128, 2],
                                                       stddev=1. /
                                                       tf.sqrt(128 / 2.)),
                                   name='dann_weight')
            b_domain = tf.Variable(tf.constant(0.1, shape=[2]),
                                   name='dann_bias')
            d_logit = tf.matmul(h_dann_2, W_domain) + b_domain
            self.d_softmax = tf.nn.softmax(d_logit)
            self.domain_loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=d_logit, labels=self.d_label))

        all_variables = tf.trainable_variables()
        self.l2_loss = l2_w * tf.add_n(
            [tf.nn.l2_loss(v) for v in all_variables if 'bias' not in v.name])

        self.total_loss = self.net_pro_loss + self.clf_loss + self.domain_loss + self.l2_loss

        self.train_op = tf.train.MomentumOptimizer(
            self.learning_rate, 0.9).minimize(self.total_loss)
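
A minimal sketch of one training step for this class, assuming an instance named model and hypothetical batch values (the sparse inputs as tf.SparseTensorValue, the rest as numpy arrays):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, loss_val = sess.run(
        [model.train_op, model.total_loss],
        feed_dict={model.X: xb_self,        # hypothetical batch values
                   model.X_nei: xb_nei,
                   model.y_true: yb,
                   model.d_label: db,
                   model.mask: mb,
                   model.Ada_lambda: 0.1,
                   model.dropout: 0.5,
                   model.learning_rate: 1e-2})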
Example #14
num_class = 2
n_input = xs.shape[1]
n_hidden = [500]

tf.set_random_seed(0)
np.random.seed(0)

with tf.name_scope('input'):
    X = tf.sparse_placeholder(dtype=tf.float32)
    y_true = tf.placeholder(dtype=tf.int32)
    train_flag = tf.placeholder(dtype=tf.bool)
    y_true_one_hot = tf.one_hot(y_true, num_class)

h1 = utils.fc_layer(X,
                    n_input,
                    n_hidden[0],
                    layer_name='hidden1',
                    input_type='sparse')

with tf.name_scope('slice_data'):
    h1_s = tf.cond(train_flag,
                   lambda: tf.slice(h1, [0, 0], [batch_size // 2, -1]),
                   lambda: h1)
    h1_t = tf.cond(
        train_flag,
        lambda: tf.slice(h1, [batch_size // 2, 0], [batch_size // 2, -1]),
        lambda: h1)
    ys_true = tf.cond(
        train_flag,
        lambda: tf.slice(y_true_one_hot, [0, 0], [batch_size // 2, -1]),
        lambda: y_true_one_hot)
Example #15
num_step = 5000
batch_size = 64
tf.set_random_seed(0)

n_input = xs.shape[1]
num_class = 2
n_hidden = [20]

with tf.name_scope('input'):
    X = tf.placeholder(dtype=tf.float32)
    y_true = tf.placeholder(dtype=tf.int32)
    train_flag = tf.placeholder(dtype=tf.bool)
    y_true_one_hot = tf.one_hot(y_true, num_class)

with tf.name_scope('generator'):
    h1 = utils.fc_layer(X, n_input, n_hidden[0], layer_name='hidden1', input_type='dense')

with tf.name_scope('slice_data'):
    h1_s = tf.cond(train_flag, lambda: tf.slice(h1, [0, 0], [batch_size // 2, -1]), lambda: h1)
    h1_t = tf.cond(train_flag, lambda: tf.slice(h1, [batch_size // 2, 0], [batch_size // 2, -1]), lambda: h1)
    ys_true = tf.cond(train_flag, lambda: tf.slice(y_true_one_hot, [0, 0], [batch_size // 2, -1]), lambda: y_true_one_hot)

with tf.name_scope('classifier'):
    W_clf = tf.Variable(tf.truncated_normal([n_hidden[-1], num_class], stddev=1. / tf.sqrt(n_hidden[-1] / 2.)), name='clf_weight')
    b_clf = tf.Variable(tf.constant(0.1, shape=[num_class]), name='clf_bias')
    pred_logit = tf.matmul(h1_s, W_clf) + b_clf
    pred_softmax = tf.nn.softmax(pred_logit)
    clf_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_logit, labels=ys_true))
    clf_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(ys_true, 1), tf.argmax(pred_softmax, 1)), tf.float32))

alpha = tf.random_uniform(shape=[batch_size // 2, 1], minval=0., maxval=1.)
Example #16
num_step = 10000
num_class = 2
n_input = xs.shape[1]
n_hidden = [500]

tf.set_random_seed(0)
np.random.seed(0)

with tf.name_scope('input'):
    X = tf.sparse_placeholder(dtype=tf.float32)
    y_true = tf.placeholder(dtype=tf.int32)
    train_flag = tf.placeholder(dtype=tf.bool)
    y_true_one_hot = tf.one_hot(y_true, num_class)

with tf.name_scope('generator'):
    h1 = utils.fc_layer(X, n_input, n_hidden[0], layer_name='hidden1', input_type='sparse')

with tf.name_scope('slice_data'):
    h1_s = tf.cond(train_flag, lambda: tf.slice(h1, [0, 0], [batch_size // 2, -1]), lambda: h1)
    ys_true = tf.cond(train_flag, lambda: tf.slice(y_true_one_hot, [0, 0], [batch_size // 2, -1]), lambda: y_true_one_hot)
    h1_t = tf.cond(train_flag, lambda: tf.slice(h1, [batch_size // 2, 0], [batch_size // 2, -1]), lambda: h1)

with tf.name_scope('classifier'):
    W_clf = tf.Variable(tf.truncated_normal([n_hidden[-1], num_class], stddev=1. / tf.sqrt(n_hidden[-1] / 2.)), name='clf_weight')
    b_clf = tf.Variable(tf.constant(0.1, shape=[num_class]), name='clf_bias')
    pred_logit = tf.matmul(h1_s, W_clf) + b_clf
    pred_softmax = tf.nn.softmax(pred_logit)
    clf_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_logit, labels=ys_true))
    clf_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(ys_true, 1), tf.argmax(pred_softmax, 1)), tf.float32))
    clf_loss_sum = tf.summary.scalar('clf_loss', clf_loss)
    clf_acc_sum = tf.summary.scalar('clf_acc', clf_acc)
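
A hedged sketch of collecting and writing these two summaries; the log directory is a placeholder.

merged_sum = tf.summary.merge([clf_loss_sum, clf_acc_sum])
writer = tf.summary.FileWriter('./logs', tf.get_default_graph())
# Inside the training loop (sketch):
#   summary, _ = sess.run([merged_sum, train_op], feed_dict=...)
#   writer.add_summary(summary, step)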
Example #17
                        name=None)
'''
hidden21 = conv_layer(pool20, [filter_size, filter_size, filter_nb_4, filter_nb_5], 'conv-17', stride, keep_prob, is_training, act=activation_func)
hidden22 = conv_layer(hidden21, [filter_size, filter_size, filter_nb_5, filter_nb_5], 'conv-18', stride, keep_prob, is_training, act=activation_func)
hidden23 = conv_layer(hidden22, [filter_size, filter_size, filter_nb_5, filter_nb_5], 'conv-19', stride, keep_prob, is_training, act=activation_func)
hidden24 = conv_layer(hidden23, [filter_size, filter_size, filter_nb_5, filter_nb_5], 'conv-20', stride, keep_prob, is_training, act=activation_func)

pool25 = tf.nn.max_pool(hidden24, [1, 3, 3, 1], [1, 3, 3, 1], padding='SAME', data_format='NHWC', name=None)
'''

pool25 = tf.reshape(pool20, shape=[-1, 3 * 3 * filter_nb_4])

#fc14 = fc_layer(hidden17, [3 * 3 * filter_nb_4, 40], 'fc-1', keep_prob, is_training)

y = fc_layer(pool25, [3 * 3 * filter_nb_4, template_dim],
             'fc-1',
             keep_prob,
             act=None)

#############################################
################ THE LOSS ###################
#############################################
""" Loss for regression """
with tf.name_scope('training'):
    euclidean_loss = tf.reduce_mean(tf.reduce_sum(tf.square(y - y_), axis=1))

tf.summary.scalar('train_euclidean_loss', euclidean_loss)
""" Learning rate """
with tf.name_scope('learning_rate'):
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(initial_lr,
                                               global_step,