Example #1
    def metric_fn(labels, logits):
      """Evaluation metric function.

      Evaluates accuracy.

      This function is executed on the CPU and should not directly reference
      any Tensors in the rest of the `model_fn`. To pass Tensors from the model
      to the `metric_fn`, provide as part of the `eval_metrics`. See
      https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
      for more information.

      Arguments should match the list of `Tensor` objects passed as the second
      element in the tuple passed to `eval_metrics`.

      Args:
        labels: `Tensor` with shape `[batch]`.
        logits: `Tensor` with shape `[batch, num_classes]`.

      Returns:
        A dict of the metrics to return from evaluation.
      """
      predictions = tf.argmax(logits, axis=1)
      top_1_accuracy = tf.metrics.accuracy(labels, predictions)
      in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
      top_5_accuracy = tf.metrics.mean(in_top_5)
      tf.summary.scalar('top_1_accuracy', top_1_accuracy[1])
      tf.summary.scalar('top_5_accuracy', top_5_accuracy[1])

      return {
          'top_1_accuracy': top_1_accuracy,
          'top_5_accuracy': top_5_accuracy,
      }
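As the docstring notes, this `metric_fn` is handed to the estimator through `eval_metrics` rather than called directly. A minimal sketch of that wiring, assuming a TF 1.x `model_fn` and `tf.contrib.tpu`; `build_network` is a hypothetical stand-in for the real model body:

    def model_fn(features, labels, mode, params):
      logits = build_network(features)  # hypothetical model body, not part of the original
      loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
      return tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=loss,
          # second element matches metric_fn's (labels, logits) argument order
          eval_metrics=(metric_fn, [labels, logits]))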
Example #2
    def configure_summary(self, loss):
        """
        Set up the experiment summary.

        Args: 
            loss (tf.Tensor): the current loss of the experiment

        Returns: 
            None 
        """
        tf.summary("loss", loss)
Example #3
def mlp_train(train_data, test_data):
    sample = next(single_file(train_data[0]))

    size_x = np.array(sample[0]).shape
    size_y = np.array(sample[1]).shape

    x = tf.placeholder(tf.float32, size_x)
    y_ = tf.placeholder(tf.float32, size_y)

    sample = None

    # Random (rather than all-zero) weight initialization so the hidden units
    # do not stay symmetric / dead during training.
    W1 = tf.Variable(tf.truncated_normal([size_x[-1], 30], stddev=0.1))
    b1 = tf.Variable(tf.zeros([30]))
    h1 = tf.nn.softplus(tf.matmul(x, W1) + b1)

    W2 = tf.Variable(tf.truncated_normal([30, 60], stddev=0.1))
    b2 = tf.Variable(tf.zeros([60]))
    h2 = tf.nn.relu(tf.matmul(h1, W2) + b2)

    W3 = tf.Variable(tf.truncated_normal([60, size_y[-1]], stddev=0.1))
    b3 = tf.Variable(tf.zeros([size_y[-1]]))
    h3 = tf.matmul(h2, W3) + b3

    y = tf.sigmoid(h3)
    logits = tf.where(tf.greater(y, 0.5), tf.ones(tf.shape(y)),
                      tf.zeros(tf.shape(y)))
    #y = tf.nn.softmax(tf.matmul(x, W) + b)
    #cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y) + (1 - y_) * tf.log(1 - y), reduction_indices=[1]))
    # Small epsilon keeps tf.log away from log(0).
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y + 1e-10) +
                                   (1 - y_) * tf.log(1 - y + 1e-10))
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    for train_data_ in train_data:
        for [batch_xs, batch_ys] in single_file(train_data_):
            if batch_xs[0][1] == batch_xs[0][-1]:
                print("LFT and NotLFT are same")
                raise ValueError("LFT and NotLFT are same")
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    correct_prediction = tf.equal(logits, y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    accuracy_total = 0.0
    batches = 0.0

    for test_data_ in test_data:
        for [batch_xs, batch_ys] in single_file(test_data_):
            accuracy_total += sess.run(accuracy,
                                       feed_dict={x: batch_xs, y_: batch_ys})
            batches += 1
        avg_acc = accuracy_total / batches
        tf.summary.scalar("average_accuracy", avg_acc)

        print(avg_acc)
Example #4
    def model_build(self, X, Z):

        self.D_loss = tf.reduce_mean(
            tf.log(self.Discriminator(X)) +
            tf.log(1 - self.Discriminator(self.Generator(Z))))
        self.G_loss = tf.reduce_mean(
            tf.log(self.Discriminator(self.Generator(Z))))
        tf.summary.scalar('D_loss', self.D_loss)
        tf.summary.scalar('G_loss', self.G_loss)
        merged = tf.summary.merge_all()

        return self.D_loss, self.G_loss, merged
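A usage sketch for the merged summary returned above; the instance name `gan`, the session `sess`, the writer path, and the batch tensors `x_batch`, `z_batch` are assumptions, not part of the original model:

# Sketch only: feed the returned merged summary into a FileWriter during training.
d_loss, g_loss, merged = gan.model_build(X, Z)      # gan is a hypothetical instance
writer = tf.summary.FileWriter('./logs', sess.graph)
for step in range(num_steps):
    summary_str = sess.run(merged, feed_dict={X: x_batch, Z: z_batch})
    writer.add_summary(summary_str, global_step=step)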
Example #5
def main():
    X_train, X_test, t_train, t_test = get_normalized_data()
    ann = ANN([500,300])
    session = tf.InteractiveSession()
    ann.set_session(session)
    ann.fit(X_train, X_test, t_train, t_test, show_fig = True)
    writer = tf.summary.FileWriter('./tensorboard_logs/demo1')
    writer.add_graph(session.graph)
Example #6
    def histo_summary(self, tag, values, step, bins=1000):
        """Log a histogram of the tensor of values."""

        # Create a histogram using numpy
        counts, bin_edges = np.histogram(values, bins=bins)

        # Fill the fields of the histogram proto
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values**2))

        # Drop the start of the first bin
        bin_edges = bin_edges[1:]

        # Add bin edges and counts
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)

        # Create and write Summary
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
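A short usage sketch for the helper above; the `Logger` class name and log directory are assumptions (the method only requires that the surrounding object expose `self.writer`, e.g. a `tf.summary.FileWriter`):

    logger = Logger('./logs')           # hypothetical wrapper owning a FileWriter
    values = np.random.randn(10000)     # any array of values works
    logger.histo_summary('fc1/weights', values, step=100)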
Example #7
 def list_of_scalars_summary(self, tag_value_pairs, step):
     """Log scalar variables."""
     summary = tf.Summary(value=[
         tf.Summary.Value(tag=tag, simple_value=value)
         for tag, value in tag_value_pairs
     ])
     self.writer.add_summary(summary, step)
Example #8
def save_losses_tensorboard(callback, name, loss, batch_no):
    summary = tf.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = loss
    summary_value.tag = name
    callback.writer.add_summary(summary, batch_no)
    callback.writer.flush()
Example #9
    def on_batch_end(self, batch, logs=None):
        # Pop the validation logs and handle them separately with
        # `self.val_writer`. Also rename the keys so that they can
        # be plotted on the same figure with the training metrics
        logs = logs or {}
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
        for name, value in val_logs.items():
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.val_writer.add_summary(summary, batch)
        self.val_writer.flush()

        # Pass the remaining logs to `TensorBoard.on_batch_end`
        logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
        super(TrainValTensorBoard, self).on_batch_end(batch, logs)
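Since `TrainValTensorBoard` subclasses the Keras `TensorBoard` callback, it is attached to training like any other callback. A sketch, assuming its constructor forwards the usual `TensorBoard` arguments and that `model`, `x_train`, `y_train`, `x_val`, `y_val` exist:

    callback = TrainValTensorBoard(log_dir='./logs')
    model.fit(x_train, y_train,
              validation_data=(x_val, y_val),
              epochs=10,
              callbacks=[callback])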
Example #10
    def init_graph(self):
        # dtype: the data type, typically a numeric type such as tf.float32 or tf.float64
        # shape: the tensor shape; defaults to None, or can be multi-dimensional
        #        (e.g. [2, 3], or [None, 3] for 3 columns and any number of rows)
        # name: the placeholder's name
        # placeholders carry no value until they are fed
        self.x = tf.placeholder(tf.int32, name="x")
        self.y = tf.placeholder(tf.int32, name="y")
        # two arguments: the initial value and a name
        # a variable that holds an initial value
        self.weight = tf.Variable(10, name="weight")
        self.out = tf.add(tf.multiply(self.x, self.weight), self.y)
        self.out = tf.nn.sigmoid(self.out)
        self.loss = tf.nn.sigmoid(self.out)
        # global step counter (number of gradient updates so far)
        self.global_step = tf.Variable(0, trainable=False)
        # optimizer
        opt = tf.train.GradientDescentOptimizer(self.learning_rate)
        # inspect the trainable variables
        trainable_params = tf.trainable_variables()
        print(trainable_params)
        # compute the gradients of the loss with respect to the trainable variables;
        # the result is a list of tensors, one per variable, each shaped like its variable
        gradients = tf.gradients(self.loss, trainable_params)

        # After the forward and backward passes we have a gradient for every weight.
        # Instead of applying those gradients directly, global-norm clipping first
        # computes the global norm of all gradients (the square root of the sum of
        # their squared norms). If it exceeds clip_norm, every gradient is multiplied
        # by the scale factor clip_norm / global_norm, which lies in (0, 1): the
        # larger the global norm, the smaller the factor. This guarantees that the
        # combined gradient norm used in one update stays within clip_norm.
        # tf.clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None)
        # t_list is the list of gradient tensors and clip_norm is the threshold;
        # the function returns the clipped gradients and the global norm of all
        # tensors. (A small numeric sketch follows this example.)
        clip_gradients, _ = tf.clip_by_global_norm(gradients, 5)
        # apply the parameter updates
        # grads_and_vars: list of (gradient, variable) pairs
        # global_step: incremented by one after the variables have been updated
        # name: optional name for the returned operation; defaults to the name
        #       passed to the Optimizer constructor
        self.train_op = opt.apply_gradients(zip(clip_gradients,
                                                trainable_params),
                                            global_step=self.global_step)
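A small numeric sketch of the clipping behaviour described in the comments above; the values are chosen only for illustration:

# global_norm = sqrt(3^2 + 4^2) = 5.0; with clip_norm = 2.5 every gradient
# is scaled by 2.5 / 5.0 = 0.5.
grads = [tf.constant([3.0]), tf.constant([4.0])]
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=2.5)
with tf.Session() as demo_sess:
    print(demo_sess.run(clipped))       # [array([1.5]), array([2.])]
    print(demo_sess.run(global_norm))   # 5.0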
Example #11
    def eval_once(self, sess):
        with sess.as_default(), sess.graph.as_default():
            global_step, _ = sess.run(
                [tf.contrib.framework.get_global_step(), self.copy_params.op])

            done = False
            state = atari_helpers.atari_make_initial_state(
                self.sp.process(self.env.reset()))
            total_reward = 0.0
            episode_length = 0

            while not done:
                action_probs = self._policy_net_predict(state, sess)
                action = np.random.choice(np.arange(len(action_probs)),
                                          p=action_probs)
                next_state, reward, done, _ = self.env.step(action)
                next_state = atari_helpers.atari_make_next_state(
                    state, self.sp.process(next_state))
                total_reward += reward
                episode_length += 1
                state = next_state

            episode_summary = tf.Summary()
            episode_summary.value.add(simple_value=total_reward,
                                      tag="eval/total_reward")
            episode_summary.value.add(simple_value=episode_length,
                                      tag="eval/episode_length")
            self.summary_writer.add_summary(episode_summary, global_step)
            self.summary_writer.flush()

            if self.saver is not None:
                self.saver.save(sess, self.checkpoint_path)

            tf.logging.info(
                "Eval results at step {}: total_reward {},episode_length {}".
                format(global_step, total_reward, episode_length))

            return total_reward, episode_length
Example #12
    def image_summary(self, tag, images, step):
        """Log a list of images."""

        img_summaries = []
        for i, img in enumerate(images):
            # Write the image to a string
            try:
                s = StringIO()
            except:
                s = BytesIO()
            scipy.misc.toimage(img).save(s, format="png")

            # Create an Image object
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
                                       height=img.shape[0],
                                       width=img.shape[1])
            # Create a Summary value
            img_summaries.append(
                tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))

        # Create and write Summary
        summary = tf.Summary(value=img_summaries)
        self.writer.add_summary(summary, step)
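A usage sketch for the image helper above; the `Logger` name and the batch of images are assumptions:

    logger = Logger('./logs')                             # hypothetical wrapper owning a FileWriter
    images = [np.random.rand(64, 64) for _ in range(4)]   # list of HxW arrays
    logger.image_summary('samples', images, step=10)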
Example #13
def kl_dist(A, B):
    c1 = tf.multiply(A, tf.log(tf.div(A, B)))
    c2 = tf.subtract(B, A)
    out = tf.reduce_sum(c1 + c2)  # generalized KL divergence: sum(A*log(A/B) + B - A)
    return out
Example #14
 def scalar_summary(self, tag, value, step):
     """Log a scalar variable."""
     summary = tf.Summary(
         value=[tf.Summary.Value(tag=tag, simple_value=value)])
     self.writer.add_summary(summary, step)
Example #15
 def scalar_summary(self, tag, value, step):
     """Add scalar summary."""
     summary = tf.Summary(
         value=[tf.Summary.Value(tag=tag, simple_value=value)])
     self.writer.add_summary(summary, step)
Example #16
y_data = np.square(x_data) - 0.5
noise = np.random.normal(0, 0.05,
                         x_data.shape)  # centered at 0, scale 0.05, same shape as x_data
with tf.name_scope('input'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
# one input neuron (x_data has a single feature), 10 hidden neurons, and one output
# neuron (y_data also has a single feature); a sketch of add_layer follows this example
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# hidden layer: takes xs (in_size 1) and produces 10 outputs (the 10 hidden neurons above)
prediction = add_layer(l1, 10, 1, activation_function=None)
# output layer: takes the hidden-layer activations and returns a single output
with tf.name_scope('loss'):
    loss = tf.reduce_mean(
        tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
# reduce_sum sums the squared errors per example (reduction_indices=[1] collapses each
# row) and reduce_mean averages them over the batch
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# training step: gradient descent with learning rate 0.1, minimizing the loss

init = tf.global_variables_initializer()
sess = tf.Session()
writer = tf.summary("logs/", sess.graph)

#with tf.Session() as sess:
#sess.run(init)
'''for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
        # anything computed through placeholders must be fed'''
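The snippet above calls add_layer without defining it. A minimal sketch of such a helper, consistent with how it is called (add_layer(inputs, in_size, out_size, activation_function)); the initialization choices are assumptions:

def add_layer(inputs, in_size, out_size, activation_function=None):
    # One fully connected layer: weights [in_size, out_size], biases [1, out_size].
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    wx_plus_b = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        return wx_plus_b
    return activation_function(wx_plus_b)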
Example #17
File: train.py  Project: quyf88/ocr
import tensorflow as tf

model = 'models/Angle-model.pb'  # change this to the path of your own .pb file
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
graph_def.ParseFromString(tf.gfile.FastGFile(model, 'rb').read())
tf.import_graph_def(graph_def, name='graph')
print(graph.get_operations())  # list the ops imported from the .pb file
Example #18
    def initialize(self, config, num_classes):
        """
            Initialize the graph from scratch according config.
        """
        with self.graph.as_default():
            with self.sess.as_default():
                G_grad_splits = []
                D_grad_splits = []
                average_dict = {}
                concat_dict = {}

                def insert_dict(_dict, k, v):
                    if k in _dict:
                        _dict[k].append(v)
                    else:
                        _dict[k] = [v]

                # Set up placeholders
                h, w = config.image_size
                channels = config.channels
                self.disc_counter = config.disc_counter

                self.mode = config.mode

                self.aux_matcher = imp.load_source(
                    "network_model", config.aux_matcher_definition)

                summaries = []

                self.images = tf.placeholder(tf.float32,
                                             shape=[None, h, w, channels],
                                             name="images")
                self.t = tf.placeholder(tf.float32,
                                        shape=[None, h, w, channels])
                self.learning_rate = tf.placeholder(tf.float32,
                                                    name="learning_rate")
                self.keep_prob = tf.placeholder(tf.float32, name="keep_prob")
                self.phase_train = tf.placeholder(tf.bool, name="phase_train")
                self.global_step = tf.Variable(0,
                                               trainable=False,
                                               dtype=tf.int32,
                                               name="global_step")

                self.setup_network_model(config, num_classes)

                if self.mode == "target":
                    self.perturb, self.G = self.generator(self.images, self.t)
                else:
                    self.perturb, self.G = self.generator(self.images)

                ########################## GAN LOSS ###########################
                self.D_real = self.discriminator(self.images)
                self.D_fake = self.discriminator(self.G)
                d_loss_real = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=self.D_real, labels=tf.ones_like(self.D_real)))
                d_loss_fake = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=self.D_fake, labels=tf.zeros_like(self.D_fake)))
                g_adv_loss = tf.reduce_mean(
                    tf.nn.sigmoid_cross_entropy_with_logits(
                        logits=self.D_fake, labels=tf.ones_like(self.D_fake)))
                self.d_loss = d_loss_real + d_loss_fake

                ########################## IDENTITY LOSS #######################
                with slim.arg_scope(inception_arg_scope()):
                    self.fake_feat, _ = self.aux_matcher.inference(
                        self.G,
                        bottleneck_layer_size=512,
                        phase_train=False,
                        keep_probability=1.0,
                    )
                    if self.mode == "target":
                        self.real_feat, _ = self.aux_matcher.inference(
                            self.t,
                            bottleneck_layer_size=512,
                            phase_train=False,
                            keep_probability=1.0,
                            reuse=True,
                        )
                    else:
                        self.real_feat, _ = self.aux_matcher.inference(
                            self.images,
                            bottleneck_layer_size=512,
                            phase_train=False,
                            keep_probability=1.0,
                            reuse=True,
                        )
                if self.mode == "target":
                    identity_loss = tf.reduce_mean(
                        1.0 -
                        (tfutils.cosine_pair(self.fake_feat, self.real_feat) +
                         1.0) / 2.0)
                else:
                    identity_loss = tf.reduce_mean(
                        tfutils.cosine_pair(self.fake_feat, self.real_feat))
                identity_loss = config.idt_loss_factor * identity_loss

                ########################## PERTURBATION LOSS #####################
                perturb_loss = config.perturb_loss_factor * tf.reduce_mean(
                    tf.maximum(
                        tf.zeros((tf.shape(self.perturb)[0])) + config.MAX_PERTURBATION,
                        tf.norm(tf.reshape(self.perturb,
                                           (tf.shape(self.perturb)[0], -1)),
                                axis=1)))

                ########################## PIXEL LOSS ############################
                pixel_loss = 1000.0 * tf.reduce_mean(
                    tf.abs(self.G - self.images))

                self.g_loss = g_adv_loss + identity_loss + perturb_loss

                ################### LOSS SUMMARY ###################
                insert_dict(average_dict, "g_loss", self.g_loss)
                insert_dict(average_dict, "d_loss", self.d_loss)
                insert_dict(average_dict, "gadv_loss", g_adv_loss)
                insert_dict(average_dict, "idt_loss", identity_loss)
                insert_dict(average_dict, "prt_loss", perturb_loss)
                insert_dict(average_dict, "pxl_loss", pixel_loss)

                ################# VARIABLES TO UPDATE #################
                G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope="Generator")
                D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope="Discriminator")
                self.train_G_op = tf.train.AdamOptimizer(self.learning_rate,
                                                         beta1=0.5,
                                                         beta2=0.9).minimize(
                                                             self.g_loss,
                                                             var_list=G_vars)
                self.train_D_op = tf.train.AdamOptimizer(self.learning_rate,
                                                         beta1=0.5,
                                                         beta2=0.9).minimize(
                                                             self.d_loss,
                                                             var_list=D_vars)

                for k, v in average_dict.items():
                    v = tfutils.average_tensors(v)
                    average_dict[k] = v
                    tfutils.insert(k, v)
                    if "loss" in k:
                        summaries.append(tf.summary.scalar("losses/" + k, v))
                    elif "acc" in k:
                        summaries.append(tf.summary.scalar("acc/" + k, v))
                    else:
                        summaries.append(tf.summary.scalar(k, v))
                for k, v in concat_dict.items():
                    v = tf.concat(v, axis=0, name="merged_" + k)
                    concat_dict[k] = v
                    tfutils.insert(k, v)
                trainable_variables = tf.trainable_variables()

                fn = [
                    var for var in tf.trainable_variables()
                    if config.aux_matcher_scope in var.name
                ]
                print(trainable_variables)

                self.update_global_step_op = tf.assign_add(self.global_step, 1)
                summaries.append(
                    tf.summary.scalar("learning_rate", self.learning_rate))
                self.summary_op = tf.summary.merge(summaries)

                self.sess.run(tf.local_variables_initializer())
                self.sess.run(tf.global_variables_initializer())
                self.saver = tf.train.Saver(trainable_variables,
                                            max_to_keep=None)
                f_saver = tf.train.Saver(fn)
                f_saver.restore(self.sess, config.aux_matcher_path)

                self.watch_list = tfutils.get_watchlist()
Example #19
    def __init__(self, data_dir):
        """
            data_directory : path like /home/rajat/mlproj/dataset/
                            includes the dataset folder with '/'
            Initialize all your variables here
        """
        self.data_directory = data_dir

        self.train_len = 33000
        self.track_digits = 0
        self.track_digits_position = 1
        self.track_images = 0
        self.batch_size = 75

        self.sess = tf.InteractiveSession()

        # initializing input images, input lengths and length of sequence
        self.x = tf.placeholder(tf.float32, shape=[None, 4096, 3])
        self.y_digit_length = tf.placeholder(tf.float32, shape=[None, 10])
        self.y_digit1 = tf.placeholder(tf.float32, shape=[None, 10])
        self.y_digit2 = tf.placeholder(tf.float32, shape=[None, 10])
        self.y_digit3 = tf.placeholder(tf.float32, shape=[None, 10])
        self.y_digit4 = tf.placeholder(tf.float32, shape=[None, 10])
        self.y_digit5 = tf.placeholder(tf.float32, shape=[None, 10])

        # x_temp = np.zeros([10,4096,3],dtype=float,order='C')

        # initializing weights and biases and model
        # first conv layer
        self.W_conv1 = self.weight_variable([5, 5, 3, 32])
        self.b_conv1 = self.bias_variable([32])
        self.x_image = tf.reshape(self.x, [-1, 64, 64, 3])
        self.h_conv1 = tf.nn.relu(
            self.conv2d(self.x_image, self.W_conv1) + self.b_conv1)
        self.h_pool1 = self.max_pol_2x2(self.h_conv1)

        # second conv layer
        self.W_conv2 = self.weight_variable([5, 5, 32, 64])
        self.b_conv2 = self.bias_variable([64])
        self.h_conv2 = tf.nn.relu(
            self.conv2d(self.h_pool1, self.W_conv2) + self.b_conv2)
        self.h_pool2 = self.max_pol_2x2(self.h_conv2)
        # printing shape of the image
        # print self.h_pool2.shape

        # third conv layer
        self.W_conv3 = self.weight_variable([5, 5, 64, 128])
        self.b_conv3 = self.bias_variable([128])
        self.h_conv3 = tf.nn.relu(
            self.conv2d(self.h_pool2, self.W_conv3) + self.b_conv3)
        self.h_pool3 = self.max_pol_2x2(self.h_conv3)
        # print self.h_pool3.shape

        # fourth conv layer
        self.W_conv4 = self.weight_variable([5, 5, 128, 200])
        self.b_conv4 = self.bias_variable([200])
        self.h_conv4 = tf.nn.relu(
            self.conv2d(self.h_pool3, self.W_conv4) + self.b_conv4)
        self.h_pool4 = self.max_pol_2x2(self.h_conv4)
        # print self.h_pool4.shape

        # fifth conv layer
        self.W_conv5 = self.weight_variable([3, 3, 200, 300])
        self.b_conv5 = self.bias_variable([300])
        self.h_conv5 = tf.nn.relu(
            self.conv2d(self.h_pool4, self.W_conv5) + self.b_conv5)
        self.h_pool5 = self.max_pol_2x2(self.h_conv5)
        print(self.h_pool5.shape)

        # densely connected layer; the feature map at this point is 2x2 (hence 2 * 2 * 300 inputs)
        self.W_fc1 = self.weight_variable([2 * 2 * 300, 1024])
        self.b_fc1 = self.bias_variable([1024])
        self.h_pool2_flat = tf.reshape(self.h_pool5, [-1, 2 * 2 * 300])
        self.h_fc1 = tf.nn.relu(
            tf.matmul(self.h_pool2_flat, self.W_fc1) + self.b_fc1)
        # print self.h_fc1.shape

        # dropout
        self.keep_prob = tf.placeholder(tf.float32)
        self.h_fc1_drop = tf.nn.dropout(self.h_fc1, self.keep_prob)

        # readout layer and output digits
        self.W_fc2_len = self.weight_variable([1024, 10])
        self.b_fc2_len = self.bias_variable([10])

        self.W_fc2_1 = self.weight_variable([1024, 10])
        self.b_fc2_1 = self.bias_variable([10])

        self.W_fc2_2 = self.weight_variable([1024, 10])
        self.b_fc2_2 = self.bias_variable([10])

        self.W_fc2_3 = self.weight_variable([1024, 10])
        self.b_fc2_3 = self.bias_variable([10])

        self.W_fc2_4 = self.weight_variable([1024, 10])
        self.b_fc2_4 = self.bias_variable([10])

        self.W_fc2_5 = self.weight_variable([1024, 10])
        self.b_fc2_5 = self.bias_variable([10])

        self.y_pred_digit_length = tf.nn.softmax(
            tf.matmul(self.h_fc1_drop, self.W_fc2_len) + self.b_fc2_len)
        self.y_pred_digit1 = tf.nn.softmax(
            tf.matmul(self.h_fc1_drop, self.W_fc2_1) + self.b_fc2_1)
        self.y_pred_digit2 = tf.nn.softmax(
            tf.matmul(self.h_fc1_drop, self.W_fc2_2) + self.b_fc2_2)
        self.y_pred_digit3 = tf.nn.softmax(
            tf.matmul(self.h_fc1_drop, self.W_fc2_3) + self.b_fc2_3)
        self.y_pred_digit4 = tf.nn.softmax(
            tf.matmul(self.h_fc1_drop, self.W_fc2_4) + self.b_fc2_4)
        self.y_pred_digit5 = tf.nn.softmax(
            tf.matmul(self.h_fc1_drop, self.W_fc2_5) + self.b_fc2_5)

        self.cross_entropy_len = tf.reduce_mean(-tf.reduce_sum(
            self.y_digit_length *
            tf.log(tf.clip_by_value(self.y_pred_digit_length, 1e-10, 1.0)),
            reduction_indices=[1]))
        self.cross_entropy1 = tf.reduce_mean(-tf.reduce_sum(
            self.y_digit1 *
            tf.log(tf.clip_by_value(self.y_pred_digit1, 1e-10, 1.0)),
            reduction_indices=[1]))
        self.cross_entropy2 = tf.reduce_mean(-tf.reduce_sum(
            self.y_digit2 *
            tf.log(tf.clip_by_value(self.y_pred_digit2, 1e-10, 1.0)),
            reduction_indices=[1]))
        self.cross_entropy3 = tf.reduce_mean(-tf.reduce_sum(
            self.y_digit3 *
            tf.log(tf.clip_by_value(self.y_pred_digit3, 1e-10, 1.0)),
            reduction_indices=[1]))
        self.cross_entropy4 = tf.reduce_mean(-tf.reduce_sum(
            self.y_digit4 *
            tf.log(tf.clip_by_value(self.y_pred_digit4, 1e-10, 1.0)),
            reduction_indices=[1]))
        self.cross_entropy5 = tf.reduce_mean(-tf.reduce_sum(
            self.y_digit5 *
            tf.log(tf.clip_by_value(self.y_pred_digit5, 1e-10, 1.0)),
            reduction_indices=[1]))

        self.final_entropy = self.cross_entropy1 + self.cross_entropy2 + self.cross_entropy3 + self.cross_entropy4 + self.cross_entropy5 + self.cross_entropy_len
        tf.summary.scalar('final_entropy', self.final_entropy)

        self.train_step = tf.train.AdamOptimizer(1e-4).minimize(
            self.final_entropy)

        self.correct_prediction_len = tf.equal(
            tf.argmax(self.y_pred_digit_length, 1),
            tf.argmax(self.y_digit_length, 1))
        self.correct_prediction1 = tf.equal(tf.argmax(self.y_pred_digit1, 1),
                                            tf.argmax(self.y_digit1, 1))
        self.correct_prediction2 = tf.equal(tf.argmax(self.y_pred_digit2, 1),
                                            tf.argmax(self.y_digit2, 1))
        self.correct_prediction3 = tf.equal(tf.argmax(self.y_pred_digit3, 1),
                                            tf.argmax(self.y_digit3, 1))
        self.correct_prediction4 = tf.equal(tf.argmax(self.y_pred_digit4, 1),
                                            tf.argmax(self.y_digit4, 1))
        self.correct_prediction5 = tf.equal(tf.argmax(self.y_pred_digit5, 1),
                                            tf.argmax(self.y_digit5, 1))
        # print self.correct_prediction5.shape
        # determining accuracy
        self.accuracy_len = tf.reduce_mean(
            tf.cast(self.correct_prediction_len, tf.float32))
        self.accuracy_digit1 = tf.reduce_mean(
            tf.cast(self.correct_prediction1, tf.float32))
        self.accuracy_digit2 = tf.reduce_mean(
            tf.cast(self.correct_prediction2, tf.float32))
        self.accuracy_digit3 = tf.reduce_mean(
            tf.cast(self.correct_prediction3, tf.float32))
        self.accuracy_digit4 = tf.reduce_mean(
            tf.cast(self.correct_prediction4, tf.float32))
        self.accuracy_digit5 = tf.reduce_mean(
            tf.cast(self.correct_prediction5, tf.float32))

        self.out1 = tf.argmax(self.y_pred_digit_length, 1)
        self.out2 = tf.argmax(self.y_pred_digit1, 1)
        self.out3 = tf.argmax(self.y_pred_digit2, 1)
        self.out4 = tf.argmax(self.y_pred_digit3, 1)
        self.out5 = tf.argmax(self.y_pred_digit4, 1)
        self.out6 = tf.argmax(self.y_pred_digit5, 1)

        merged = tf.summary.merge_all()
        test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train')
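A sketch of how the merged summaries and writer created above would typically be used during training; `feed` and `step` are hypothetical stand-ins for the real batch feed_dict and counter, and `merged`/`test_writer` are assumed to be kept around (e.g. on self):

        # Sketch only (not from the original class): evaluate the merged
        # summary alongside a training step and log it to TensorBoard.
        summary_str, _ = self.sess.run([merged, self.train_step], feed_dict=feed)
        test_writer.add_summary(summary_str, global_step=step)
        test_writer.flush()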