  def testSlideSparse(self):

    def _sparse(i):
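      # `i * [1]` multiplies the scalar tensor i by the constant [1],
      # producing a one-element values tensor holding i.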
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
        sliding.sliding_window_batch(
            window_size=5, window_shift=3)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(init_op)
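      # Windows start at elements 0 and 3, so (10 - 5) // 3 + 1 = 2 batches.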
      num_batches = (10 - 5) // 3 + 1
      for i in range(num_batches):
        actual = sess.run(get_next)
        expected = sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],
            dense_shape=[5, 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testSlideSparseWithDifferentDenseShapes(self):

    def _sparse(i):
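      # Element i is a length-i sparse vector whose i entries all equal i, so
      # successive elements have different dense shapes.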
      return sparse_tensor.SparseTensorValue(
          indices=array_ops.expand_dims(
              math_ops.range(i, dtype=dtypes.int64), 1),
          values=array_ops.fill([math_ops.to_int32(i)], i),
          dense_shape=[i])

    iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
        sliding.sliding_window_batch(
            window_size=5, window_shift=3)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(init_op)
      num_batches = (10 - 5) // 3 + 1
      for i in range(num_batches):
        actual = sess.run(get_next)
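        # Window i covers elements i*3 .. i*3 + 4; the widest (last) element
        # has length i*3 + 4, which becomes the batched dense dimension.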
        expected_indices = []
        expected_values = []
        for j in range(5):
          for k in range(i * 3 + j):
            expected_indices.append([j, k])
            expected_values.append(i * 3 + j)
        expected = sparse_tensor.SparseTensorValue(
            indices=expected_indices,
            values=expected_values,
            dense_shape=[5, i * 3 + 5 - 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
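
  # In the original suite this method takes its arguments from a
  # parameterized-test decorator, which is not shown in this excerpt.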
  def testSlideDatasetInvalid(self, count, window_size, window_shift,
                              window_stride):
    count_t = array_ops.placeholder(dtypes.int64, shape=[])
    window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
    window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
    window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])

    iterator = (
        dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count_t).apply(
            sliding.sliding_window_batch(
                window_size=window_size_t,
                window_shift=window_shift_t,
                window_stride=window_stride_t)).make_initializable_iterator())
    init_op = iterator.initializer

    with self.cached_session() as sess:
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(
            init_op,
            feed_dict={
                count_t: count,
                window_size_t: window_size,
                window_shift_t: window_shift,
                window_stride_t: window_stride
            })

  def testNestedSlideSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = (
        dataset_ops.Dataset.range(10).map(_sparse).apply(
            sliding.sliding_window_batch(window_size=4, window_shift=2)).apply(
                sliding.sliding_window_batch(window_size=3, window_shift=1))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(init_op)
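      # First slide over range(10): windows of 4 shifted by 2 give
      # [0..3], [2..5], [4..7], [6..9]. The second slide batches 3 of those
      # windows with shift 1, so (4 - 3) // 1 + 1 = 2 nested batches.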
      # Slide: 1st batch.
      actual = sess.run(get_next)
      expected = sparse_tensor.SparseTensorValue(
          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                   [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                   [2, 2, 0], [2, 3, 0]],
          values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
          dense_shape=[3, 4, 1])
      self.assertTrue(sparse_tensor.is_sparse(actual))
      self.assertSparseValuesEqual(actual, expected)
      # Slide: 2nd batch.
      actual = sess.run(get_next)
      expected = sparse_tensor.SparseTensorValue(
          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
                   [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
                   [2, 2, 0], [2, 3, 0]],
          values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
          dense_shape=[3, 4, 1])
      self.assertTrue(sparse_tensor.is_sparse(actual))
      self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
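
  # Like testSlideDatasetInvalid above, this method is parameterized in the
  # original suite; the decorator supplying the arguments is not shown.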
  def testSlideDataset(self, count, window_size, window_shift, window_stride):
    """Tests a dataset that slides a window its input elements."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    count_t = array_ops.placeholder(dtypes.int64, shape=[])
    window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
    window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
    window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count) ->
    # _SlideDataset(window_size, window_shift, window_stride).
    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
        .repeat(count).apply(
            sliding.sliding_window_batch(
                window_size=window_size_t,
                window_shift=window_shift_t,
                window_stride=window_stride_t)).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    self.assertEqual([[None] + list(c.shape[1:]) for c in components],
                     [t.shape.as_list() for t in get_next])

    with self.cached_session() as sess:
      sess.run(
          init_op,
          feed_dict={
              count_t: count,
              window_size_t: window_size,
              window_shift_t: window_shift,
              window_stride_t: window_stride
          })
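      # A window spans (window_size - 1) * window_stride + 1 input elements,
      # so the count of full windows over count * 7 elements is
      # (total - span) // window_shift + 1.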
      num_batches = (count * 7 - (
          (window_size - 1) * window_stride + 1)) // window_shift + 1
      for i in range(num_batches):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(window_size):
            self.assertAllEqual(
                component[(i * window_shift + j * window_stride) % 7]**2,
                result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testSlideShapeError(self):

    def generator():
      yield [1.0, 2.0, 3.0]
      yield [4.0, 5.0, 6.0]
      yield [7.0, 8.0, 9.0, 10.0]

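    # The third generated element has shape [4] while the first two have
    # shape [3]; batching them into one window must fail at run time.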
    iterator = dataset_ops.make_initializable_iterator(
        dataset_ops.Dataset.from_generator(
            generator, dtypes.float32, output_shapes=[None]).apply(
                sliding.sliding_window_batch(window_size=3, window_shift=1)))
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"Cannot batch tensors with different shapes in component 0. "
          r"First element had shape \[3\] and element 2 had shape \[4\]."):
        sess.run(next_element)
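
  # `stride` is the deprecated alias for `window_shift`; supplying both (as
  # below) should raise a ValueError.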
  def testSlideDatasetValueError(self):
    with self.assertRaises(ValueError):
      dataset_ops.Dataset.range(10).map(lambda x: x).apply(
          sliding.sliding_window_batch(
              window_size=1, stride=1, window_shift=1, window_stride=1))

  # A legacy variant of the test above, exercising the deprecated positional
  # (window_size, stride) signature of sliding_window_batch; renamed here so
  # it does not shadow the parameterized testSlideDataset.
  def testSlideDatasetDeprecatedStride(self):
    """Tests a dataset that slides a window over its input elements."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    count = array_ops.placeholder(dtypes.int64, shape=[])
    window_size = array_ops.placeholder(dtypes.int64, shape=[])
    stride = array_ops.placeholder(dtypes.int64, shape=[])

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count) -> _SlideDataset(window_size, stride).
    iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                .map(_map_fn)
                .repeat(count)
                .apply(sliding.sliding_window_batch(window_size, stride))
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    self.assertEqual([[None] + list(c.shape[1:]) for c in components],
                     [t.shape.as_list() for t in get_next])

    with self.cached_session() as sess:
      # Slide over a finite input, where the window_size divides the
      # total number of elements.
      sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 7})
      # Same formula as the output length of a convolution layer.
      num_batches = (20 * 7 - 14) // 7 + 1
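      # With count=20: 20 * 7 = 140 elements, (140 - 14) // 7 + 1 = 19 batches.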
      for i in range(num_batches):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(14):
            self.assertAllEqual(component[(i*7 + j) % 7]**2,
                                result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Slide over a finite input, where the window_size does not
      # divide the total number of elements.
      sess.run(init_op, feed_dict={count: 20, window_size: 17, stride: 9})

      num_batches = (20 * 7 - 17) // 9 + 1
      for i in range(num_batches):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(17):
            self.assertAllEqual(component[(i*9 + j) % 7]**2,
                                result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Slide over a finite input, which is less than window_size,
      # should fail straight away.
      sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 4})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 8})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Slide over an empty input should fail straight away.
      sess.run(init_op, feed_dict={count: 0, window_size: 8, stride: 4})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Empty window_size should be an initialization time error.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={count: 14, window_size: 0, stride: 0})

      # Invalid stride should be an initialization time error.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={count: 14, window_size: 3, stride: 0})
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={count: 14, window_size: 3, stride: 3})
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={count: 14, window_size: 3, stride: 5})
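

# Builds and trains a conv-LSTM frame-prediction graph. The helpers
# _parse_function, encoder_block, lstm_block, decoder_block and cost, plus
# the logdir/logdir_m paths, are defined elsewhere in the original project.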
def model(learning_rate, num_epochs, mini_size, pt_out, break_t, fil_conv,
          kernel_ls, decode_l, pera_1, pera_2, imp_skip, batch):

    tf.summary.scalar('learning_rate', learning_rate)
    tf.summary.scalar('batch_size', mini_size)
    tf.summary.scalar('epoch_num', num_epochs)
    tf.summary.scalar('out_step', pt_out)
    tf.summary.scalar('training_break', break_t)
    tf.summary.scalar('conv_filter_numbers', fil_conv)
    tf.summary.scalar('kernel_sizes_lstm', kernel_ls)
    tf.summary.scalar('number_of_prediction', decode_l)
    tf.summary.scalar('batch', batch)

    filenames = tf.placeholder(tf.string)

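    # Each record is one frame; the sliding window groups `mini_size`
    # consecutive records (window shifted by 6 records between windows) into
    # one training sequence before batching.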
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(_parse_function)
    window = mini_size
    dataset = dataset.apply(sliding.sliding_window_batch(window, stride=6))
    dataset = dataset.batch(batch, drop_remainder=True)
    dataset = dataset.shuffle(500)
    dataset = dataset.repeat(num_epochs)
    iterator = dataset.make_initializable_iterator(shared_name="iter")

    pix_gt = iterator.get_next()
    spli0, spli1, spli2, spli3, spli4, spli5 = tf.split(
        pix_gt, num_or_size_splits=batch, axis=0)
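    # NOTE: unpacking into exactly six tensors hard-codes batch == 6, and the
    # later split over mini_size likewise assumes mini_size == 6.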

    spli0 = tf.reshape(spli0, [mini_size, 256, 256, 1])
    spli1 = tf.reshape(spli1, [mini_size, 256, 256, 1])
    spli2 = tf.reshape(spli2, [mini_size, 256, 256, 1])
    spli3 = tf.reshape(spli3, [mini_size, 256, 256, 1])
    spli4 = tf.reshape(spli4, [mini_size, 256, 256, 1])
    spli5 = tf.reshape(spli5, [mini_size, 256, 256, 1])

    pix_gt1 = tf.stack([spli0, spli1, spli2, spli3, spli4, spli5], axis=1)
    split0, split1, split2, split3, split4, split5 = tf.split(
        pix_gt1, num_or_size_splits=mini_size, axis=0)

    pix_0 = tf.reshape(split0, [batch, 256, 256, 1])
    pix_1 = tf.reshape(split1, [batch, 256, 256, 1])
    pix_2 = tf.reshape(split2, [batch, 256, 256, 1])
    pix_3 = tf.reshape(split3, [batch, 256, 256, 1])
    pix_4 = tf.reshape(split4, [batch, 256, 256, 1])
    pix_5 = tf.reshape(split5, [batch, 256, 256, 1])
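    # The split/stack/split sequence above effectively transposes the batch
    # from [batch, window, ...] into time-step-major tensors pix_0..pix_5,
    # one [batch, 256, 256, 1] tensor per window position.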

    tf.summary.image("input_1",pix_0,3)
    tf.summary.image("input_2",pix_1,3)
    tf.summary.image("input_3",pix_2,3)
    tf.summary.image("input_4",pix_3,3)
    tf.summary.image("input_5",pix_4,3)
    tf.summary.image("input_6",pix_5,3)

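    # Encode the first four frames, run the conv-LSTM over the encodings, and
    # decode (with skip connections) into predictions for frames 5 and 6.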
    (out_1, out_2, out_3, out_4, two_pix, two_pixel_1, two_pixel_2,
     two_pixel_3, two_pixel_4, two_pixel_5) = encoder_block(
         pix_0, pix_1, pix_2, pix_3)

    out_5, out_6 = lstm_block(out_1, out_2, out_3, out_4, pix_4)

    out_pre, out_pre1 = decoder_block(
        out_5, out_6, two_pixel_5, two_pixel_4, two_pixel_3, two_pixel_2,
        two_pixel_1, two_pix, skip_try="oka")

    tf.summary.image("prediction1", out_pre, 3)
    tf.summary.image("prediction2", out_pre1, 3)

    loss = cost(out_pre, out_pre1, pix_4, pix_5, pera_1, pera_2)

    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate, name="adam").minimize(loss)

    merge_sum = tf.summary.merge_all()
    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())

    saver = tf.train.Saver()

    init = tf.global_variables_initializer()

    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    sess.run(init)
    sess.run(
        iterator.initializer,
        feed_dict={
            filenames: "/media/antor/Files/ML/tfrecord/sir_demo/train.tfrecords"
        })

    mini_cost = 0.0
    counter = 1
    epoch_cost = 0.0
    epoch = 0

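    # Train until the finite (repeat(num_epochs)) dataset is exhausted; 288
    # appears to be the number of optimizer steps per epoch in this setup.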
    while True:
        try:
            _, temp_cost = sess.run([optimizer, loss])
            mini_cost += temp_cost / pt_out
            epoch_cost += temp_cost / 288

            if counter % 288 == 0:
                s = sess.run(merge_sum)
                file_writer.add_summary(s, counter)
                print("cost after epoch " + str(epoch) + ": " + str(epoch_cost))
                epoch_cost = 0.0
                epoch += 1

            counter += 1

        except tf.errors.OutOfRangeError:
            break
    sess.close()