Example #1
    def train(self, config):
        print("\nPrepare Data...\n")
        input_setup(config)
        data_dir = get_data_dir(config.checkpoint_dir, config.is_train)
        data_num = get_data_num(data_dir)

        images_shape = [None, self.image_size, self.image_size, self.c_dim]
        labels_shape = [None, self.image_size * self.scale, self.image_size * self.scale, self.c_dim]
        self.build_model(images_shape, labels_shape)
        self.train_op = tf.train.AdamOptimizer(learning_rate=config.learning_rate).minimize(self.loss)
        tf.global_variables_initializer().run(session=self.sess) 
        # merged_summary_op = tf.summary.merge_all()
        # summary_writer = tf.summary.FileWriter(config.checkpoint_dir, self.sess.graph)

        counter = self.load(config.checkpoint_dir)
        time_ = time.time()
        print("\nNow Start Training...\n")
        for ep in range(config.epoch):
            # Run by batch images
            batch_idxs = data_num // config.batch_size
            for idx in range(0, batch_idxs):
                batch_images, batch_labels = get_batch(data_dir, data_num, config.batch_size)
                counter += 1

                _, err = self.sess.run([self.train_op, self.loss], feed_dict={self.images: batch_images, self.labels: batch_labels})

                if counter % 10 == 0:
                    print("Epoch: [%2d], batch: [%2d/%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" % ((ep+1), idx, batch_idxs, counter, time.time()-time_, err))
                if counter % 100 == 0:
                    self.save(config.checkpoint_dir, counter)
                    # summary_str = self.sess.run(merged_summary_op)
                    # summary_writer.add_summary(summary_str, counter)
                
                if counter > 0 and counter == batch_idxs * config.epoch:
                    return
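The get_batch helper called above is not shown in this listing. A minimal sketch of a random-batch reader, assuming the prepared HDF5 file stores datasets named 'input' and 'label' (the project's own layout may differ):

import h5py
import numpy as np

def get_batch(data_dir, data_num, batch_size):
    # Draw a random batch of sub-images and labels from the prepared .h5 file.
    with h5py.File(data_dir, 'r') as hf:
        # h5py fancy indexing requires increasing indices, so sort the sample.
        idx = np.sort(np.random.choice(data_num, batch_size, replace=False))
        batch_images = np.array(hf['input'][idx])
        batch_labels = np.array(hf['label'][idx])
    return batch_images, batch_labels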
Example #2
  def train(self, config):
    if config.is_train:
      input_setup(self.sess, config)
    else:
      nx, ny, pad_h, pad_w = input_setup(self.sess, config)

    if config.is_train:
      data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "train.h5")
    else:
      data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "test.h5")

    train_data, train_label = read_data(data_dir)

    # Stochastic gradient descent
    self.train_op = tf.train.MomentumOptimizer(config.learning_rate,0.9).minimize(self.loss)

    tf.global_variables_initializer().run()
    
    counter = 0
    start_time = time.time()

    if self.load(self.checkpoint_dir):
      print(" [*] Load SUCCESS")
    else:
      print(" [!] Load failed...")

    if config.is_train:
      print("Training...")

      for ep in xrange(config.epoch):
        # Run by batch images
        batch_idxs = len(train_data) // config.batch_size
        for idx in xrange(0, batch_idxs):
          batch_images = train_data[idx*config.batch_size : (idx+1)*config.batch_size]
          batch_labels = train_label[idx*config.batch_size : (idx+1)*config.batch_size]

          counter += 1
          _, err = self.sess.run([self.train_op, self.loss], feed_dict={self.images: batch_images, self.labels: batch_labels})

          if counter % 10 == 0:
            print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
              % ((ep+1), counter, time.time()-start_time, err))

          if counter % 500 == 0:
            self.save(config.checkpoint_dir, counter)

    else:
      print("Testing...")

      result = self.pred.eval({self.images: train_data, self.labels: train_label})

      result = merge(result, [nx, ny])
      result = result.squeeze()

      # change back to original size
      h, w = np.shape(result)
      result = result[0:(h-pad_h), 0:(w-pad_w)]
      image_path = os.path.join(os.getcwd(), config.sample_dir)
      image_path = os.path.join(image_path, "test.png")
      imsave(result, image_path)
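Several of these examples stitch the per-patch network output back into a full image with merge(result, [nx, ny]). That utility is not included here; a minimal sketch matching how it is called (a batch of patches in, one stitched image out; the real helper may differ in detail):

import numpy as np

def merge(images, size):
    # images: (num_patches, h, w, c); size: [rows, cols] of the patch grid.
    h, w = images.shape[1], images.shape[2]
    img = np.zeros((h * size[0], w * size[1], images.shape[3]))
    for idx, patch in enumerate(images):
        i = idx % size[1]   # column in the grid
        j = idx // size[1]  # row in the grid
        img[j * h:(j + 1) * h, i * w:(i + 1) * w, :] = patch
    return img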
Example #3
    def train(self, config):
        print("\nPrepare Data...\n")
        input_setup(config)
        data_dir = get_data_dir(config)
        data_num = get_data_num(data_dir)

        self.train_op = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate).minimize(self.loss)
        tf.initialize_all_variables().run()

        # merged_summary_op = tf.summary.merge_all()
        # summary_writer = tf.summary.FileWriter(config.checkpoint_dir, self.sess.graph)

        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("\nNow Start Training...\n")
            for ep in range(config.epoch):
                # Run by batch images
                batch_idxs = data_num // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images, batch_labels = get_batch(
                        data_dir, idx, config.batch_size)
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print(
                            "Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                            % ((ep + 1), counter, time.time() - time_, err))
                    if counter % 100 == 0:
                        self.save(config.checkpoint_dir, counter)
                        # summary_str = self.sess.run(merged_summary_op)
                        # summary_writer.add_summary(summary_str, counter)
        # Test
        else:
            print("\nNow Start Testing...\n")
            time_ = time.time()
            input_, label_ = get_batch(data_dir, 0, 1)
            result = self.sess.run([self.pred],
                                   feed_dict={
                                       self.images:
                                       input_[0].reshape(
                                           1, self.h, self.w, self.c_dim)
                                   })
            print("time:", (time.time() - time_))
            x = np.squeeze(result)
            checkimage(x)
            print("shape:", x.shape)
            imsave(x, config.result_dir + '/result.png', config)
Example #4
File: model.py Project: WxlSky/ESPCN-1
    def train(self, config):

        # NOTE : if train, the nx, ny are ignored
        input_setup(config)

        data_dir = checkpoint_dir(config)

        input_, label_ = read_data(data_dir)

        print(input_.shape, label_.shape)
        print(config.is_train)

        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate).minimize(self.loss)
        tf.initialize_all_variables().run()

        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("Now Start Training...")
            for ep in range(config.epoch):
                # Run by batch images
                batch_idxs = len(input_) // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images = input_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    batch_labels = label_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print(
                            "Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                            % ((ep + 1), counter, time.time() - time_, err))
                        #print(label_[1] - self.pred.eval({self.images: input_})[1],'loss:]',err)
                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)
        # Test
        else:
            print("Now Start Testing...")
            result = self.pred.eval({
                self.images:
                input_[0].reshape(1, self.h, self.w, self.c_dim)
            })
            x = np.squeeze(result)
            # checkimage(x)
            print(x.shape)
            imsave(x, 'result/result.png', config)
Example #5
  def train(self, config):
    if config.is_train:
      input_setup(self.sess, config)
    else:
      nx, ny, arr = input_setup(self.sess, config)
      print(np.shape(arr))

    if config.is_train:     
      data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "train.h5")
      train_data, train_label = read_data(data_dir,is_train=True)

    # Stochastic gradient descent with the standard backpropagation
    self.train_op = tf.train.GradientDescentOptimizer(config.learning_rate).minimize(self.loss)

    tf.global_variables_initializer().run()
    
    counter = 0
    start_time = time.time()

    if self.load(self.checkpoint_dir):
      print(" [*] Load SUCCESS")
    else:
      print(" [!] Load failed...")

    if config.is_train:
      print("Training...")

      for ep in range(config.epoch):
        # Run by batch images
        batch_idxs = len(train_data) // config.batch_size
        for idx in range(0, batch_idxs):
          batch_images = train_data[idx*config.batch_size : (idx+1)*config.batch_size]
          batch_labels = train_label[idx*config.batch_size : (idx+1)*config.batch_size]

          counter += 1
          _, err = self.sess.run([self.train_op, self.loss], feed_dict={self.images: batch_images, self.labels: batch_labels})

          if counter % 10 == 0:
            print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
              % ((ep+1), counter, time.time()-start_time, err))

          if counter % 500 == 0:
            self.save(config.checkpoint_dir, counter)

    else:
      print("Testing...")
      for i in range(len(arr)):
        image = np.zeros((nx[i]*config.stride,ny[i]*config.stride,3))
        for j in range(3):         
          result = self.pred.eval({self.images: arr[i][:,:,:,j].reshape([nx[i]*ny[i], config.image_size, config.image_size, 1])})
          result = merge(result, [nx[i], ny[i]])
          result = result.squeeze()        
          image[:, :, j] = result

        image_path = os.path.join(os.getcwd(), config.sample_dir)
        image_path = os.path.join(image_path, "test_output%03d.png"%i)
        imsave(image, image_path)
Example #6
    def train(args):

        if args.is_train:
            input_setup(args)
        else:
            nx, ny = input_setup(args)

        counter = 0
        start_time = time.time()

        if args.is_train:
            print("Training...")
            data_dir = os.path.join('./{}'.format(args.checkpoint_dir),
                                    "train.h5")
            train_data, train_label = read_data(data_dir)

            display_step = 5
            for step in range(args.epochs):
                batch_idxs = len(train_data) // args.batch_size

                for idx in range(0, batch_idxs):

                    batch_images = train_data[idx * args.batch_size:(idx + 1) *
                                              args.batch_size]
                    batch_labels = train_label[idx *
                                               args.batch_size:(idx + 1) *
                                               args.batch_size]
                    run_optimization(batch_images, batch_labels)

                    if step % display_step == 0:
                        pred = srcnn(batch_images)
                        loss = mse(pred, batch_labels)
                        #psnr_loss = psnr(batch_labels, pred)
                        #acc = accuracy(pred, batch_y)

                        #print("step: %i, loss: %f", "psnr_loss: %f" %(step, loss, psnr_loss))
                        #print("Step:'{0}', Loss:'{1}', PSNR: '{2}'".format(step, loss, psnr_loss))

                        print("step: %i, loss: %f" % (step, loss))

        else:
            print("Testing...")
            data_dir = os.path.join('./{}'.format(args.checkpoint_dir),
                                    "test.h5")
            test_data, test_label = read_data(data_dir)

            result = srcnn(test_data)
            result = merge(result, [nx, ny])
            result = result.squeeze()

            image_path = os.path.join(os.getcwd(), args.sample_dir)
            image_path = os.path.join(image_path, "test_image.png")
            print(result.shape)
            imsave(result, image_path)
Example #7
  def train(self, config):
    if config.is_train:
      input_setup(self.sess, config)
    else:
      nx, ny = input_setup(self.sess, config)

    if config.is_train:     
      data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "train.h5")
    else:
      data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "test.h5")

    train_data, train_label = read_data(data_dir)

    # Stochastic gradient descent with the standard backpropagation
    self.train_op = tf.train.GradientDescentOptimizer(config.learning_rate).minimize(self.loss)

    tf.initialize_all_variables().run()
    
    counter = 0
    start_time = time.time()

    self.loadModel()

    if config.is_train:
      print("Training...")
      loss = []
      for ep in xrange(config.epoch):
        err = 1e10
        # Run by batch images
        batch_idxs = len(train_data) // config.batch_size
        for idx in xrange(0, batch_idxs):
          batch_images = train_data[idx*config.batch_size : (idx+1)*config.batch_size]
          batch_labels = train_label[idx*config.batch_size : (idx+1)*config.batch_size]

          counter += 1
          _, err = self.sess.run([self.train_op, self.loss], feed_dict={self.images: batch_images, self.labels: batch_labels})

          if counter % 10 == 0: # why 10? 
            print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
              % ((ep+1), counter, time.time()-start_time, err))

          if counter % 500 == 0:
            self.save(config.checkpoint_dir, counter)
        loss.append(err)
      # Plot the per-epoch training loss once training finishes
      plt.title("SRCNN Train")
      plt.xlabel("epoch")
      plt.ylabel("Loss - MSE")
      plt.plot(range(config.epoch), loss)
      plt.savefig("./train_loss.png")
Example #8
    def train(self, config):
        err_li = []
        # NOTE : if train, the nx, ny are ignored
        nx, ny = input_setup(config)

        data_dir = checkpoint_dir(config)

        input_, label_ = read_data(data_dir)
        # Stochastic gradient descent with the standard backpropagation
        #self.train_op = tf.train.GradientDescentOptimizer(config.learning_rate).minimize(self.loss)
        self.train_op = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate).minimize(self.loss)  # minimize the loss over the weights and biases
        tf.initialize_all_variables().run()  # run variable initialization in the session

        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("Now Start Training...")
            for ep in range(config.epoch):  # total number of epochs to run
                # Run by batch images
                batch_idxs = len(input_) // config.batch_size
                for idx in range(0, batch_idxs):  # process one batch (128 images here) per iteration
                    batch_images = input_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    batch_labels = label_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })
                    err_li.append(
                        err)  # feed_dict feeds self.images and self.labels defined in build_model
                    if counter % 10 == 0:
                        print(
                            "Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                            % ((ep + 1), counter, time.time() - time_, err))
                        #print(label_[1] - self.pred.eval({self.images: input_})[1],'loss:]',err)
                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)
        # Test
        else:
            print("Now Start Testing...")
            #print("nx","ny",nx,ny)

            result = self.pred.eval({self.images: input_})
            #print(label_[1] - result[1])
            image = merge(result, [nx, ny], self.c_dim)

            #checkimage(image)

            imsave(image, config.result_dir + '/result.png', config)
Example #9
File: model.py Project: WxlSky/ESPCN-1
    def test(self, config):
        input_setup(config)

        data_dir = checkpoint_dir(config)

        input_, label_ = read_data(data_dir)

        print(input_.shape, label_.shape)
        print(config.is_train)
        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        print("Now Start Testing...")
        result = self.pred.eval(
            {self.images: input_[0].reshape(1, self.h, self.w, self.c_dim)})
        x = np.squeeze(result)
        checkimage(x)
        print(x.shape)
        imsave(x, 'result/result2.png', config)
Example #10
File: network.py Project: jiayi-ma/GANMcC
  def train(self, config):
    if config.is_train:
      input_setup(self.sess, config,"Train_ir")
      input_setup(self.sess,config,"Train_vi")
    else:
      nx_ir, ny_ir = input_setup(self.sess, config,"Test_ir")
      nx_vi,ny_vi=input_setup(self.sess, config,"Test_vi")

    if config.is_train:     
      data_dir_ir = os.path.join('./{}'.format(config.checkpoint_dir), "Train_ir","train.h5")
      data_dir_vi = os.path.join('./{}'.format(config.checkpoint_dir), "Train_vi","train.h5")
    else:
      data_dir_ir = os.path.join('./{}'.format(config.checkpoint_dir),"Test_ir", "test.h5")
      data_dir_vi = os.path.join('./{}'.format(config.checkpoint_dir),"Test_vi", "test.h5")

    train_data_ir, train_label_ir = read_data(data_dir_ir)
    train_data_vi, train_label_vi = read_data(data_dir_vi)
    t_vars = tf.trainable_variables()
    self.d_vars = [var for var in t_vars if 'discriminator' in var.name]
    print(self.d_vars)
    self.g_vars = [var for var in t_vars if 'fusion_model' in var.name]
    print(self.g_vars)

    with tf.name_scope('train_step'):
        self.train_fusion_op = tf.train.AdamOptimizer(config.learning_rate).minimize(self.g_loss_total,var_list=self.g_vars)
        self.train_discriminator_op=tf.train.AdamOptimizer(config.learning_rate).minimize(self.d_loss,var_list=self.d_vars)

    self.summary_op = tf.summary.merge_all()
    self.train_writer = tf.summary.FileWriter(config.summary_dir + '/train',self.sess.graph,flush_secs=60)
    
    tf.initialize_all_variables().run()
    
    counter = 0
    start_time = time.time()

    if config.is_train:
      print("Training...")

      for ep in xrange(config.epoch):
        # Run by batch images
        batch_idxs = len(train_data_ir) // config.batch_size
        for idx in xrange(0, batch_idxs):
          batch_images_ir = train_data_ir[idx*config.batch_size : (idx+1)*config.batch_size]
          batch_labels_ir = train_label_ir[idx*config.batch_size : (idx+1)*config.batch_size]
          batch_images_vi = train_data_vi[idx*config.batch_size : (idx+1)*config.batch_size]
          batch_labels_vi = train_label_vi[idx*config.batch_size : (idx+1)*config.batch_size]

          counter += 1
          for i in range(2):
            _, err_d= self.sess.run([self.train_discriminator_op, self.d_loss], feed_dict={self.images_ir: batch_images_ir, self.images_vi: batch_images_vi, self.labels_vi: batch_labels_vi,self.labels_ir:batch_labels_ir})
          _, err_g,summary_str= self.sess.run([self.train_fusion_op, self.g_loss_total,self.summary_op], feed_dict={self.images_ir: batch_images_ir, self.images_vi: batch_images_vi, self.labels_ir: batch_labels_ir,self.labels_vi:batch_labels_vi})
          self.train_writer.add_summary(summary_str,counter)

          if counter % 10 == 0:
            print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss_d: [%.8f],loss_g:[%.8f]" \
              % ((ep+1), counter, time.time()-start_time, err_d,err_g))
        self.save(config.checkpoint_dir, ep)
Example #11
File: model.py Project: zrongcheng/srcnn
    def build_model(self):
        #input
        if self.is_train:
            self.images = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images')
            self.labels = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels')
        else:
            _, _, self.image_size, self.label_size = input_setup(FLAGS)
            self.images = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.label_size, self.c_dim],
                name='images')
            self.labels = tf.placeholder(
                tf.float32,
                [None, self.image_size - 12, self.label_size - 12, self.c_dim],
                name='labels')
        #filters
        self.weights = {
            'w1':
            tf.Variable(tf.random_normal([9, 9, 1, 64], stddev=1e-3),
                        name='w1'),
            'w2':
            tf.Variable(tf.random_normal([1, 1, 64, 32], stddev=1e-3),
                        name='w2'),
            'w3':
            tf.Variable(tf.random_normal([5, 5, 32, 1], stddev=1e-3),
                        name='w3')
        }
        # The variables w1, w2, w3 are declared but not yet assigned; they only
        # receive concrete values after run(tf.global_variables_initializer()) is called in a Session.
        self.biases = {
            'b1': tf.Variable(tf.zeros([64]), name='b1'),
            'b2': tf.Variable(tf.zeros([32]), name='b2'),
            'b3': tf.Variable(tf.zeros([1]), name='b3')
        }

        self.pred = self.model()

        # Loss function (MSE)
        self.loss = tf.reduce_mean(tf.square(self.labels - self.pred))

        self.saver = tf.train.Saver()
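build_model relies on a self.model() method that is not part of this snippet. A minimal sketch of the forward pass the 9-1-5 weight shapes imply, assuming VALID padding, ReLU after the first two layers, and the tf and weights/biases defined above (the project's actual model() may differ):

    def model(self):
        # conv1: 9x9 patch extraction, conv2: 1x1 non-linear mapping, conv3: 5x5 reconstruction.
        conv1 = tf.nn.relu(tf.nn.conv2d(self.images, self.weights['w1'],
                                        strides=[1, 1, 1, 1], padding='VALID') + self.biases['b1'])
        conv2 = tf.nn.relu(tf.nn.conv2d(conv1, self.weights['w2'],
                                        strides=[1, 1, 1, 1], padding='VALID') + self.biases['b2'])
        conv3 = tf.nn.conv2d(conv2, self.weights['w3'],
                             strides=[1, 1, 1, 1], padding='VALID') + self.biases['b3']
        return conv3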
Example #12
  def test(self, config):
    """
    Output: bicubic image and SRCNN image pairs
    """
    self.loadModel()
    image_paths = prepare_data(self.sess, dataset="Test")
    avg_psnr_srcnn = 0
    avg_psnr_bicubic = 0
    for image_path in image_paths:
        image_dir, image_name = os.path.split(image_path)
        nx, ny, bicubic_img, ground_truth = input_setup(self.sess, config, image_path)
        data_dir = os.path.join('./{}'.format(config.checkpoint_dir), "test.h5")

        train_data, train_label = read_data(data_dir) # train_data(bicubic):(33, 33); train_label(gt):(21, 21)
        result = self.pred.eval({self.images: train_data, self.labels: train_label}) 

        PSNR_bicubic = PSNR(train_data, train_label)
        PSNR_srcnn = PSNR(result, train_label)
        avg_psnr_bicubic += PSNR_bicubic
        avg_psnr_srcnn += PSNR_srcnn

        result = merge(result, [nx, ny]) # result(SRCNN):(21, 21)
        result = result.squeeze()
        image_dir = os.path.join(os.getcwd(), config.sample_dir)
        image_path = os.path.join(image_dir, image_name)
        bicubic_path = os.path.join(image_dir, "bicubic_"+image_name)

        px = 1/plt.rcParams['figure.dpi']  # pixel in inches
        width = max(ground_truth.shape[0], ground_truth.shape[1])
        
        # plot image
        plt.figure(image_name, figsize=(2*width*px,3*width*px))
        ax1 = plt.subplot(3,1,1)
        ax1.set_title("SRCNN - PSNR - " + str(PSNR_srcnn))
        plt.imshow(toimage(result), cmap='gray')
        ax2 = plt.subplot(3,1,2)
        ax2.set_title("Bicubic - PSNR - " + str(PSNR_bicubic))
        plt.imshow(toimage(bicubic_img), cmap='gray')
        ax3 = plt.subplot(3,1,3)
        ax3.set_title("Ground Truth")
        plt.imshow(toimage(ground_truth), cmap='gray')
        plt.savefig(image_path)
        plt.close()
    avg_psnr_srcnn /= len(image_paths)
    avg_psnr_bicubic /= len(image_paths)
    print("average PSNR of srcnn = {}\n average PSNR of bicubic = {}".format(avg_psnr_srcnn, avg_psnr_bicubic))
Example #13
    def test(self, config):
        """
        Testing process.
        """
        print("Testing...")

        # Load checkpoint
        if self.load(self.checkpoint_dir, config.scale):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        nx, ny = input_setup(self.sess, config)

        data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                "test.h5")

        test_data, test_label = read_data(data_dir)

        result = self.pred.eval({
            self.images: test_data,
            self.labels: test_label
        })

        result = merge(result, [nx, ny])
        result = result.squeeze()

        # Save output image
        output_path = os.path.join(os.getcwd(), config.output_dir)
        image_path = os.path.join(output_path, "test_img.png")
        imsave(result, image_path)

        # PSNR
        label_path = os.path.join(output_path, "test_org_img.png")
        bicubic_path = os.path.join(output_path, "test_bicubic_img.png")

        bicubic_img = imread(bicubic_path, is_grayscale=True)
        label_img = imread(label_path, is_grayscale=True)
        output_img = imread(image_path, is_grayscale=True)

        bicubic_psnr_value = psnr(label_img, bicubic_img)
        srcnn_psnr_value = psnr(label_img, output_img)

        print("Bicubic PSNR: [{}]".format(bicubic_psnr_value))
        print("SRCNN PSNR: [{}]".format(srcnn_psnr_value))
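The psnr helper used for the bicubic/SRCNN comparison above is not shown. A minimal sketch assuming 8-bit intensity images in [0, 255] (hypothetical; the project's own psnr may normalize to [0, 1] instead):

import numpy as np

def psnr(reference, target, peak=255.0):
    # Peak signal-to-noise ratio in dB between two equally sized images.
    reference = np.asarray(reference, dtype=np.float64)
    target = np.asarray(target, dtype=np.float64)
    mse = np.mean((reference - target) ** 2)
    if mse == 0:
        return float('inf')
    return 10.0 * np.log10((peak ** 2) / mse)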
Example #14
    def train(self, config):
        print("\nPrepare Data...\n")
        data = input_setup(config)
        if len(data) == 0:
            print("\nCan Not Find Training Data!\n")
            return

        data_dir = get_data_dir(config.checkpoint_dir, config.is_train)
        data_num = get_data_num(data_dir)
        batch_num = data_num // config.batch_size

        images_shape = [None, self.image_size, self.image_size, self.c_dim]
        labels_shape = [
            None, self.image_size * self.scale, self.image_size * self.scale,
            self.c_dim
        ]
        self.build_model(images_shape, labels_shape)

        counter = self.load(config.checkpoint_dir, restore=False)
        epoch_start = counter // batch_num
        batch_start = counter % batch_num

        global_step = tf.Variable(counter, trainable=False)
        learning_rate = tf.train.exponential_decay(config.learning_rate,
                                                   global_step,
                                                   config.lr_decay_steps *
                                                   batch_num,
                                                   config.lr_decay_rate,
                                                   staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        learning_step = optimizer.minimize(self.loss, global_step=global_step)

        tf.global_variables_initializer().run(session=self.sess)

        merged_summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(
            (os.path.join(config.checkpoint_dir, self.model_name, "log")),
            self.sess.graph)

        self.load(config.checkpoint_dir, restore=True)
        print("\nNow Start Training...\n")
        for ep in range(epoch_start, config.epoch):
            # Run by batch images
            for idx in range(batch_start, batch_num):
                batch_images, batch_labels = get_batch(data_dir, data_num,
                                                       config.batch_size)
                counter += 1

                _, err, lr = self.sess.run(
                    [learning_step, self.loss, learning_rate],
                    feed_dict={
                        self.images: batch_images,
                        self.labels: batch_labels
                    })

                if counter % 10 == 0:
                    print(
                        "Epoch: [%4d], batch: [%6d/%6d], loss: [%.8f], lr: [%.6f], step: [%d]"
                        % ((ep + 1), (idx + 1), batch_num, err, lr, counter))
                if counter % 10000 == 0:
                    self.save(config.checkpoint_dir, counter)

                    summary_str = self.sess.run(merged_summary_op,
                                                feed_dict={
                                                    self.images: batch_images,
                                                    self.labels: batch_labels
                                                })
                    summary_writer.add_summary(summary_str, counter)

                if counter > 0 and counter == batch_num * config.epoch:
                    self.save(config.checkpoint_dir, counter)
                    break

        summary_writer.close()
Example #15
File: model.py Project: zrongcheng/srcnn
    def train(self, config):
        if config.is_train:  # check whether we are in training mode
            input_setup(config)
        else:
            input, label, _, _ = input_setup(config)

        if config.is_train:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "train.h5")
            train_data, train_label = read_data(data_dir)
            #print(train_data.shape, train_label.shape)

        # Stochastic gradient descent with the standard backpropagation
        #self.train_op = tf.train.AdamOptimizer(config.learning_rate).minimize(self.loss)
        var1 = tf.trainable_variables()[0:2]  #w1,w2
        var1 = var1 + tf.trainable_variables()[3:5]  #b1,b2
        var2 = tf.trainable_variables()[2]  #w3
        var2 = [var2, tf.trainable_variables()[5]]  #b3
        train_op1 = tf.train.AdamOptimizer(0.0001).minimize(self.loss,
                                                            var_list=var1)
        train_op2 = tf.train.AdamOptimizer(0.00001).minimize(self.loss,
                                                             var_list=var2)
        self.train_op = tf.group(train_op1, train_op2)

        tf.global_variables_initializer().run()
        counter = 0
        start_time = time.time()

        # if self.load(self.checkpoint_dir):
        #   print(" [*] Load SUCCESS")
        # else:
        #   print(" [!] Load failed...")
        if config.is_train:

            print("Training...")
            batch_idxs = len(train_data) // config.batch_size
            #print(train_data[0 * config.batch_size: (0 + 1) * config.batch_size])
            for ep in range(config.epoch):
                # Run by batch images
                #batch_idxs = len(train_data) // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images = train_data[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]
                    batch_labels = train_label[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]

                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                              % ((ep+1), counter, time.time()-start_time, err))

                    if counter % 400 == 0:
                        self.save(config.checkpoint_dir, counter)
        #add term

        else:
            print("Testing...")
            if self.load(self.checkpoint_dir):
                print(" [*] Load SUCCESS")
            else:
                print(" [!] Load failed...")

            result = self.pred.eval(feed_dict={
                self.images: input,
                self.labels: label
            })
            #print(result.shape)
            #result = merge(result*255, [nx, ny])
            result = result.squeeze()
            result = result * 255
            image_path = os.path.join(os.getcwd(), config.sample_dir)
            image_path = os.path.join(image_path, "test_image.png")
            imsave(result, image_path)
Example #16
def prepare_data(config):

    # Prepares data if load_existing_data is False
    if not config.load_existing_data:
        input_setup(config)

    # Loads data from data_dir
    print('Loading data...')
    data_dir = checkpoint_dir(config)
    input_, label_, paths_ = read_data(data_dir, config)

    # Shuffles training data
    print('Shuffling data...')
    numData = np.arange(input_.shape[0])
    np.random.shuffle(numData)
    input_ = input_[numData]
    label_ = label_[numData]

    # Prepares frame sets for feeding into different spatial
    # transformers if training mode is 2
    if FLAGS.train_mode == 2:
        print("Preparing frames sets for spatial transformers...")

        curr_prev_imgs = input_[:, :, :, 0:(2 * config.c_dim)]
        curr_next_imgs = np.concatenate(
            (input_[:, :, :, 0:config.c_dim],
             input_[:, :, :, (2 * config.c_dim):(3 * config.c_dim)]),
            axis=3)

        curr_prev_imgs = tf.cast(curr_prev_imgs, tf.float32)
        curr_next_imgs = tf.cast(curr_next_imgs, tf.float32)
        label_ = tf.cast(label_, tf.float32)

        # Provides data in batch one at a time to tf.train.batch
        input_queue = tf.train.slice_input_producer(
            [curr_prev_imgs, curr_next_imgs, label_], shuffle=False)
        x1, x2, y = tf.train.batch(input_queue, batch_size=FLAGS.batch_size)
        return x1, x2, y

    elif FLAGS.train_mode == 4:

        # Upscales input data using bicubic interpolation
        print('Upscaling training data using Bicubic Interpolation...')

        input_new = []
        for i in range(len(input_)):
            input_new.append(
                sp.misc.imresize(input_[i], (config.image_size * config.scale,
                                             config.image_size * config.scale),
                                 interp='bicubic'))
        input_ = np.array(input_new)

        input_ = tf.cast(input_, tf.float32)
        label_ = tf.cast(label_, tf.float32)

        # Provides data in batch one at a time to tf.train.batch
        input_queue = tf.train.slice_input_producer([input_, label_],
                                                    shuffle=False)
        x1, y = tf.train.batch(input_queue, batch_size=FLAGS.batch_size)
        return x1, y

    else:
        input_ = tf.cast(input_, tf.float32)
        label_ = tf.cast(label_, tf.float32)

        # Provides data in batch one at a time to tf.train.batch
        input_queue = tf.train.slice_input_producer([input_, label_],
                                                    shuffle=False)
        x1, y = tf.train.batch(input_queue, batch_size=FLAGS.batch_size)
        return x1, y
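prepare_data returns tensors built with tf.train.slice_input_producer / tf.train.batch, so they only yield data once queue runners are started. A minimal consumption sketch for the default mode that returns (x1, y) (the config object and names are illustrative):

import tensorflow as tf

x1, y = prepare_data(config)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        batch_x, batch_y = sess.run([x1, y])  # one batch of inputs and labels
    finally:
        coord.request_stop()
        coord.join(threads)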
Example #17
    def train(self, config):

        # NOTE : if train, the nx, ny are ignored
        nx, ny = input_setup(config)

        data_dir = checkpoint_dir(config)

        input_, label_ = read_data(data_dir)

        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate).minimize(self.loss)
        tf.initialize_all_variables().run()

        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("Now Start Training...")
            for ep in range(config.epoch):
                # Run by batch images
                batch_idxs = len(input_) // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images = input_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    batch_labels = label_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    counter += 1
                    _, err = self.sess.run(
                        [self.train_op, self.loss],
                        feed_dict={
                            self.images:
                            batch_images,
                            self.labels:
                            batch_labels,
                            self.batch:
                            1,
                            self.deconv_output: [
                                self.batch_size, self.label_size,
                                self.label_size, 256
                            ]
                        })
                    if counter % 10 == 0:
                        print(
                            "Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                            % ((ep + 1), counter, time.time() - time_, err))
                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)
        # Test
        else:
            print("Now Start Testing...")
            res = list()
            for i in range(len(input_)):
                result = self.pred.eval({
                    self.images:
                    input_[i].reshape(1, input_[i].shape[0],
                                      input_[i].shape[1], 3),
                    self.deconv_output:
                    [1, self.label_size, self.label_size, 256]
                })
                # back to interval [0 , 1]
                x = np.squeeze(result)
                x = (x + 1) / 2
                res.append(x)
            res = np.asarray(res)
            res = merge(res, [nx, ny], self.c_dim)

            if self.test_img == "":
                imsave(res, config.result_dir + '/result.png', config)
            else:
                string = self.test_img.split(".")
                print(string)
                imsave(res, config.result_dir + '/' + string[0] + '.png',
                       config)
Example #18
    def train(self, config):
        if config.is_train:
            input_setup(self.sess, config)
        else:
            nx, ny = input_setup(self.sess, config)

        if config.is_train:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "train.h5")
        else:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "test.h5")

        train_data, train_label = read_data(data_dir)

        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.GradientDescentOptimizer(
            config.learning_rate).minimize(self.loss)

        tf.initialize_all_variables().run()

        counter = 0
        start_time = time.time()

        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        if config.is_train:
            print("Training...")

            for ep in xrange(config.epoch):
                # Run by batch images
                batch_idxs = len(train_data) // config.batch_size
                for idx in xrange(0, batch_idxs):
                    batch_images = train_data[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]
                    batch_labels = train_label[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]

                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                          % ((ep+1), counter, time.time()-start_time, err))

                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)

        else:
            print("Testing...")

            result = self.pred.eval({self.images: train_data})

            result = merge(result, [nx, ny])
            result = result.squeeze()
            image_path = os.path.join(os.getcwd(), config.sample_dir)
            image_path = os.path.join(image_path, "test_image.png")
            imsave(result, image_path)

            # Print PSNR
            labelimg = merge(train_label, [nx, ny])
            labelimg = labelimg.squeeze()
            print("HR image size: (%d, %d)" %
                  (labelimg.shape[0], labelimg.shape[1]))
            print("SR image size: (%d, %d)" %
                  (result.shape[0], result.shape[1]))

            bicubic = scipy.ndimage.interpolation.zoom(labelimg,
                                                       (1. / config.scale),
                                                       prefilter=False)
            bicubic = scipy.ndimage.interpolation.zoom(bicubic,
                                                       (config.scale / 1.),
                                                       prefilter=False)
            print("LR image size: (%d, %d)" %
                  (bicubic.shape[0], bicubic.shape[1]))

            psnr_sr = self.cal_psnr(result, labelimg)
            psnr_bicubic = self.cal_psnr(bicubic, labelimg)
            print("")
            print("SR PSNR = %.3f" % psnr_sr)
            print("Bicubic PSNR = %.3f" % psnr_bicubic)
Example #19
    def train(self, config):
        if config.is_train:
            input_setup(self.sess, config)
        else:
            nx, ny = input_setup(self.sess, config)

        if config.is_train:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "train.h5")
        else:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "test.h5")

        train_data, train_label = read_data(data_dir)

        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.AdamOptimizer().minimize(self.loss)
        tf.initialize_all_variables().run()

        counter = 0
        start_time = time.time()

        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        if config.is_train:
            print("Training...")
            epoch_loss = 0
            average_loss = 0
            average_ssim = 0

            for ep in xrange(config.epoch):  #for each epoch
                # Run by batch images
                batch_idxs = len(
                    train_data
                ) // config.batch_size  #TODO: check data loader of tensorflow and shuffle training data in each epoch

                for idx in xrange(0, batch_idxs):

                    batch_images = train_data[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]
                    batch_labels = train_label[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })  #update weights and biases

                    average_ssim += ssim(
                        self.pred.eval(feed_dict={
                            self.images: batch_images,
                            self.labels: batch_labels
                        })[:, 33:66, 33:66],
                        self.labels.eval(feed_dict={
                            self.images: batch_images,
                            self.labels: batch_labels
                        }),
                        multichannel=True) / batch_idxs

                    epoch_loss += err
                    average_loss = epoch_loss / float(batch_idxs)
                    PSNR = 10 * math.log10(1 / average_loss)

                    if counter % 10 == 0:  #display training loss for every 10 batches
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                          % ((ep+1), counter, time.time()-start_time, err))

                    if counter % (
                            batch_idxs * 10
                    ) == 0:  # save the model every 10 epochs (batch_idxs * 10 steps). Note: final model may not be saved!!!
                        self.save(config.checkpoint_dir, counter)
                    if counter % batch_idxs == 0:
                        with open('data.txt', 'a') as file:
                            file.write(
                                str(average_loss) + " , " + str(PSNR) + " , " +
                                str(average_ssim) + "\n")
                            #file.write(str(average_loss) + "\n")
                            epoch_loss = 0
                            average_loss = 0
                            average_ssim = 0
        else:
            print("Testing...")

            result = self.pred.eval({
                self.images: train_data,
                self.labels: train_label
            })
            print(nx, ny)
            result = merge(result, [nx, ny])
            result = result.squeeze()
            image_path = os.path.join(os.getcwd(), config.sample_dir)
            image_path = os.path.join(image_path, "test_image.png")
            imsave(result, image_path)
Example #20
                               kernel_initializer='normal',
                               strides=1,
                               padding='VALID',
                               activation='relu',
                               name='conv3'))
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=args.learning_rate),
                  loss=tf.losses.MSE)
    return model


pp.pprint(args)
os.makedirs(args.checkpoint_dir, exist_ok=True)
os.makedirs(args.save_path, exist_ok=True)
os.makedirs(args.sample_dir, exist_ok=True)
if args.is_train:
    input_setup(args)
    data_dir = 'checkpoint/train.h5'
    train_data, train_label = read_data(data_dir)
    srcnn = createmodel(args)
    # load last weights
    if args.load_weights is not None:
        if args.load_weights.endswith('.h5'):
            weights_path = args.load_weights
        else:
            weights_path = get_last_weights(args.save_path)
        try:
            last_step = int(
                os.path.basename(weights_path).split('_')[-1].split('.')[0])
        except:
            last_step = 0
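get_last_weights above is not included in the snippet. A minimal sketch that returns the newest .h5 file in save_path by modification time (hypothetical; the real helper may parse the step number out of the filename instead):

import glob
import os

def get_last_weights(save_path):
    # Pick the most recently written Keras weights file under save_path.
    weights = glob.glob(os.path.join(save_path, '*.h5'))
    if not weights:
        raise FileNotFoundError('no .h5 weights found in {}'.format(save_path))
    return max(weights, key=os.path.getmtime)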
Example #21
    def train(self, config):
        if config.is_train:
            input_setup(self.sess, config)
        else:
            nx, ny = input_setup(self.sess, config)

        if config.is_train:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "train.h5")
        else:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "test.h5")

        train_data, train_label = read_data(data_dir)

        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.GradientDescentOptimizer(
            config.learning_rate).minimize(self.loss)

        tf.initialize_all_variables().run()
        #For tf 0.12.1
        #tf.global_variables_initializer()

        counter = 0
        start_time = time.time()

        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        if config.is_train:
            print("Training...")

            for ep in xrange(config.epoch):
                # Run by batch images
                batch_idxs = len(train_data) // config.batch_size
                for idx in xrange(0, batch_idxs):
                    batch_images = train_data[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]
                    batch_labels = train_label[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]

                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                              % ((ep + 1), counter, time.time() - start_time, err))

                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)

        else:
            print("Testing...")

            print("Train data shape", train_data.shape)
            print("Train label shape", train_label.shape)

            result = self.pred.eval({
                self.images: train_data,
                self.labels: train_label
            })

            print("Result shape", result.shape)
            print("nx ny", nx, ny)

            image = merge(result, [nx, ny])
            original_image = merge(train_label, [nx, ny])
            interpolation = down_upscale(modcrop(original_image, config.scale),
                                         scale=config.scale)

            imsave(
                original_image,
                os.path.join(os.getcwd(), config.sample_dir, "original.bmp"),
                config.is_RGB)
            imsave(
                interpolation,
                os.path.join(os.getcwd(), config.sample_dir,
                             "interpolation.bmp"), config.is_RGB)
            imsave(image,
                   os.path.join(os.getcwd(), config.sample_dir, "srcnn.bmp"),
                   config.is_RGB)
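modcrop and down_upscale used in the test branch above are not shown. A minimal sketch, assuming grayscale (2-D) images, where modcrop trims the image so its sides are divisible by the scale factor and down_upscale simulates the degraded input by zooming down and back up (the same pattern Example #18 writes inline):

import scipy.ndimage

def modcrop(image, scale):
    # Trim so that height and width are exact multiples of scale.
    h = image.shape[0] - image.shape[0] % scale
    w = image.shape[1] - image.shape[1] % scale
    return image[:h, :w]

def down_upscale(image, scale):
    # Downscale then upscale again to mimic the interpolated low-resolution input.
    small = scipy.ndimage.interpolation.zoom(image, 1.0 / scale, prefilter=False)
    return scipy.ndimage.interpolation.zoom(small, float(scale), prefilter=False)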
Example #22
File: model.py Project: jiayi-ma/SMFuse
    def train(self, config):
        if config.is_train:
            input_setup(self.sess, config, "Train_ir")
            input_setup(self.sess, config, "Train_vi")
            # input_setup(self.sess, config,"Train_irtu")
            # input_setup(self.sess,config,"Train_vitu")
            input_setup(self.sess, config, "mask")
        else:
            nx_ir, ny_ir = input_setup(self.sess, config, "Test_ir")
            nx_vi, ny_vi = input_setup(self.sess, config, "Test_vi")
            nx_mask, ny_mask = input_setup(self.sess, config, "mask")

        if config.is_train:
            data_dir_ir = os.path.join('./{}'.format(config.checkpoint_dir),
                                       "Train_ir", "train.h5")
            data_dir_vi = os.path.join('./{}'.format(config.checkpoint_dir),
                                       "Train_vi", "train.h5")
            # data_dir_irtu = os.path.join('./{}'.format(config.checkpoint_dir), "Train_irtu","train.h5")
            # data_dir_vitu = os.path.join('./{}'.format(config.checkpoint_dir), "Train_vitu","train.h5")
            data_dir_mask = os.path.join('./{}'.format(config.checkpoint_dir),
                                         "mask", "train.h5")
        else:
            data_dir_ir = os.path.join('./{}'.format(config.checkpoint_dir),
                                       "Test_ir", "test.h5")
            data_dir_vi = os.path.join('./{}'.format(config.checkpoint_dir),
                                       "Test_vi", "test.h5")
            data_dir_mask = os.path.join('./{}'.format(config.checkpoint_dir),
                                         "mask", "test.h5")

        train_data_ir, train_label_ir = read_data(data_dir_ir)
        train_data_vi, train_label_vi = read_data(data_dir_vi)
        # train_data_irtu, train_label_irtu = read_data(data_dir_irtu)
        # train_data_vitu, train_label_vitu = read_data(data_dir_vitu)
        train_data_mask, train_label_mask = read_data(data_dir_mask)

        t_vars = tf.trainable_variables()
        # self.d_vars = [var for var in t_vars if 'discriminator' in var.name]
        # print(self.d_vars)
        self.g_vars = [var for var in t_vars if 'fusion_model' in var.name]
        print(self.g_vars)

        with tf.name_scope('train_step'):
            self.train_fusion_op = tf.train.AdamOptimizer(
                config.learning_rate).minimize(self.g_loss,
                                               var_list=self.g_vars)
            # self.train_discriminator_op=tf.train.AdamOptimizer(config.learning_rate).minimize(self.d_loss,var_list=self.d_vars)

        self.summary_op = tf.summary.merge_all()

        self.train_writer = tf.summary.FileWriter(config.summary_dir +
                                                  '/train',
                                                  self.sess.graph,
                                                  flush_secs=60)

        tf.initialize_all_variables().run()

        counter = 0
        start_time = time.time()

        if config.is_train:
            print("Training...")

            for ep in xrange(config.epoch):
                # Run by batch images
                batch_idxs = len(train_data_ir) // config.batch_size
                for idx in xrange(0, batch_idxs):
                    batch_images_ir = train_data_ir[idx *
                                                    config.batch_size:(idx +
                                                                       1) *
                                                    config.batch_size]
                    batch_labels_ir = train_label_ir[idx *
                                                     config.batch_size:(idx +
                                                                        1) *
                                                     config.batch_size]
                    batch_images_vi = train_data_vi[idx *
                                                    config.batch_size:(idx +
                                                                       1) *
                                                    config.batch_size]
                    batch_labels_vi = train_label_vi[idx *
                                                     config.batch_size:(idx +
                                                                        1) *
                                                     config.batch_size]
                    # batch_images_irtu = train_data_irtu[idx*config.batch_size : (idx+1)*config.batch_size]
                    # batch_labels_irtu = train_label_irtu[idx*config.batch_size : (idx+1)*config.batch_size]
                    # batch_images_vitu = train_data_vitu[idx*config.batch_size : (idx+1)*config.batch_size]
                    # batch_labels_vitu = train_label_vitu[idx*config.batch_size : (idx+1)*config.batch_size]
                    batch_images_mask = train_data_mask[idx *
                                                        config.batch_size:
                                                        (idx + 1) *
                                                        config.batch_size]
                    batch_labels_mask = train_label_mask[idx *
                                                         config.batch_size:
                                                         (idx + 1) *
                                                         config.batch_size]

                    counter += 1
                    # for i in range(2):
                    #   _, err_d= self.sess.run([self.train_discriminator_op, self.d_loss], feed_dict={self.images_ir: batch_images_ir, self.images_vi: batch_images_vi,self.images_mask: batch_images_mask, self.labels_vi: batch_labels_vi,self.labels_ir:batch_labels_ir,self.labels_mask:batch_labels_mask})

                    _, err_g, summary_str = self.sess.run(
                        [self.train_fusion_op, self.g_loss, self.summary_op],
                        feed_dict={
                            self.images_ir: batch_images_ir,
                            self.images_vi: batch_images_vi,
                            self.images_mask: batch_images_mask,
                            self.labels_ir: batch_labels_ir,
                            self.labels_vi: batch_labels_vi,
                            self.labels_mask: batch_labels_mask
                        })

                    self.train_writer.add_summary(summary_str, counter)

                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss_g:[%.8f]" \
                          % ((ep+1), counter, time.time()-start_time, err_g))

                self.save(config.checkpoint_dir, ep)

        else:
            print("Testing...")

            result = self.fusion_mask.eval(
                feed_dict={
                    self.images_ir: train_data_ir,
                    self.labels_ir: train_label_ir,
                    self.images_vi: train_data_vi,
                    self.labels_vi: train_label_vi,
                    self.images_mask: train_data_mask,
                    self.labels_mask: train_label_mask
                })
            result = result * 127.5 + 127.5
            result = merge(result, [nx_ir, ny_ir])
            result = result.squeeze()
            image_path = os.path.join(os.getcwd(), config.sample_dir)
            image_path = os.path.join(image_path, "test_image.png")
            imsave(result, image_path)
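The test branch above relies on a merge() helper that is not included in this snippet to stitch the per-patch network outputs back into one image. A minimal sketch of what such a helper typically looks like, assuming the patches come in row-major order and [nx_ir, ny_ir] describes the patch grid:

import numpy as np

def merge(images, size):
    # Hypothetical sketch of the patch-stitching helper used above.
    # images: array of shape [nx*ny, h, w, c] with patches in row-major order
    # size:   [nx, ny], the grid layout used when the patches were cut
    h, w = images.shape[1], images.shape[2]
    out = np.zeros((h * size[0], w * size[1], images.shape[3]))
    for idx, patch in enumerate(images):
        col = idx % size[1]
        row = idx // size[1]
        out[row * h:row * h + h, col * w:col * w + w, :] = patch
    return out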
예제 #23
0
    def train(self, config):

        merged = tf.summary.merge_all()

        if config.is_train:
            input_setup(self.sess, config)
        else:
            nx, ny = input_setup(self.sess, config)

        if config.is_train:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "train.h5")
        else:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "test.h5")

        train_data, train_label = read_data(data_dir)

        with tf.name_scope('Train'):
            # Stochastic gradient descent with the standard backpropagation
            #self.train_op = tf.train.AdamOptimizer(config.learning_rate).minimize(self.loss)
            self.train_op = tf.train.GradientDescentOptimizer(
                config.learning_rate).minimize(self.loss)

        tf.initialize_all_variables().run()

        counter = 0
        start_time = time.time()

        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        if config.is_train:
            print("Training...")

            train_writer = tf.summary.FileWriter('logs/train', self.sess.graph)

            for ep in xrange(config.epoch):
                # Run by batch images
                batch_idxs = len(train_data) // config.batch_size
                for idx in xrange(0, batch_idxs):
                    batch_images = train_data[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]
                    batch_labels = train_label[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]

                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })
                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                          % ((ep+1), counter, time.time()-start_time, err))

                    if counter % 500 == 0:
                        summary = self.sess.run(merged,
                                                feed_dict={
                                                    self.images: batch_images,
                                                    self.labels: batch_labels
                                                })
                        train_writer.add_summary(summary, counter)
                        self.save(config.checkpoint_dir, counter)

        else:
            print("Testing...")

            test_writer = tf.summary.FileWriter('logs/test', self.sess.graph)

            result = self.pred.eval({
                self.images: train_data,
                self.labels: train_label
            })

            result = merge(result, [nx, ny])
            result = result.squeeze()
            image_path = os.path.join(os.getcwd(), config.sample_dir)
            image_path = os.path.join(image_path, "srcnn.png")
            imsave(result, image_path)
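tf.summary.merge_all() only collects summaries that were registered while the graph was being built, so this example presumes the model code defines at least one, most likely a scalar on the loss. A minimal sketch of that registration (hypothetical helper, TF 1.x API):

import tensorflow as tf

def build_loss_with_summary(pred, labels):
    # Hypothetical: whatever summaries are registered here are what the
    # merged op run every 500 steps above actually writes to logs/train.
    loss = tf.reduce_mean(tf.square(labels - pred))
    tf.summary.scalar('loss', loss)
    return loss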
예제 #24
0
    def train(self, config):
        if config.is_train:
            input_setup(self.sess, config)
        else:
            nx, ny, img_name = input_setup(self.sess, config)

        if config.is_train:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "train.h5")
        else:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "test.h5")

        train_data, train_label = read_data(data_dir)

        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        if config.is_train:
            print("Training...")

            # Stochastic gradient descent with the standard backpropagation
            self.train_op = tf.train.GradientDescentOptimizer(
                config.learning_rate).minimize(self.loss)

            tf.initialize_all_variables().run()

            counter = 0
            start_time = time.time()

            for ep in xrange(config.epoch):
                # Run by batch images
                batch_idxs = len(train_data) // config.batch_size
                for idx in xrange(0, batch_idxs):
                    batch_images = train_data[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]
                    batch_labels = train_label[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]

                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                          % ((ep+1), counter, time.time()-start_time, err))

                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)

        else:
            print("Testing...")
            print("shape:", train_data.shape)
            result = self.pred.eval({
                self.images: train_data,
                self.labels: train_label
            })
            #print("res shape:", result.shape)

            result = merge(result, [nx, ny])
            result = result.squeeze()
            # added for writing a TIFF image and restoring the original color range
            result = (result * 65535.).astype(np.uint16)

            #print("res is:", result[0:5,0:5])
            output_path = os.path.join(os.getcwd(), config.sample_dir)
            image_path = os.path.join(output_path, "test_srcnn_" + img_name +
                                      ".tiff")  #changed from png
            imsave(result, image_path)

            # this part added for directly comparing the PSNR
            label_path = os.path.join(output_path,
                                      "test_ori_" + img_name + ".tiff")
            bicubic_path = os.path.join(output_path,
                                        "test_bicubic_" + img_name + ".tiff")

            bicubic_img = imread(bicubic_path, is_grayscale=True)
            label_img = imread(label_path, is_grayscale=True)
            output_img = imread(image_path, is_grayscale=True)

            #compute psnr
            bicubic_psnr = psnr(label_img, bicubic_img)
            srcnn_psnr = psnr(label_img, output_img)

            #bicubic_img = bicubic_img.astype(np.float)
            #output_img = output_img.astype(np.float)
            #label_img = label_img.astype(np.float)
            #compute ssim
            bicubic_ssim = ssim(label_img, bicubic_img)
            srcnn_ssim = ssim(label_img, output_img)

            print("bicubic PSNR for " + img_name +
                  ": [{}]".format(bicubic_psnr))
            print("SRCNN PSNR for " + img_name + ": [{}]".format(srcnn_psnr))

            print("bicubic SSIM for " + img_name +
                  ": [{}]".format(bicubic_ssim))
            print("SRCNN SSIM for" + img_name + ": [{}]".format(srcnn_ssim))
예제 #25
0
    def train(self, config):
        
        # NOTE: if train, the nx, ny are ignored
        
        #print("config.is_train:", config.is_train)
        nx, ny, original_shape = input_setup(config)

        #print("nx, ny, original_shape:", nx, ny, original_shape)
        data_dir = checkpoint_dir(config)
        
        print("reading data..")
        input_, label_ = read_data(data_dir)
        
        print("input_", input_.shape)
        
        merged_summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter("./log/train_300") #, self.sess.graph)
        #self.summary_writer = tf.summary.FileWriter("./log/", tf.get_default_graph())

        
        # Stochastic gradient descent with the standard backpropagation
        #self.train_op = tf.train.GradientDescentOptimizer(config.learning_rate).minimize(self.loss)
        
        self.optimizer = tf.train.AdamOptimizer(learning_rate=config.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)
        #self.train_op = tf.train.AdamOptimizer(learning_rate=config.learning_rate).minimize(self.loss)
        tf.initialize_all_variables().run()

        counter = 0
        time_ = time.time()

        
        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("Now Start Training...")
            #for ep in range(config.epoch):
                
            for ep in range(300, 1000+1, 1):   
                
                #print("ep:", ep)
                #sys.exit()
                
                loss_summary_per_batch = []
                
                
                
                # Run by batch images
                batch_idxs = len(input_) // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images = input_[idx * config.batch_size : (idx + 1) * config.batch_size]
                    batch_labels = label_[idx * config.batch_size : (idx + 1) * config.batch_size]
                    counter += 1
                    _, err, summary = self.sess.run([self.train_op, self.loss, merged_summary_op], feed_dict={self.images: batch_images, self.labels: batch_labels})

                    
                    summary_pb = tf.summary.Summary()
                    summary_pb.ParseFromString(summary)
                    
                    summaries = {}
                    for val in summary_pb.value:
                        summaries[val.tag] = val.simple_value

                    #print("summaries:", summaries)
                    
                    
                    loss_summary_per_batch.append(summaries['loss'])
                    
                    
                    summary_writer.add_summary(summary, (ep) * counter)

                    #self.summary_writer.add_summary(summary, (ep+1) * counter)
                    
                    if counter % 1000 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" % ((ep), counter, time.time()-time_, err))
                    #print(label_[1] - self.pred.eval({self.images: input_})[1],'loss:]',err)
                    
                    
                    #print("Epoch: [%2d], loss: [%.8f]", (ep+1), tf.reduce_mean(tf.square(label_ - self.pred.eval({self.images: input_}))))
                    
                    #if counter % 500 == 0:
                    #if counter % 20 == 0:
                    #    self.save(config.checkpoint_dir, counter)
                        
                if ep == 0 or ep % 10 == 0:
                    self.save(config.checkpoint_dir, ep)
                    
                    ###
                    '''
                    try:
                        config.is_train = False
                        nx_, ny_, original_shape_ = input_setup(config)
                        data_dir_ = checkpoint_dir(config)
                        input__, label__ = read_data(data_dir_)

                        

                        print("Now Start Testing...")

                        result_ = self.pred.eval({self.images: input__})                   
                        image_ = merge(result_, [nx_, ny_], self.c_dim)

                        print("image after merge:", image_.shape)
                        print("[nx_, ny_]:", [nx_, ny_])

                        print("original_shape:", original_shape_)

                        print(type(image_), type(original_shape_[0]), type(original_shape_[1]))
                        cropped_img_ = crop_center(image_, original_shape_[0], original_shape_[1])

                        print("cropped_img_:", cropped_img_.shape)

                        imsave(image_, config.result_dir + '/result-' + str(ep) + '.png', config)

                        imsave(cropped_img_, config.result_dir + '/result_crop-' + str(ep) + '.png', config)
                    except:
                        print("Unexpected error while evaluating image:", sys.exc_info()[0])

                    config.is_train = True
                    '''

                    ###
                    
                print("loss per epoch[%d] loss: [%.8f]"  % ((ep), np.mean(loss_summary_per_batch)))
                summary_writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag="loss per epoch", simple_value=np.mean(loss_summary_per_batch)),]), ((ep)))
                
                summary_writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag="learning rate", simple_value=self.optimizer._lr),]), ((ep)))
                
                #print("learning rate:", self.optimizer._lr)
                
        # Test
        else:
            print("Now Start Testing...")
            #print("nx","ny",nx,ny)
            
            result = self.pred.eval({self.images: input_})
            
            print("result:", result.shape)
            
            
            #print(label_[1] - result[1])
            image = merge(result, [nx, ny], self.c_dim)
            
            print("image after merge:", image.shape)
            print("[nx, ny]:", [nx, ny])

            print("original_shape:", original_shape)
            
            print(type(image), type(original_shape[0]), type(original_shape[1]))
            cropped_img = crop_center(image, original_shape[0], original_shape[1])
            
            
            print("cropped_img:", cropped_img.shape)
            
            #image_LR = merge(input_, [nx, ny], self.c_dim)
            #checkimage(image_LR)
            imsave(image, config.result_dir+'/result.png', config)
            
            imsave(cropped_img, config.result_dir+'/result_crop.png', config)
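crop_center(image, height, width) is another helper that is not shown here; judging by how it is called, it trims the merged output back to the original image size. A straightforward sketch under that assumption:

def crop_center(img, crop_h, crop_w):
    # Hypothetical helper: center-crop an HxW or HxWxC array to (crop_h, crop_w).
    h, w = img.shape[0], img.shape[1]
    top = max((h - crop_h) // 2, 0)
    left = max((w - crop_w) // 2, 0)
    return img[top:top + crop_h, left:left + crop_w]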
예제 #26
0
    def train(self, config):
        if config.is_train:
            input_setup(self.sess, config, "Train_ir")
            input_setup(self.sess, config, "Train_vi")
        else:
            nx_ir, ny_ir = input_setup(self.sess, config, "Test_ir")
            nx_vi, ny_vi = input_setup(self.sess, config, "Test_vi")

        if config.is_train:
            data_dir_ir = os.path.join('./{}'.format(config.checkpoint_dir),
                                       "Train_ir", "train.h5")
            data_dir_vi = os.path.join('./{}'.format(config.checkpoint_dir),
                                       "Train_vi", "train.h5")
        else:
            data_dir_ir = os.path.join('./{}'.format(config.checkpoint_dir),
                                       "Test_ir", "test.h5")
            data_dir_vi = os.path.join('./{}'.format(config.checkpoint_dir),
                                       "Test_vi", "test.h5")

        train_data_ir, train_label_ir = read_data(data_dir_ir)
        train_data_vi, train_label_vi = read_data(data_dir_vi)
        # Select the variable groups updated during training (the discriminator and the generator are trained separately, so their variables must be picked out by name)
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'discriminator' in var.name]
        print(self.d_vars)
        self.g_vars = [var for var in t_vars if 'fusion_model' in var.name]
        print(self.g_vars)
        # clip_ops = []
        # for var in self.d_vars:
        #     clip_bounds = [-.01, .01]
        #     clip_ops.append(
        #         tf.assign(var,
        #                   tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])))
        # self.clip_disc_weights = tf.group(*clip_ops)
        # (a tidied standalone version of this weight clipping appears after this example)
        # Stochastic gradient descent with the standard backpropagation
        with tf.name_scope('train_step'):
            self.train_fusion_op = tf.train.AdamOptimizer(
                config.learning_rate).minimize(self.g_loss_total,
                                               var_list=self.g_vars)
            #self.train_discriminator_op=tf.train.AdamOptimizer(config.learning_rate).minimize(self.d_loss,var_list=self.d_vars)
        # Merge all recorded summaries
        self.summary_op = tf.summary.merge_all()
        # Create the summary log file
        self.train_writer = tf.summary.FileWriter(config.summary_dir +
                                                  '/train',
                                                  self.sess.graph,
                                                  flush_secs=60)

        tf.initialize_all_variables().run()

        counter = 0
        start_time = time.time()

        # if self.load(self.checkpoint_dir):
        # print(" [*] Load SUCCESS")
        # else:
        # print(" [!] Load failed...")

        if config.is_train:
            print("Training...")

            for ep in xrange(config.epoch):
                # Run by batch images
                batch_idxs = len(train_data_ir) // config.batch_size
                for idx in xrange(0, batch_idxs):
                    batch_images_ir = train_data_ir[idx *
                                                    config.batch_size:(idx +
                                                                       1) *
                                                    config.batch_size]
                    batch_labels_ir = train_label_ir[idx *
                                                     config.batch_size:(idx +
                                                                        1) *
                                                     config.batch_size]
                    batch_images_vi = train_data_vi[idx *
                                                    config.batch_size:(idx +
                                                                       1) *
                                                    config.batch_size]
                    batch_labels_vi = train_label_vi[idx *
                                                     config.batch_size:(idx +
                                                                        1) *
                                                     config.batch_size]

                    counter += 1
                    #for i in range(2):
                    # _, err_d= self.sess.run([self.train_discriminator_op, self.d_loss], feed_dict={self.images_ir: batch_images_ir, self.images_vi: batch_images_vi, self.labels_vi: batch_labels_vi,self.labels_ir:batch_labels_ir})
                    # self.sess.run(self.clip_disc_weights)
                    _, err_g, summary_str = self.sess.run(
                        [
                            self.train_fusion_op, self.g_loss_total,
                            self.summary_op
                        ],
                        feed_dict={
                            self.images_ir: batch_images_ir,
                            self.images_vi: batch_images_vi,
                            self.labels_ir: batch_labels_ir,
                            self.labels_vi: batch_labels_vi
                        })
                    # Write the summaries to the log file
                    self.train_writer.add_summary(summary_str, counter)

                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss_g:[%.8f]" \
                          % ((ep+1), counter, time.time()-start_time, err_g))
                        #print(a)

                self.save(config.checkpoint_dir, ep)

        else:
            print("Testing...")

            result = self.fusion_image.eval(
                feed_dict={
                    self.images_ir: train_data_ir,
                    self.labels_ir: train_label_ir,
                    self.images_vi: train_data_vi,
                    self.labels_vi: train_label_vi
                })
            result = result * 127.5 + 127.5
            result = merge(result, [nx_ir, ny_ir])
            result = result.squeeze()
            image_path = os.path.join(os.getcwd(), config.sample_dir)
            image_path = os.path.join(image_path, "test_image.png")
            imsave(result, image_path)
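The commented-out weight-clipping block in this example is the usual WGAN-style constraint on the discriminator variables. A tidied, standalone version of what those lines compute, assuming d_vars is the discriminator variable list selected above (TF 1.x API):

import tensorflow as tf

def make_clip_disc_weights(d_vars, bound=0.01):
    # Hypothetical reconstruction of the commented-out block: clamp every
    # discriminator variable to [-bound, bound] after each update.
    clip_ops = [tf.assign(var, tf.clip_by_value(var, -bound, bound))
                for var in d_vars]
    return tf.group(*clip_ops)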
예제 #27
0
    def train(self, config):

        # NOTE: if train, the nx, ny are ignored
        nx, ny = input_setup(config)

        data_dir = checkpoint_dir(config)

        input_, label_ = read_data(data_dir)

        # Stochastic gradient descent with the standard backpropagation

        # NOTE: learning rate decay
        global_step = tf.Variable(0, trainable=False)
        #learning_rate = tf.train.exponential_decay(config.learning_rate, global_step * config.batch_size, len(input_)*100, 0.1, staircase=True)
        # NOTE: Clip gradient
        opt = tf.train.AdamOptimizer(learning_rate=config.learning_rate)
        grad_and_value = opt.compute_gradients(self.loss)

        clip = tf.Variable(config.clip_grad, name='clip')
        capped_gvs = [(tf.clip_by_value(grad, -(clip), clip), var)
                      for grad, var in grad_and_value]

        self.train_op = opt.apply_gradients(capped_gvs,
                                            global_step=global_step)
        #self.train_op = tf.train.AdamOptimizer(learning_rate=config.learning_rate).minimize(self.loss)

        tf.initialize_all_variables().run()

        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("Now Start Training...")
            for ep in range(config.epoch):
                # Run by batch images
                batch_idxs = len(input_) // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images = input_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    batch_labels = label_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print(
                            "Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                            % ((ep + 1), counter, time.time() - time_, err))
                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)
        # Test
        else:
            print("Now Start Testing...")

            result = self.pred.eval({self.images: input_}) + input_
            image = merge(result, [nx, ny], self.c_dim)
            checkimage(merge(result, [nx, ny], self.c_dim))
            #checkimage(image_LR)
            imsave(image, config.result_dir + '/result.png', config)
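The commented-out learning_rate line in this example sketches exponential decay driven by global_step. Wired into the clipped-gradient setup above, it would look roughly like this (hypothetical helper; the schedule constants are taken from the commented line):

import tensorflow as tf

def make_decayed_train_op(loss, base_lr, num_examples, batch_size, clip_grad):
    # Hypothetical sketch: decay the learning rate by 10x every
    # (num_examples * 100) processed samples, then clip gradients by value.
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(
        base_lr, global_step * batch_size,
        num_examples * 100, 0.1, staircase=True)
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
    grads_and_vars = opt.compute_gradients(loss)
    capped = [(tf.clip_by_value(g, -clip_grad, clip_grad), v)
              for g, v in grads_and_vars]
    return opt.apply_gradients(capped, global_step=global_step)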
예제 #28
0
    def train(self, config):
        if config.is_train:
            input_setup(self.sess, config)
        else:
            nx, ny = input_setup(self.sess, config)

        if config.is_train:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "train.h5")
        else:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "test.h5")

        train_data, train_label = read_data(data_dir)

        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.GradientDescentOptimizer(
            config.learning_rate).minimize(self.loss)

        tf.initialize_all_variables().run()

        counter = 0
        start_time = time.time()

        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
            counter = self.epoch_num * len(train_data) // config.batch_size
        else:
            print(" [!] Load failed...")

        if config.is_train:
            print("Training...")

            # for ep in xrange(config.epoch):
            while self.epoch_num < config.epoch:
                # Run by batch images
                batch_idxs = len(train_data) // config.batch_size
                for idx in xrange(0, batch_idxs):
                    batch_images = train_data[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]
                    batch_labels = train_label[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]

                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                          % (self.epoch_num+1, counter, time.time()-start_time, err))
                if self.epoch_num % 10 == 0:
                    self.save(config.checkpoint_dir, self.epoch_num)
                self.epoch_num += 1

        else:
            print("Testing...")
            result = self.pred.eval({
                self.images: train_data,
                self.labels: train_label
            })
            result = merge(result, [nx, ny])
            # print "Before squeeze", result.shape
            if config.is_grayscale:
                result = result.squeeze()
            # else:
            #   result *= 255
            # print "After squeeze", result.shape
            image_path = os.path.join(os.getcwd(), config.sample_dir)
            image_path = os.path.join(
                image_path,
                str(config.sample_num) + "-test_image.png")
            imsave(result, image_path)
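self.save() and self.load() are used by every example here but never shown. A minimal sketch of what such checkpoint helpers usually look like with tf.train.Saver (function names, model name and directory layout are assumptions):

import os
import tensorflow as tf

def save(sess, saver, checkpoint_dir, step, model_name="SRCNN.model"):
    # Hypothetical helper: write a checkpoint tagged with the current step.
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)

def load(sess, saver, checkpoint_dir):
    # Hypothetical helper: restore the latest checkpoint if one exists.
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        return True
    return False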
예제 #29
0
    def train(self, config):
        if config.is_train:
            input_setup(self.sess, config)  # read the images, build training pairs and store them as an .h5 file
        else:
            nx, ny = input_setup(self.sess, config)  # read the images, upscale them and trim the extra borders

        if config.is_train:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "train.h5")  #读取训练数据文件路径
        else:
            data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                    "test.h5")

        train_data, train_label = read_data(data_dir)  # read the training data

        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.GradientDescentOptimizer(
            config.learning_rate).minimize(self.loss)

        tf.initialize_all_variables().run()

        counter = 0
        start_time = time.time()

        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        if config.is_train:
            print("Training...")

            for ep in xrange(config.epoch):
                # Run by batch images
                batch_idxs = len(train_data) // config.batch_size
                for idx in xrange(0, batch_idxs):
                    batch_images = train_data[idx *
                                              config.batch_size:(idx + 1) *
                                              config.batch_size]
                    batch_labels = train_label[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]

                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                          % ((ep+1), counter, time.time()-start_time, err))

                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)

        # Note: this is the testing branch; in real use the calculation should be done with the label (the original image) (already corrected)
        else:
            print("Testing...")

            # result = self.pred.eval({self.images: train_data, self.labels: train_label})  # original code
            result = self.pred.eval({self.images: train_data})

            result = merge(result, [nx, ny])
            result = result.squeeze()
            # result = scipy.ndimage.interpolation.zoom(result, (2/1.), prefilter=False)
            image_path = os.path.join(os.getcwd(),
                                      config.sample_dir)  # the result image is saved below
            image_path = os.path.join(
                image_path, "output" + str(config.image_index) + ".png")
            imsave(result, image_path)
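The commented-out scipy zoom call in the test branch hints at the bicubic resampling step these SRCNN-style pipelines depend on. A minimal sketch of that operation, assuming a 2-D grayscale array and scipy available (the deprecated scipy.ndimage.interpolation.zoom alias from the comment is replaced by scipy.ndimage.zoom):

import scipy.ndimage

def bicubic_upscale(img, scale=2):
    # Hypothetical sketch matching the commented-out zoom call above:
    # bicubic (order=3) upscaling of a 2-D array by `scale`.
    return scipy.ndimage.zoom(img, float(scale), order=3, prefilter=False)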
예제 #30
0
    def train(self, config):
        """
        Training process.
        """
        print("Training...")

        input_setup(self.sess, config)

        data_dir = os.path.join('./{}'.format(config.checkpoint_dir),
                                "train.h5")

        train_data, train_label = read_data(data_dir)

        # Stochastic gradient descent with the standard backpropagation
        #self.train_op = tf.train.GradientDescentOptimizer(config.learning_rate).minimize(self.loss)
        self.train_op = tf.train.AdamOptimizer(config.learning_rate).minimize(
            self.loss)

        self.sess.run(tf.global_variables_initializer())

        # Define iteration counter, timer and average loss
        itera_counter = 0
        avg_loss = 0
        avg_500_loss = 0
        start_time = time.time()

        # Load checkpoint
        if self.load(self.checkpoint_dir, config.scale):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        for ep in range(config.epoch):
            # Run by batch images
            batch_idxs = len(train_data) // config.batch_size

            # Shuffle the batch data
            shuffled_data = list(zip(train_data, train_label))
            random.shuffle(shuffled_data)
            train_data, train_label = zip(*shuffled_data)

            for idx in range(0, batch_idxs):
                itera_counter += 1

                # Get the training and testing data
                batch_images = train_data[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                batch_labels = train_label[idx * config.batch_size:(idx + 1) *
                                           config.batch_size]

                # Run the model
                _, err = self.sess.run([self.train_op, self.loss],
                                       feed_dict={
                                           self.images: batch_images,
                                           self.labels: batch_labels
                                       })

                avg_loss += err
                avg_500_loss += err

                if itera_counter % 10 == 0:
                    print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" \
                         % ((ep+1), itera_counter, time.time()-start_time, err))

                if itera_counter % 500 == 0:
                    self.save(config.checkpoint_dir, config.scale,
                              itera_counter)

                    print("==> Epoch: [%2d], average loss of 500 steps: [%.8f], average loss: [%.8f]" \
                         % ((ep+1), avg_500_loss/500, avg_loss/itera_counter))
                    avg_500_loss = 0