Code example #1
    def test(self, config):
        # NOTE: in training mode, nx and ny are ignored
        # Read the image files, cut them into sub-images, and save them in HDF5 format
        nx, ny = input_setup_test(config)

        # get the test.h5 file
        data_dir = checkpoint_dir(config)

        # Read h5 format data file
        input_, label_ = read_data(data_dir)

        # Load the checkpoint used for testing, or to pretrain and refine the model
        self.load(config.checkpoint_dir)

        print("Now Start Testing...")
        time_ = time.time()

        # The network output is a residual, so add the input back
        result = self.pred.eval({self.images: input_}) + input_

        # Merge the sub-images (Y channel only)
        image = merge(result, [nx, ny], self.c_dim)

        # Convert from the Y channel back to RGB
        image = Ycbcr2RGB(image, config)
        # Show the merged image
        checkimage(image)
        imsave(image, config.result_dir + '/result.png', config)
        print("time: [%4.4f]" % (time.time() - time_))
Code example #2
    def train(self, config):
        print("\nPrepare Data...\n")
        input_setup(config)
        data_dir = get_data_dir(config)
        data_num = get_data_num(data_dir)

        self.train_op = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate).minimize(self.loss)
        tf.global_variables_initializer().run()

        # merged_summary_op = tf.summary.merge_all()
        # summary_writer = tf.summary.FileWriter(config.checkpoint_dir, self.sess.graph)

        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("\nNow Start Training...\n")
            for ep in range(config.epoch):
                # Run by batch images
                batch_idxs = data_num // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images, batch_labels = get_batch(
                        data_dir, idx, config.batch_size)
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print(
                            "Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                            % ((ep + 1), counter, time.time() - time_, err))
                    if counter % 100 == 0:
                        self.save(config.checkpoint_dir, counter)
                        # summary_str = self.sess.run(merged_summary_op)
                        # summary_writer.add_summary(summary_str, counter)
        # Test
        else:
            print("\nNow Start Testing...\n")
            time_ = time.time()
            input_, label_ = get_batch(data_dir, 0, 1)
            result = self.sess.run([self.pred],
                                   feed_dict={
                                       self.images:
                                       input_[0].reshape(
                                           1, self.h, self.w, self.c_dim)
                                   })
            print "time:", (time.time() - time_)
            x = np.squeeze(result)
            checkimage(x)
            print "shape:", x.shape
            imsave(x, config.result_dir + '/result.png', config)
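get_batch() above returns one batch of input/label pairs from the prepared data file. A minimal sketch of such a helper, assuming the pairs are stored in an HDF5 file with "input" and "label" datasets (the file layout and dataset names are assumptions):

    import h5py

    def get_batch(data_dir, idx, batch_size):
        # Read only the slice that belongs to batch number `idx`
        with h5py.File(data_dir, 'r') as f:
            start = idx * batch_size
            end = start + batch_size
            batch_images = f['input'][start:end]
            batch_labels = f['label'][start:end]
        return batch_images, batch_labels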
Code example #3
    def train(self, config):

        # NOTE: in training mode, nx and ny are ignored
        input_setup(config)

        data_dir = checkpoint_dir(config)

        input_, label_ = read_data(data_dir)

        print(input_.shape, label_.shape)

        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate).minimize(self.loss)
        tf.global_variables_initializer().run()

        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("Now Start Training...")
            for ep in range(config.epoch):
                # Run by batch images
                batch_idxs = len(input_) // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images = input_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    batch_labels = label_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print(
                            "Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                            % ((ep + 1), counter, time.time() - time_, err))
                        #print(label_[1] - self.pred.eval({self.images: input_})[1],'loss:]',err)
                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)
        # Test
        else:
            print("Now Start Testing...")
            result = self.pred.eval({
                self.images:
                input_[0].reshape(1, self.h, self.w, self.c_dim)
            })
            x = np.squeeze(result)
            checkimage(x)
            print(x.shape)
            imsave(x, config.result_dir + '/result.png', config)
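All of these examples rely on self.save() and self.load() for checkpointing. A minimal sketch of that pair, assuming the model keeps a tf.train.Saver in self.saver and that os and tensorflow (as tf) are imported at the top of model.py; the "model" subdirectory and "model.ckpt" prefix are placeholders:

    def save(self, checkpoint_dir, step):
        # Persist the current weights under checkpoint_dir/model
        model_dir = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        self.saver.save(self.sess,
                        os.path.join(model_dir, "model.ckpt"),
                        global_step=step)

    def load(self, checkpoint_dir):
        # Restore the latest checkpoint if one exists; otherwise train from scratch
        ckpt = tf.train.get_checkpoint_state(os.path.join(checkpoint_dir, "model"))
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)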
Code example #4
File: model.py Project: WxlSky/ESPCN-1
    def test(self, config):
        input_setup(config)

        data_dir = checkpoint_dir(config)

        input_, label_ = read_data(data_dir)

        print(input_.shape, label_.shape)
        print(config.is_train)
        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        print("Now Start Testing...")
        result = self.pred.eval(
            {self.images: input_[0].reshape(1, self.h, self.w, self.c_dim)})
        x = np.squeeze(result)
        checkimage(x)
        print(x.shape)
        imsave(x, 'result/result2.png', config)
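imsave() in these examples writes the network output to disk. A hedged sketch of one possible implementation, assuming float images in [0, 1] and imageio as the I/O backend (both are assumptions about the project's utils; example #5 below passes 0-255 values to a two-argument imsave, so each project's version differs):

    import numpy as np
    import imageio

    def imsave(image, path, config=None):
        # Clip to the valid range and convert to 8-bit before writing
        image = np.clip(image, 0.0, 1.0)
        imageio.imwrite(path, (image * 255.0).astype(np.uint8))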
Code example #5
    def test(self, config):
        print("\nPrepare Data...\n")
        paths = prepare_data(config)
        data_num = len(paths)

        avg_time = 0
        print("\nNow Start Testing...\n")
        for idx in range(data_num):
            input_ = imread(paths[idx])
            # Reverse the channel order (e.g. BGR -> RGB)
            input_ = input_[:, :, ::-1]
            # Add a batch dimension
            input_ = input_[np.newaxis, :]

            images_shape = input_.shape
            # The label (HR) shape is the input shape scaled in height and width
            labels_shape = input_.shape * np.asarray(
                [1, self.scale, self.scale, 1])
            self.build_model(images_shape, labels_shape)
            tf.global_variables_initializer().run(session=self.sess)

            self.load(config.checkpoint_dir, restore=True)

            time_ = time.time()
            result = self.sess.run([self.pred],
                                   feed_dict={self.images: input_ / 255.0})
            avg_time += time.time() - time_

            # Reset the graph so the model can be rebuilt for the next image size
            self.sess.close()
            tf.reset_default_graph()
            self.sess = tf.Session()

            x = np.squeeze(result) * 255.0
            x = np.clip(x, 0, 255)
            x = x[:, :, ::-1]
            checkimage(np.uint8(x))

            if not os.path.isdir(os.path.join(os.getcwd(), config.result_dir)):
                os.makedirs(os.path.join(os.getcwd(), config.result_dir))
            imsave(x, config.result_dir + "/%d.png" % idx)

        print("Avg. Time:", avg_time / data_num)
Code example #6
File: model.py Project: vdivakar/TensorFlow-VDSR
    def train(self, config):

        # NOTE: in training mode, nx and ny are ignored
        nx, ny = input_setup(config)

        data_dir = checkpoint_dir(config)

        input_, label_ = read_data(data_dir)

        # Stochastic gradient descent with the standard backpropagation

        # NOTE: learning rate decay
        global_step = tf.Variable(0, trainable=False)
        #learning_rate = tf.train.exponential_decay(config.learning_rate, global_step * config.batch_size, len(input_)*100, 0.1, staircase=True)
        # NOTE: Clip gradient
        opt = tf.train.AdamOptimizer(learning_rate=config.learning_rate)
        grad_and_value = opt.compute_gradients(self.loss)

        clip = tf.Variable(config.clip_grad, name='clip')
        capped_gvs = [(tf.clip_by_value(grad, -(clip), clip), var)
                      for grad, var in grad_and_value]

        self.train_op = opt.apply_gradients(capped_gvs,
                                            global_step=global_step)
        #self.train_op = tf.train.AdamOptimizer(learning_rate=config.learning_rate).minimize(self.loss)

        tf.global_variables_initializer().run()

        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("Now Start Training...")
            for ep in range(config.epoch):
                # Run by batch images
                batch_idxs = len(input_) // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images = input_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    batch_labels = label_[idx * config.batch_size:(idx + 1) *
                                          config.batch_size]
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels
                                           })

                    if counter % 10 == 0:
                        print(
                            "Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]"
                            % ((ep + 1), counter, time.time() - time_, err))
                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)
        # Test
        else:
            print("Now Start Testing...")

            # The network output is a residual, so add the input back
            result = self.pred.eval({self.images: input_}) + input_
            image = merge(result, [nx, ny], self.c_dim)
            checkimage(image)
            imsave(image, config.result_dir + '/result.png', config)
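The commented-out learning-rate decay in this example follows the standard tf.train.exponential_decay pattern. A short sketch of how it could be wired into the clipped-gradient optimizer above; the decay interval and rate here are placeholders, not the project's values:

    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(
        config.learning_rate,            # initial learning rate
        global_step,                     # incremented once per apply_gradients call
        decay_steps=len(input_) * 100,   # placeholder decay interval
        decay_rate=0.1,
        staircase=True)
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate)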
Code example #7
File: model.py Project: kweisamx/SR-test
    def train(self, config):
        
        # NOTE: in training mode, nx and ny are ignored
        input_setup(config)

        data_dir = checkpoint_dir(config)
        
        input_, label_ = read_data(data_dir)

        # Bicubic-upscaled base image used for residual learning
        residul = make_bicubic(input_, config.scale)


        '''
        opt = tf.train.AdamOptimizer(learning_rate=config.learning_rate)
        grad_and_value = opt.compute_gradients(self.loss)

        clip = tf.Variable(0.1, name='clip') 
        capped_gvs = [(tf.clip_by_value(grad, -(clip), clip), var) for grad, var in grad_and_value]

        self.train_op = opt.apply_gradients(capped_gvs)
        '''
        # Stochastic gradient descent with the standard backpropagation
        self.train_op = tf.train.AdamOptimizer(
            learning_rate=config.learning_rate).minimize(self.loss)
        tf.global_variables_initializer().run()

        counter = 0
        time_ = time.time()

        self.load(config.checkpoint_dir)
        # Train
        if config.is_train:
            print("Now Start Training...")
            for ep in range(config.epoch):
                # Run by batch images
                batch_idxs = len(input_) // config.batch_size
                for idx in range(0, batch_idxs):
                    batch_images = input_[idx * config.batch_size : (idx + 1) * config.batch_size]
                    batch_residul = residul[idx * config.batch_size : (idx + 1) * config.batch_size]
                    batch_labels = label_[idx * config.batch_size : (idx + 1) * config.batch_size]
                    counter += 1
                    _, err = self.sess.run([self.train_op, self.loss],
                                           feed_dict={
                                               self.images: batch_images,
                                               self.labels: batch_labels,
                                               self.residul: batch_residul
                                           })

                    if counter % 10 == 0:
                        print("Epoch: [%2d], step: [%2d], time: [%4.4f], loss: [%.8f]" % ((ep+1), counter, time.time()-time_, err))
                        #print(label_[1] - self.pred.eval({self.images: input_})[1],'loss:]',err)
                    if counter % 500 == 0:
                        self.save(config.checkpoint_dir, counter)
        # Test
        else:
            print("Now Start Testing...")
            print(input_[0].shape)

            checkimage(residul[0])
            result = self.pred.eval({self.images: input_[0].reshape(1, input_[0].shape[0], input_[0].shape[1], self.c_dim)})
            x = np.squeeze(result)
            checkimage(x)
            x = residul[0] + x
            
            # Map the output back to the interval [0, 1]
            x = (x + 1) / 2
            checkimage(x)
            print(x.shape)
            imsave(x, config.result_dir + '/result.png', config)
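make_bicubic() in this last example produces the bicubic-upscaled base image that the network's predicted residual is added back to before mapping to [0, 1]. A hedged sketch, assuming scipy's cubic zoom as the interpolation backend; the project's actual implementation may differ:

    import numpy as np
    import scipy.ndimage

    def make_bicubic(images, scale):
        # Upscale each (h, w, c) image by `scale` using cubic interpolation
        upscaled = [scipy.ndimage.zoom(img, (scale, scale, 1), order=3)
                    for img in images]
        return np.asarray(upscaled)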