Code Example #1
File: optimizers.py  Project: injae-kim/deeplab-V3
    def train(self,
              sess,
              save_dir='ckpt',
              details=False,
              verbose=True,
              **kwargs):
        '''
        Run optimizer to train the model.

        @param
        - sess: tf.Session.
        - save_dir: str, the directory to save the learned weights of the model.
        - details: bool, whether to return detailed results.
        - verbose: bool, whether to print details during training.
        - kwargs: dict, extra arguments containing training hyperparameters.
        
        @return 
        - train_results: dict, containing detailed results of training.
        '''

        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        # Set up TensorBoard logging
        merged_summary = tf.summary.merge_all()
        writer = tf.summary.FileWriter("./logs/tensorboard", sess.graph)
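        # (merged_summary and writer are not used again in this snippet; a full
        # implementation would run merged_summary and call writer.add_summary()
        # inside the training loop.)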

        # Load pretrained weights
        if 'init_pretrained_model' in self.model.net.keys():
            print('Loading pretrained weights...')
            self.model.net['init_pretrained_model'](sess)

        # Initialize some variables for training
        train_results = dict()
        train_size = self.train_set._num_examples
        num_steps_per_epoch = train_size // self.batch_size
        num_steps = self.num_epochs * num_steps_per_epoch

        if verbose:
            print('Running training loop...')
            print('Number of training iterations: {}'.format(num_steps))

        step_losses, step_scores, eval_scores = [], [], []
        start_time = time.time()

        # Start training loop
        for i in range(num_steps):

            # Perform a gradient update from a single minibatch
            step_loss, step_y_true, step_y_pred = self._step(sess)
            step_losses.append(step_loss)

            # Perform evaluation at the end of each epoch
            if (i + 1) % num_steps_per_epoch == 0:
                # Evaluate model with current minibatch, from training set
                step_score = self.evaluator.score(step_y_true, step_y_pred)
                step_scores.append(step_score)

                # If a validation set was given, use it for evaluation
                if self.val_set is not None:
                    # Evaluate model with the validation set
                    eval_y_pred = self.model.predict(sess,
                                                     self.val_set,
                                                     verbose=False,
                                                     **kwargs)
                    eval_score = self.evaluator.score(self.val_set._labels,
                                                      eval_y_pred)
                    eval_scores.append(eval_score)

                    curr_score = eval_score

                    if verbose:
                        # Print intermediate results
                        print('[epoch {}]\tloss: {:.6f} |Train score: {:.6f} |Eval score: {:.6f} |lr: {:.6f}'\
                            .format(self.curr_epoch, step_loss, step_score, eval_score, self.curr_learning_rate))
                        # Plot intermediate results
                        plot_learning_curve(-1,
                                            step_losses,
                                            step_scores,
                                            eval_scores=eval_scores,
                                            mode=self.evaluator.mode,
                                            img_dir=save_dir)

                else:
                    curr_score = step_score

                    if verbose:
                        # Print intermediate results
                        print('[epoch {}]\tloss: {:.6f} |Train score: {:.6f} |lr: {:.6f}'\
                            .format(self.curr_epoch, step_loss, step_score, self.curr_learning_rate))
                        # Plot intermediate results
                        plot_learning_curve(-1,
                                            step_losses,
                                            step_scores,
                                            eval_scores=None,
                                            mode=self.evaluator.mode,
                                            img_dir=save_dir)

                # Keep track of the current best model by comparing
                # the current score with the best score so far
                if self.evaluator.is_better(curr_score, self.best_score,
                                            **kwargs):
                    self.best_score = curr_score
                    self.num_bad_epochs = 0
                    saver.save(
                        sess, os.path.join(os.getcwd(), save_dir,
                                           'model.ckpt'))

                # Epochs whose score got worse than before are not saved;
                # after consecutive bad epochs, the learning rate is decayed
                else:
                    self.num_bad_epochs += 1

                self._update_learning_rate(**kwargs)
                self.curr_epoch += 1

        if verbose:
            print('Total training time (sec): {}'.format(time.time() -
                                                          start_time))
            print('Best {} score: {}'.format(
                'evaluation' if self.val_set is not None else 'training',
                self.best_score))

        print('Done.')

        if details:
            # Store training results in a dictionary
            train_results['step_losses'] = step_losses
            train_results['step_scores'] = step_scores
            if self.val_set is not None:
                train_results['eval_scores'] = eval_scores

            return train_results
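
For reference, here is a minimal sketch of how a train() method like this might be invoked. The Optimizer class name, its constructor signature, and the placeholder objects below are assumptions for illustration; only the train() parameters come from the example above.

    import tensorflow as tf

    # Hypothetical setup; none of these objects are defined in the example.
    model = ...        # model wrapper exposing .net and .predict()
    train_set = ...    # dataset exposing _num_examples and minibatch sampling
    val_set = ...      # optional validation set exposing ._labels
    evaluator = ...    # scorer exposing score(), is_better(), and mode

    optimizer = Optimizer(model, train_set, evaluator,   # assumed constructor
                          val_set=val_set, batch_size=8, num_epochs=50)
    with tf.Session() as sess:
        # details=True makes train() return the results dictionary.
        results = optimizer.train(sess, save_dir='ckpt',
                                  details=True, verbose=True)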
Code Example #2
    def train(self,
              sess,
              save_dir='/tmp',
              details=False,
              verbose=True,
              **kwargs):
        """
		Run optimizer to train the model.
		:param sess: tf.Session.
		:param save_dir: str, the directory to save the learned weights of the model.
		:param details: bool, whether to return detailed results.
		:param verbose: bool, whether to print details during training.
		:param kwargs: dict, extra arguments containing training hyperparameters.
			- nms_flag: bool, whether to do non maximum supression(nms) for evaluation.
		:return train_results: dict, containing detailed results of training.
		"""
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())  # initialize all weights

        train_results = dict()
        train_size = self.train_set.num_examples
        num_steps_per_epoch = train_size // self.batch_size
        num_steps = self.num_epochs * num_steps_per_epoch
        if verbose:
            print('Running training loop...')
            print('Number of training iterations: {}'.format(num_steps))

        step_losses, step_scores, eval_scores = [], [], []
        start_time = time.time()
        print('start_time {}'.format(start_time))
        print('num_steps_per_epoch {}'.format(num_steps_per_epoch))
        # Start training loop
        for i in range(num_steps):
            print('step {}'.format(i))
            # Perform a gradient update from a single minibatch
            step_loss, step_y_true, step_y_pred, step_X = self._step(
                sess, **kwargs)
            step_losses.append(step_loss)
            # Perform evaluation at the end of each epoch
            if (i + 1) % num_steps_per_epoch == 0:
                # Evaluate model with current minibatch, from training set
                step_score = self.evaluator.score(step_y_true, step_y_pred,
                                                  **kwargs)
                step_scores.append(step_score)

                # If a validation set was given, use it for evaluation
                if self.val_set is not None:
                    # Evaluate model with the validation set
                    eval_y_pred = self.model.predict(sess,
                                                     self.val_set,
                                                     verbose=False,
                                                     **kwargs)
                    eval_score = self.evaluator.score(self.val_set.labels,
                                                      eval_y_pred, **kwargs)
                    eval_scores.append(eval_score)

                    if verbose:
                        # Print intermediate results
                        print('[epoch {}]\tloss: {:.6f} |Train score: {:.6f} |Eval score: {:.6f} |lr: {:.6f}'\
                         .format(self.curr_epoch, step_loss, step_score, eval_score, self.curr_learning_rate))
                        # Plot intermediate results
                        plot_learning_curve(-1,
                                            step_losses,
                                            step_scores,
                                            eval_scores=eval_scores,
                                            mode=self.evaluator.mode,
                                            img_dir=save_dir)

                    curr_score = eval_score
                else:
                    if verbose:
                        # Print intermediate results
                        print('[epoch {}]\tloss: {:.6f} |Train score: {:.6f} |lr: {:.6f}'\
                         .format(self.curr_epoch, step_loss, step_score, self.curr_learning_rate))
                        # Plot intermediate results
                        plot_learning_curve(-1,
                                            step_losses,
                                            step_scores,
                                            eval_scores=None,
                                            mode=self.evaluator.mode,
                                            img_dir=save_dir)

                    curr_score = step_score

                # Keep track of the current best model by comparing
                # the current score with the best score so far
                if self.evaluator.is_better(curr_score, self.best_score,
                                            **kwargs):
                    self.best_score = curr_score
                    self.num_bad_epochs = 0
                    saver.save(sess, os.path.join(save_dir, 'model.ckpt'))
                else:
                    self.num_bad_epochs += 1

                self._update_learning_rate(**kwargs)
                self.curr_epoch += 1

        if verbose:
            print('Total training time (sec): {}'.format(time.time() -
                                                          start_time))
            print('Best {} score: {}'.format(
                'evaluation' if self.val_set is not None else 'training',
                self.best_score))

        print('Done.')

        if details:
            # Store training results in a dictionary
            train_results['step_losses'] = step_losses
            train_results['step_scores'] = step_scores
            if self.val_set is not None:
                train_results['eval_scores'] = eval_scores

            return train_results
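
Both examples above increment num_bad_epochs when the score fails to improve and then call self._update_learning_rate(**kwargs), whose implementation is not shown. Below is a minimal sketch of the patience-based decay this pattern suggests; the learning_rate_patience, learning_rate_decay, and min_learning_rate kwarg names are assumptions:

    def _update_learning_rate(self, **kwargs):
        # Hypothetical sketch: after `patience` consecutive epochs without
        # improvement, multiply the learning rate by `decay` (never dropping
        # below `min_lr`) and reset the bad-epoch counter.
        patience = kwargs.get('learning_rate_patience', 10)  # assumed kwarg
        decay = kwargs.get('learning_rate_decay', 0.1)       # assumed kwarg
        min_lr = kwargs.get('min_learning_rate', 1e-6)       # assumed kwarg
        if self.num_bad_epochs >= patience:
            self.curr_learning_rate = max(self.curr_learning_rate * decay,
                                          min_lr)
            self.num_bad_epochs = 0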
Code Example #3
    def train(self,
              sess,
              save_dir='/tmp',
              details=False,
              verbose=True,
              **kwargs):
        """
		Run optimizer to train the model.
		:param sess: tf.Session.
		:param save_dir: str, the directory to save the learned weights of the model.
		:param details: bool, whether to return detailed results.
		:param verbose: bool, whether to print details during training.
		:param kwargs: dict, extra arguments containing training hyperparameters.
			- nms_flag: bool, whether to do non maximum supression(nms) for evaluation.
		:return train_results: dict, containing detailed results of training.
		"""
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())  # initialize all weights

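        # Set up FID (Fréchet Inception Distance) evaluation: a pretrained
        # Inception graph plus precomputed statistics of the real dataset.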
        inception_path = kwargs.pop(
            'inception_path',
            './inception/inception-2015-12-05/classify_image_graph_def.pb')
        dataset_stats_path = kwargs.pop('dataset_stats_path',
                                        './data/thumbnails128x128/stats.pkl')
        fid = FID(inception_path, dataset_stats_path, sess)

        train_results = dict()
        train_size = self.train_set.num_examples
        print("Size of train set :", train_size)
        num_steps_per_epoch = train_size // self.batch_size
        num_steps = self.num_epochs * num_steps_per_epoch

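        # Read evaluation and sampling hyperparameters from kwargs, with defaults.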
        n_eval = kwargs.pop('eval_sample_size', 10000)
        batch_size_eval = kwargs.pop('batch_size_eval', 500)

        sample_H = kwargs.pop('sample_H', 2)
        sample_W = kwargs.pop('sample_W', 10)
        sample_dir = kwargs.pop('sample_dir', save_dir)

        if verbose:
            print('Running training loop...')
            print('Number of training iterations: {}'.format(num_steps))

        step_losses_G, step_losses_D, step_scores, eval_scores = [], [], [], []
        start_time = time.time()

        # Start training loop
        for i in range(num_steps):
            # Perform a gradient update from a single minibatch
            step_loss_G, step_loss_D, step_X, gen_img, D = self._step(
                sess, **kwargs)
            step_losses_G.append(step_loss_G)
            step_losses_D.append(step_loss_D)
            # Print generator/discriminator losses every 10 steps
            if i % 10 == 0:
                print('[step {}]\tG_loss: {:.6f}|D_loss:{:.6f} |lr: {:.6f}'\
                   .format(i, step_loss_G, step_loss_D, self.curr_learning_rate))
            # Perform evaluation at the end of each epoch
            if i % num_steps_per_epoch == num_steps_per_epoch - 1:
                # Evaluate model with current minibatch, from training set
                fid.reset_FID()
                fid.extract_inception_features(gen_img)
                step_score = fid.calculate_FID()
                step_scores.append(step_score)

                sample_image = self.model.generate(sess,
                                                   self.sample_z,
                                                   verbose=False,
                                                   **kwargs)

                save_sample_images(sample_dir, i, sample_image, sample_H,
                                   sample_W)

                eval_score = self.evaluator.score(sess, fid, self.model,
                                                  **kwargs)
                eval_scores.append(eval_score)

                if verbose:
                    # Print intermediate results
                    print('[epoch {}]\tG_loss: {:.6f}|D_loss:{:.6f} |Train score: {:.6f} |Eval score: {:.6f} |lr: {:.6f}'\
                     .format(self.curr_epoch, step_loss_G, step_loss_D, step_score, eval_score, self.curr_learning_rate))
                    # Plot intermediate results
                    plot_learning_curve(-1,
                                        step_losses_G,
                                        step_losses_D,
                                        step_scores,
                                        eval_scores=eval_scores,
                                        img_dir=save_dir)

                curr_score = eval_score

                # Keep track of the current best model by comparing
                # the current score with the best score so far
                if self.evaluator.is_better(curr_score, self.best_score,
                                            **kwargs):
                    self.best_score = curr_score
                    self.num_bad_epochs = 0
                    saver.save(
                        sess,
                        os.path.join(save_dir,
                                     'model_{}.ckpt'.format(self.curr_epoch)))
                else:
                    self.num_bad_epochs += 1
                # Uncomment to update the learning rate:
                # self._update_learning_rate(**kwargs)
                self.curr_epoch += 1

        if verbose:
            print('Total training time (sec): {}'.format(time.time() -
                                                          start_time))
            print('Best evaluation score: {}'.format(self.best_score))

        print('Done.')

        if details:
            # Save last model
            saver.save(sess, os.path.join(save_dir, 'model.ckpt'))
            # Store training results in a dictionary
            train_results['step_losses_G'] = step_losses_G
            train_results['step_losses_D'] = step_losses_D
            train_results['step_scores'] = step_scores
            train_results['eval_scores'] = eval_scores

            return train_results
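
Unlike the first two examples, example #3 scores a generative model with FID rather than a label-based metric. Its per-epoch evaluation reduces to the pattern below, using only the FID methods that appear in the code; the standalone batching loop and generated_batches are assumptions for illustration:

    # Hypothetical standalone FID evaluation, reusing calls from example #3.
    fid.reset_FID()                            # clear accumulated activations
    for batch in generated_batches:            # batches of generated images
        fid.extract_inception_features(batch)  # accumulate Inception features
    score = fid.calculate_FID()                # compare with dataset statistics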