def train(self): """ Train function of every epoch during train phase. """ self.det_net.train() start_time = time.time() # Adjust the learning rate after every epoch. self.runner_state['epoch'] += 1 for i, data_dict in enumerate(self.train_loader): Trainer.update(self, solver_dict=self.configer.get('solver')) self.data_time.update(time.time() - start_time) # Forward pass. data_dict = RunnerHelper.to_device(self, data_dict) out = self.det_net(data_dict) loss_dict = self.det_loss(out) loss = loss_dict['loss'].mean() self.train_losses.update(loss.item(), len(DCHelper.tolist(data_dict['meta']))) self.optimizer.zero_grad() loss.backward() RunnerHelper.clip_grad(self.det_net, 10.) self.optimizer.step() # Update the vars of the train phase. self.batch_time.update(time.time() - start_time) start_time = time.time() self.runner_state['iters'] += 1 # Print the log info & reset the states. if self.runner_state['iters'] % self.configer.get( 'solver', 'display_iter') == 0: Log.info( 'Train Epoch: {0}\tTrain Iteration: {1}\t' 'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t' 'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n' 'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n' .format(self.runner_state['epoch'], self.runner_state['iters'], self.configer.get('solver', 'display_iter'), RunnerHelper.get_lr(self.optimizer), batch_time=self.batch_time, data_time=self.data_time, loss=self.train_losses)) self.batch_time.reset() self.data_time.reset() self.train_losses.reset() if self.configer.get('solver', 'lr')['metric'] == 'iters' \ and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'): break # Check to val the current model. if self.runner_state['iters'] % self.configer.get( 'solver', 'test_interval') == 0: self.val()
def train(self): """ Train function of every epoch during train phase. """ self.cls_net.train() start_time = time.time() # Adjust the learning rate after every epoch. self.runner_state['epoch'] += 1 for i, data_dict in enumerate(self.train_loader): Trainer.update( self, warm_list=(0, 1), warm_lr_list=(self.solver_dict['lr']['base_lr'] * self.configer.get('solver.lr.bb_lr_scale'), self.solver_dict['lr']['base_lr']), solver_dict=self.solver_dict) self.data_time.update(time.time() - start_time) data_dict = RunnerHelper.to_device(self, data_dict) # Forward pass. out = self.cls_net(data_dict) loss_dict = self.loss(out) # Compute the loss of the train batch & backward. loss = loss_dict['loss'] self.train_losses.update( {key: loss.item() for key, loss in loss_dict.items()}, data_dict['img'].size(0)) self.optimizer.zero_grad() loss.backward() if self.configer.get('network', 'clip_grad', default=False): RunnerHelper.clip_grad(self.cls_net, 10.) self.optimizer.step() # Update the vars of the train phase. self.batch_time.update(time.time() - start_time) start_time = time.time() self.runner_state['iters'] += 1 # Print the log info & reset the states. if self.runner_state['iters'] % self.solver_dict[ 'display_iter'] == 0: Log.info( 'Train Epoch: {0}\tTrain Iteration: {1}\t' 'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t' 'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n' 'Learning rate = {4}\tLoss = {3}\n'.format( self.runner_state['epoch'], self.runner_state['iters'], self.solver_dict['display_iter'], self.train_losses.info(), RunnerHelper.get_lr(self.optimizer), batch_time=self.batch_time, data_time=self.data_time)) self.batch_time.reset() self.data_time.reset() self.train_losses.reset() if self.solver_dict['lr'][ 'metric'] == 'iters' and self.runner_state[ 'iters'] == self.solver_dict['max_iters']: break if self.runner_state['iters'] % self.solver_dict[ 'save_iters'] == 0 and self.configer.get( 'local_rank') == 0: RunnerHelper.save_net(self, self.cls_net) # Check to val the current model. if self.runner_state['iters'] % self.solver_dict[ 'test_interval'] == 0: self.val()