Example #1
 def train_process(self):
     """Train process of parameter sharing."""
     self.train_loader = Dataset(mode='train').dataloader
     self.valid_loader = Dataset(mode='val').dataloader
     self.model = self.model.to(self.device)
     self.search_alg = SearchAlgorithm(SearchSpace())
     self.set_algorithm_model(self.model)
     self.optimizer = self._init_optimizer()
     self.lr_scheduler = self._init_lr_scheduler()
     self.loss_fn = self._init_loss()
     np.random.seed(self.cfg.seed)
     torch.manual_seed(self.cfg.seed)
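     # Each epoch: train the shared weights, evaluate top-1/top-5 on both splits, and log the current genotype.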
     for i in range(self.cfg.epochs):
         self._train(self.model)
         train_top1, train_top5 = self._valid(self.model, self.train_loader)
         valid_top1, valid_top5 = self._valid(self.model, self.valid_loader)
         self.lr_scheduler.step()
         child_desc_temp = self.search_alg.codec.calc_genotype(
             self.model.arch_weights)
         logging.info(F.softmax(self.model.alphas_normal, dim=-1))
         logging.info(F.softmax(self.model.alphas_reduce, dim=-1))
         logging.info('normal = %s', child_desc_temp[0])
         logging.info('reduce = %s', child_desc_temp[1])
         logging.info('Epoch {}: train top1: {}, train top5: {}'.format(
             i, train_top1, train_top5))
         logging.info('Epoch {}: valid top1: {}, valid top5: {}'.format(
             i, valid_top1, valid_top5))
     child_desc = self.search_alg.codec.decode(self.model.arch_weights)
     self._save_descript(child_desc)
Example #2
 def __init__(self):
     self.step_name = General.step_name
     self.search_space = SearchSpace()
     self.search_alg = SearchAlgorithm(self.search_space.search_space)
     self.report = Report()
     self.record = ReportRecord()
     self.record.step_name = self.step_name
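     # Propagate the search algorithm's objective keys to the report record when they are configured.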
     if hasattr(self.search_alg.config, 'objective_keys'):
         self.record.objective_keys = self.search_alg.config.objective_keys
Example #3
 def before_train(self, logs=None):
     """Be called before the training process."""
     self.config = self.trainer.config
     self.unrolled = self.trainer.config.unrolled
     self.device = self.trainer.config.device
     self.model = self.trainer.model
     self.optimizer = self.trainer.optimizer
     self.lr_scheduler = self.trainer.lr_scheduler
     self.loss = self.trainer.loss
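     # Bind a search algorithm to the trainer's model and rebuild the train/valid dataloaders.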
     self.search_alg = SearchAlgorithm(SearchSpace().search_space)
     self._set_algorithm_model(self.model)
     self.trainer.train_loader = self.trainer._init_dataloader(mode='train')
     self.trainer.valid_loader = self.trainer._init_dataloader(mode='val')
Example #4
 def before_train(self, epoch, logs=None):
     """Be called before the training process."""
     self.cfg = self.trainer.cfg
     self.trainer.auto_save_ckpt = False
     self.trainer.auto_save_perf = False
     self.unrolled = self.trainer.cfg.get('unrolled', True)
     self.device = self.trainer.cfg.get('device', 0)
     self.model = self.trainer.model
     self.optimizer = self.trainer.optimizer
     self.lr_scheduler = self.trainer.lr_scheduler
     self.loss = self.trainer.loss
     self.search_alg = SearchAlgorithm(SearchSpace())
     self._set_algorithm_model(self.model)
     self.trainer.train_loader = self.trainer._init_dataloader(mode='train')
     self.trainer.valid_loader = self.trainer._init_dataloader(mode='val')
Example #5
 def before_train(self, logs=None):
     """Be called before the training process."""
     # Use zero valid_interval to suppress the default validation step
     self.trainer.valid_interval = 0
     self.trainer.config.report_on_epoch = True
     if vega.is_torch_backend():
         cudnn.benchmark = True
         cudnn.enabled = True
     self.search_alg = SearchAlgorithm(SearchSpace().search_space)
     self.alg_policy = self.search_alg.config.policy
     self.set_algorithm_model(self.trainer.model)
     # setup alphas: sample one random path per individual
     n_individual = self.alg_policy.num_individual
     self.alphas = np.stack([self.search_alg.random_sample_path()
                             for i in range(n_individual)], axis=0)
     self.trainer.train_loader = self.trainer._init_dataloader(mode='train')
     self.trainer.valid_loader = self.trainer._init_dataloader(mode='val')
Example #6
 def before_train(self, epoch, logs=None):
     """Be called before the training process."""
     # Use zero valid_freq to suppress the default validation step
     self.trainer.auto_save_ckpt = False
     self.trainer.auto_save_perf = False
     self.trainer.valid_freq = 0
     cudnn.benchmark = True
     cudnn.enabled = True
     self.search_alg = SearchAlgorithm(SearchSpace())
     self.alg_policy = self.search_alg.config.policy
     self.set_algorithm_model(self.trainer.model)
     # setup alphas: sample one random path per individual
     n_individual = self.alg_policy.num_individual
     self.alphas = torch.cat(
         [self.trainer.model.random_single_path().unsqueeze(0)
          for _ in range(n_individual)],
         dim=0)
     self.trainer.train_loader = self.trainer._init_dataloader(mode='train')
     self.trainer.valid_loader = self.trainer._init_dataloader(mode='val')
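
Examples #5 and #6 build the same (num_individual, ...) batch of candidate paths in two ways: np.stack over sampled paths versus torch.cat over unsqueezed single paths. The sketch below is a self-contained illustration of the torch variant; random_single_path here is a stand-in that returns one random one-hot op choice per edge, since the model's real method is not shown in these excerpts, and the edge/op counts are arbitrary.

import torch
import torch.nn.functional as F

def random_single_path(num_edges=14, num_ops=8):
    """Stand-in for model.random_single_path(): one random one-hot op choice per edge."""
    idx = torch.randint(num_ops, (num_edges,))
    return F.one_hot(idx, num_ops).float()

n_individual = 4
# Same construction as Example #6: unsqueeze each path to (1, edges, ops), then concatenate on dim 0.
alphas = torch.cat([random_single_path().unsqueeze(0) for _ in range(n_individual)], dim=0)
print(alphas.shape)  # torch.Size([4, 14, 8])

Example #5 reaches the same layout with np.stack(paths, axis=0), which inserts the leading population dimension without the explicit unsqueeze.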