Example #1
 def __init__(self,
              model,
              criterions,
              metric_ftns,
              optimizer,
              config,
              train_data_loader,
              valid_data_loader=None,
              lr_scheduler=None,
              weight_scheduler=None,
              test_data_loader=None):
     super().__init__(model, criterions, metric_ftns, optimizer, config,
                      train_data_loader, valid_data_loader, lr_scheduler,
                      weight_scheduler)
     self.train_teacher_metrics = MetricTracker(
         *[m.__name__ for m in self.metric_ftns], writer=self.writer)
     self.valid_metrics = MetricTracker(
         'loss',
         'supervised_loss',
         'kd_loss',
         'hint_loss',
         'teacher_loss',
         *[m.__name__ for m in self.metric_ftns],
         *['teacher_' + m.__name__ for m in self.metric_ftns],
         writer=self.writer)
     self.test_data_loader = test_data_loader
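Most of these trainers follow the victoresque pytorch-template layout, in which MetricTracker keeps a running average per key and optionally mirrors every update to a TensorBoard writer. A minimal sketch of that interface (simplified here with plain dicts; the template's own implementation stores the totals in a pandas DataFrame):

class MetricTracker:
    """Running averages over a fixed set of metric keys."""

    def __init__(self, *keys, writer=None):
        self.writer = writer  # optional TensorboardWriter-like object
        self._totals = dict.fromkeys(keys, 0.0)
        self._counts = dict.fromkeys(keys, 0)

    def reset(self):
        for key in self._totals:
            self._totals[key] = 0.0
            self._counts[key] = 0

    def update(self, key, value, n=1):
        if self.writer is not None:
            self.writer.add_scalar(key, value)  # assumed writer API
        self._totals[key] += value * n
        self._counts[key] += n

    def avg(self, key):
        return self._totals[key] / max(self._counts[key], 1)

    def result(self):
        return {key: self.avg(key) for key in self._totals}

With this interface, a trainer calls e.g. self.valid_metrics.update('loss', loss.item()) once per batch and reads self.valid_metrics.result() at the end of an epoch.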
Example #2
    def __init__(self, model, criterion, optimizer, scheduler, metric_ftns,
                 device, num_epoch, grad_clipping, grad_accumulation_steps,
                 early_stopping, validation_frequency, tensorboard,
                 checkpoint_dir, resume_path):
        self.device, device_ids = self._prepare_device(device)
        self.model = model.to(self.device)

        self.start_epoch = 1
        if resume_path is not None:
            self._resume_checkpoint(resume_path)
        if len(device_ids) > 1:
            # wrap the model for multi-GPU training
            self.model = torch.nn.DataParallel(self.model, device_ids=device_ids)
            # cudnn.benchmark = True
        self.criterion = criterion
        self.metric_ftns = metric_ftns
        self.optimizer = optimizer
        self.num_epoch = num_epoch
        self.scheduler = scheduler
        self.grad_clipping = grad_clipping
        self.grad_accumulation_steps = grad_accumulation_steps
        self.early_stopping = early_stopping
        self.validation_frequency = validation_frequency
        self.checkpoint_dir = checkpoint_dir
        self.best_epoch = 1
        self.best_score = 0
        self.writer = TensorboardWriter(
            os.path.join(checkpoint_dir, 'tensorboard'), tensorboard)
        self.train_metrics = MetricTracker('loss', writer=self.writer)
        self.valid_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
Example #3
 def __init__(self,
              model,
              criterion,
              metric_ftns,
              optimizer: Optimizer,
              config,
              data_loader,
              valid_data_loader,
              len_epoch=None,
              log_step=2):
     super().__init__(model, criterion, metric_ftns, optimizer, config)
     self.config = config
     self.data_loader = data_loader
     self.valid_data_loader = valid_data_loader
     self.do_validation = self.valid_data_loader is not None
     self.log_step = log_step
     if len_epoch is None:
         self.data_loader_iter = data_loader
         self.len_epoch = len(self.data_loader)
     else:
         self.data_loader_iter = inf_loop(self.data_loader)
         self.valid_loader_iter = inf_loop(self.valid_data_loader)
         self.len_epoch = len_epoch
         self.valid_len_epoch = 53  # hard-coded number of validation batches per epoch in the iteration-based setting
     self.train_metrics = MetricTracker(
         'train_loss',
         *['train_' + m.__name__ for m in self.metric_ftns],
         writer=self.writer)
     self.valid_metrics = MetricTracker(
         'val_loss',
         *['val_' + m.__name__ for m in self.metric_ftns],
         writer=self.writer)
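Example #3 switches between epoch-based and iteration-based training with inf_loop, which turns a finite DataLoader into an endless stream of batches. A minimal sketch matching the pytorch-template helper of the same name:

from itertools import repeat

def inf_loop(data_loader):
    """Yield batches forever, restarting the loader each time it is exhausted."""
    for loader in repeat(data_loader):
        yield from loader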
Example #4
 def __init__(self,
              model,
              criterion,
              metric_ftns,
              config,
              data_loader,
              valid_data_loader=None,
              len_epoch=None):
     super().__init__(model, criterion, metric_ftns, config)
     self.config = config
     self.data_loader = data_loader
     if len_epoch is None:
         # epoch-based training
         self.len_epoch = len(self.data_loader)
     else:
         # iteration-based training
         self.data_loader = inf_loop(data_loader)
         self.len_epoch = len_epoch
     self.valid_data_loader = valid_data_loader
     self.do_validation = self.valid_data_loader is not None
     self.lr_scheduler = config.init_obj('lr_scheduler',
                                         torch.optim.lr_scheduler,
                                         self.optimizer)
     self.log_step = 200
     self.train_metrics = MetricTracker(
         'loss',
         *[m.__name__ for m in self.metric_ftns],
         writer=self.writer)
     self.valid_metrics = MetricTracker(
         'loss',
         *[m.__name__ for m in self.metric_ftns],
         writer=self.writer)
Example #5
    def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader,
                 valid_data_loader=None, lr_scheduler=None, len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))
        self.n_batches = data_loader.n_samples / data_loader.batch_size
        self.n_batches_valid = valid_data_loader.n_samples / valid_data_loader.batch_size

        self.train_metrics = MetricTracker('loss', 'kl_cost', 'pred_cost', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
        self.valid_metrics = MetricTracker('loss', 'kl_cost', 'pred_cost', *[m.__name__ for m in self.metric_ftns], writer=self.writer)

        self.keys.extend(['kl_cost', 'pred_cost'])
        if self.do_validation:
            keys_val = ['val_' + k for k in self.keys]
            for key in self.keys + keys_val:
                self.log[key] = []
Example #6
    def __init__(self,
                 model,
                 criterion,
                 metric_ftns,
                 optimizer,
                 config,
                 data_loader,
                 fold_id,
                 valid_data_loader=None,
                 class_weights=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config,
                         fold_id)
        self.config = config
        self.data_loader = data_loader
        self.len_epoch = len(self.data_loader)

        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = optimizer  # note: no scheduler is passed in; the optimizer itself fills this slot
        self.log_step = int(
            data_loader.batch_size) * 1  # reduce this if you want more logs

        self.train_metrics = MetricTracker(
            'loss', *[m.__name__ for m in self.metric_ftns])
        self.valid_metrics = MetricTracker(
            'loss', *[m.__name__ for m in self.metric_ftns])

        self.fold_id = fold_id
        self.selected = 0
        self.class_weights = class_weights
Example #7
    def __init__(self, model, criterion, metric_ftns, optimizer, config, device,
                 data_loader, valid_data_loader=None, lr_scheduler=None, len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.device = device
        self.data_loader = data_loader
        self.train_metric_ftns = self.metric_ftns[0]
        self.val_metric_ftns = self.metric_ftns[1]
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.val_imgs = self.valid_data_loader.dataset.imgs

        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))

        self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.train_metric_ftns], writer=self.writer)
        self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.val_metric_ftns], writer=self.writer)
        
        # training-related config
        cfg_enhance = self.config['trainer_enhance']

        self.mixup = cfg_enhance['mixup']
        if self.mixup:
            self.mixup_alpha = cfg_enhance['mixup_alpha']
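The mixup_alpha read above parameterizes standard mixup augmentation, which blends each batch with a shuffled copy of itself. A minimal sketch of that step (the helper name and return convention are illustrative, not taken from this trainer):

import numpy as np
import torch

def mixup_batch(x, y, alpha=0.4):
    """Return a convex combination of the batch with a shuffled copy of itself."""
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0), device=x.device)
    mixed_x = lam * x + (1.0 - lam) * x[index]
    # the loss becomes lam * criterion(out, y) + (1 - lam) * criterion(out, y[index])
    return mixed_x, y, y[index], lam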
Example #8
    def __init__(self,
                 model,
                 criterion,
                 metric_ftns,
                 optimizer,
                 config,
                 data_loader,
                 valid_data_loader=None,
                 lr_scheduler=None,
                 len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))
        self.init_lr = config['optimizer']['args']['lr']
        self.warm_up = config['trainer']['warm_up']

        self.train_metrics = MetricTracker(
            'loss', *[m.__name__ for m in self.metric_ftns])
        self.valid_metrics = MetricTracker(
            'loss', *[m.__name__ for m in self.metric_ftns])
Example #9
    def __init__(self,
                 model,
                 criterion,
                 metric_ftns,
                 optimizer,
                 config,
                 data_loader,
                 valid_data_loader=None,
                 lr_scheduler=None,
                 len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(len(data_loader)))

        self.train_metrics = MetricTracker()
        self.valid_metrics = MetricTracker()
Example #10
	def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader,
				 valid_data_loader=None, lr_scheduler=None, len_epoch=None):
		super().__init__(model, criterion, metric_ftns, optimizer, config)
		self.config = config
		self.data_loader = data_loader
		if len_epoch is None:
			# epoch-based training
			self.len_epoch = len(self.data_loader)
		else:
			# iteration-based training
			self.data_loader = inf_loop(data_loader)
			self.len_epoch = len_epoch
		self.valid_data_loader = valid_data_loader
		self.do_validation = self.valid_data_loader is not None
		self.lr_scheduler = lr_scheduler
		self.log_step = int(np.sqrt(data_loader.batch_size))

		self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
		self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)

		if hasattr(self.data_loader, 'n_valid_samples'):
			validation_samples = self.data_loader.n_valid_samples
		else:
			validation_samples = self.valid_data_loader.n_samples
		self.heatmap_sample_indices = np.sort(np.random.randint(validation_samples, size=min(16, validation_samples)))
Example #11
    def __init__(self,
                 model_idx,
                 models,
                 criterion,
                 metric_ftns,
                 optimizers,
                 config,
                 padding_idx,
                 data_loader,
                 init_token,
                 lr_schedulers=None,
                 len_epoch=2):
        super().__init__(models, criterion, metric_ftns, optimizers, config)
        self.model_idx = model_idx
        self.config = config
        self.padding_idx = padding_idx
        self.init_token = init_token
        self.data_loader = data_loader
        self.batch_size = data_loader.batch_size
        self.vocab = data_loader.TEXT.vocab.itos
        self.len_epoch = len_epoch
        self.do_validation = self.config['trainer']['do_validation']
        self.lr_schedulers = lr_schedulers
        self.clip = self.config['trainer']['clip']
        self.log_step = int(np.sqrt(data_loader.batch_size))

        self.train_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
        self.valid_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
Example #12
    def __init__(self, model, criterion, metric_ftns, optimizer, config, train_iter, valid_iter, test_iter=None,
                 lr_scheduler=None, len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.train_iter, self.valid_iter, self.test_iter = train_iter, valid_iter, test_iter
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.train_iter)
        else:
            # iteration-based training
            self.data_loader = inf_loop(train_iter)
            self.len_epoch = len_epoch

        self.do_validation = self.valid_iter is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(train_iter.batch_size))

        self.train_metrics = MetricTracker('tag_loss', 'crf_loss', 'total_loss', 'p', 'r', 'f',
                                           *[m.__name__ for m in self.metric_ftns], writer=self.writer)
        self.valid_metrics = MetricTracker('tag_loss', 'crf_loss', 'total_loss', 'p', 'r', 'f',
                                           *[m.__name__ for m in self.metric_ftns], writer=self.writer)

        # self.cross_entropy_weight_ = [1.0] * schema.class_tag_num[class_id]
        self.cross_entropy_weight_ = [1.0] * 9  # hard-coded tag-class count
        for i in range(1, 9):
            if i % 2 == 1:
                self.cross_entropy_weight_[i] = 1.5  # up-weight odd tag ids (e.g. B- tags in a BIO scheme)
        self.cross_entropy_weight_[0] = 0.1  # down-weight the dominant background tag
Example #13
    def __init__(self,
                 model,
                 criterion,
                 metric_ftns,
                 optimizer,
                 config,
                 train_iter,
                 valid_iter,
                 test_iter=None,
                 lr_scheduler=None,
                 len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.train_iter, self.valid_iter, self.test_iter = train_iter, valid_iter, test_iter
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.train_iter)
        else:
            # iteration-based training
            self.data_loader = inf_loop(train_iter)
            self.len_epoch = len_epoch

        self.do_validation = self.valid_iter is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(train_iter.batch_size))

        self.train_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
        self.valid_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
Example #14
    def __init__(self,
                 model,
                 loss,
                 metric_ftns,
                 optimizer,
                 config,
                 data_loader,
                 valid_data_loader=None,
                 lr_scheduler=None,
                 len_epoch=None):
        super().__init__(model, loss, metric_ftns, optimizer, config)
        self.config = config
        self.data_loader = data_loader
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None

        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.len_epoch_val = len(
            self.valid_data_loader) if self.do_validation else 0

        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))

        self.train_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
        self.valid_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
Example #15
 def __init__(self, models, criterion, metrics, device):
     self.criterion = criterion
     self.models = models
     self.device = device
     self.metrics = metrics
     self.valid_metrics = MetricTracker('loss',
                                        *[m.__name__ for m in self.metrics],
                                        writer=None)
     self.logger = logging.getLogger()
Example #16
    def __init__(self,
                 models,
                 criterion,
                 metric_ftns,
                 optimizers,
                 config,
                 data_loader,
                 valid_data_loader=None,
                 lr_schedulers=None,
                 len_epoch=None):
        super().__init__(models, criterion, metric_ftns, optimizers, config)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_schedulers = lr_schedulers
        self.log_step = int(np.sqrt(data_loader.batch_size))

        self.train_metrics = MetricTracker(
            *['loss_' + str(i) for i in range(self.n_ensembles)],
            *[
                m.__name__ + '_' + str(i) for m in self.metric_ftns
                for i in range(self.n_ensembles)
            ],
            writer=self.writer)
        self.valid_metrics = MetricTracker(
            *['loss_' + str(i) for i in range(self.n_ensembles)],
            *[
                m.__name__ + '_' + str(i) for m in self.metric_ftns
                for i in range(self.n_ensembles)
            ],
            writer=self.writer)

        if self.do_validation:
            keys_val = ['val_' + k for k in self.keys]
            for key in self.keys + keys_val:
                self.log[key] = []

        cfg_loss = config['trainer']['loss']
        self.type_in = cfg_loss['type_in']
        self.alpha = cfg_loss['alpha']
        self.loss_type = cfg_loss['loss_type']
        self.censor_R = cfg_loss['censor_R']
        self.soften = cfg_loss['soften']
        self.lambda_in = cfg_loss['lambda_in']
        self.sigma_in = cfg_loss['sigma_in']

        self._create_loss()
Example #17
    def __init__(self,
                 model,
                 loss_fn_class,
                 loss_fn_domain,
                 metric_ftns,
                 optimizer,
                 config,
                 device,
                 data_loader_source,
                 valid_data_loader_source=None,
                 data_loader_target=None,
                 valid_data_loader_target=None,
                 lr_scheduler=None,
                 len_epoch=None):
        super().__init__(model, metric_ftns, optimizer, config)
        self.config = config
        self.device = device
        self.loss_fn_class = loss_fn_class
        self.loss_fn_domain = loss_fn_domain
        self.data_loader_source = data_loader_source
        self.valid_data_loader_source = valid_data_loader_source
        self.data_loader_target = data_loader_target
        self.valid_data_loader_target = valid_data_loader_target
        self.model.to(self.device)

        if len_epoch is None:
            # epoch-based training
            self.len_epoch = min(len(self.data_loader_source),
                                 len(self.data_loader_target))
        else:
            # FIXME: implement source/target style training or remove this feature
            # iteration-based training
            self.data_loader = inf_loop(data_loader_source)
            self.len_epoch = len_epoch
        # FIXME: handle validation round
        self.valid_data_loader = valid_data_loader_source
        self.do_validation = self.valid_data_loader is not None

        self.lr_scheduler = lr_scheduler
        self.log_step = 64

        self.train_metrics = MetricTracker(
            'loss',
            'class_loss',
            'domain_loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
        self.valid_metrics = MetricTracker(
            'loss',
            'class_loss',
            'domain_loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
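The paired class/domain losses above follow the usual domain-adversarial training recipe: a label loss on source batches plus a domain-classification loss on both source and target batches. A minimal sketch of one such update, assuming the model returns (class_logits, domain_logits):

import torch

def dann_step(model, loss_fn_class, loss_fn_domain, optimizer,
              src_x, src_y, tgt_x, device):
    """One update: label loss on source data, domain loss on source and target."""
    model.train()
    optimizer.zero_grad()

    # source pass: class prediction plus "source" (0) domain prediction
    class_out, domain_out_src = model(src_x.to(device))
    src_domain = torch.zeros(src_x.size(0), dtype=torch.long, device=device)
    loss = loss_fn_class(class_out, src_y.to(device)) \
        + loss_fn_domain(domain_out_src, src_domain)

    # target pass: "target" (1) domain prediction only
    _, domain_out_tgt = model(tgt_x.to(device))
    tgt_domain = torch.ones(tgt_x.size(0), dtype=torch.long, device=device)
    loss = loss + loss_fn_domain(domain_out_tgt, tgt_domain)

    loss.backward()
    optimizer.step()
    return loss.item()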
Example #18
    def __init__(self, config, model, optimizer, criterion, dataloader):
        self.config = config
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        self.dataloader = dataloader
        self.device = next(self.model.parameters()).device

        self.losses = MetricTracker()
        self.accs = MetricTracker()

        self.tester = Tester(self.config, self.model)
Example #19
    def __init__(self,
                 s_model,
                 t_model,
                 epoch,
                 criterion,
                 metrics,
                 optimizer,
                 device,
                 data_loader,
                 valid_data_loader=None,
                 lr_scheduler=None,
                 len_epoch=None,
                 checkpoint=None,
                 sts=[]):  # sts=[stop, st_empty, save_dir]
        super().__init__(s_model,
                         criterion,
                         metrics,
                         optimizer,
                         epoch,
                         checkpoint,
                         save_dir=sts[2],
                         st_stop=sts[0])
        self.scaler = GradScaler()
        self.device = device
        self.s_model = self.model
        self.s_model = self.s_model.to(device)
        self.t_model = t_model
        self.t_model = self.t_model.to(device)
        self.kd_criterion = nn.KLDivLoss(reduction='sum')  # 'size_average=False' in older PyTorch versions
        self.data_loader = data_loader
        if len_epoch is None:
            self.len_epoch = len(self.data_loader)
        else:
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler

        self.st_empty = sts[1]
        self.st_container = self.st_empty.beta_container()
        self.lossChart = self.st_container.line_chart()
        self.processBar = self.st_container.progress(0)
        self.epochResult = self.st_container.table()
        self.train_idx = 0

        self.log_step = 100
        self.train_metrics = MetricTracker('loss',
                                           *[m.__name__ for m in self.metrics],
                                           writer=None)
        self.valid_metrics = MetricTracker('loss',
                                           *[m.__name__ for m in self.metrics],
                                           writer=None)
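The GradScaler created in Example #19 implies mixed-precision steps later in the training loop. For reference, a minimal sketch of the standard autocast/scaler pattern (the step function and its arguments are illustrative):

from torch.cuda.amp import autocast

def amp_step(model, criterion, optimizer, scaler, batch, target, device):
    """One mixed-precision optimizer step with loss scaling."""
    optimizer.zero_grad()
    with autocast():
        output = model(batch.to(device))
        loss = criterion(output, target.to(device))
    scaler.scale(loss).backward()  # backward on the scaled loss
    scaler.step(optimizer)         # unscales gradients, then steps
    scaler.update()                # adjusts the scale factor for the next step
    return loss.item()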
Example #20
    def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader,
                 valid_data_loader=None, lr_scheduler=None, len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config

        self.distill = config._config.get('distill', False)
        
        # add_extra_info returns information about the individual experts, which is needed for per-expert losses.
        # If it is false, only the final mean logits are available.
        self.add_extra_info = config._config.get('add_extra_info', False)

        if self.distill:
            print("** Distill is on, please double check distill_checkpoint in config **")
            self.teacher_model = config.init_obj('distill_arch', module_arch)
            teacher_checkpoint = torch.load(config['distill_checkpoint'], map_location="cpu")

            self.teacher_model = self.teacher_model.to(self.device)

            teacher_state_dict = teacher_checkpoint["state_dict"]

            rename_parallel_state_dict(teacher_state_dict)
            
            if len(self.device_ids) > 1:
                print("Using multiple GPUs for teacher model")
                self.teacher_model = torch.nn.DataParallel(self.teacher_model, device_ids=self.device_ids)
                load_state_dict(self.teacher_model, {"module." + k: v for k, v in teacher_state_dict.items()}, no_ignore=True)
            else:
                load_state_dict(self.teacher_model, teacher_state_dict, no_ignore=True)

        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch

        use_fp16 = config._config.get('use_fp16', False)  # assumed flag, mirroring 'distill' above; not defined in the original excerpt
        if use_fp16:
            self.logger.warning("FP16 is enabled. Use this option with caution; it is not guaranteed to work correctly.")
            from torch.cuda.amp import GradScaler
            self.scaler = GradScaler()
        else:
            self.scaler = None

        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))

        self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
        self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
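With a frozen teacher loaded as above, the usual distillation objective mixes a temperature-scaled KL term against the teacher's logits with the ordinary label loss. A minimal sketch of the standard Hinton-style recipe (the temperature and weighting scheme are not necessarily this repository's):

import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, target, T=2.0, alpha=0.5):
    """Blend soft-target KL (scaled by T^2) with hard-label cross-entropy."""
    soft = F.kl_div(
        F.log_softmax(student_logits / T, dim=1),
        F.softmax(teacher_logits / T, dim=1),
        reduction='batchmean') * (T * T)
    hard = F.cross_entropy(student_logits, target)
    return alpha * soft + (1.0 - alpha) * hard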
Example #21
    def __init__(self,
                 model,
                 criterion,
                 metric_ftns,
                 optimizer,
                 config,
                 data_loader,
                 valid_data_loader=None,
                 test_data_loader=None,
                 lr_scheduler=None,
                 len_epoch=None,
                 overfit_single_batch=False):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader if not overfit_single_batch else None
        self.test_data_loader = test_data_loader if not overfit_single_batch else None
        self.do_validation = self.valid_data_loader is not None
        self.do_test = self.test_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))
        self.overfit_single_batch = overfit_single_batch

        # -------------------------------------------------
        # add flexibility to allow no metric in config.json
        self.log_loss = ['loss', 'nll', 'kl']
        if self.metric_ftns is None:
            self.train_metrics = MetricTracker(*self.log_loss,
                                               writer=self.writer)
            self.valid_metrics = MetricTracker(*self.log_loss,
                                               writer=self.writer)
        # -------------------------------------------------
        else:
            self.train_metrics = MetricTracker(
                *self.log_loss,
                *[m.__name__ for m in self.metric_ftns],
                writer=self.writer)
            self.valid_metrics = MetricTracker(
                *self.log_loss,
                *[m.__name__ for m in self.metric_ftns],
                writer=self.writer)
            self.test_metrics = MetricTracker(
                *[m.__name__ for m in self.metric_ftns], writer=self.writer)
Example #22
    def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader: TokenDataProducer,
                 valid_data_loader=None, lr_scheduler=None, len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.data_loader = inf_loop(data_loader.batch_iterator())  # always iteration-based for this producer
        self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))

        self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
        self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
Example #23
    def __init__(self,
                 model,
                 criterion,
                 metric_ftns,
                 config,
                 data_loader,
                 valid_data_loader=None,
                 len_epoch=None):
        super().__init__(model, criterion, metric_ftns, config)

        ##############
        # Graph attention has to be adjusted here; doing it later, inside the wrapped model, would get complicated.
        # Some of the weights in these matrices are not on the GPU yet, so an extra .to(device) is needed when passing them in.
        self.model.g_att_v.init_params(data_loader.dataset.edge_matrix_v,
                                       data_loader.dataset.affectiveness_v,
                                       data_loader.dataset.embedding_concept_v,
                                       self.model.device)
        self.model.g_att_a.init_params(data_loader.dataset.edge_matrix_a,
                                       data_loader.dataset.affectiveness_a,
                                       data_loader.dataset.embedding_concept_a,
                                       self.model.device)
        self.model.g_att_t.init_params(data_loader.dataset.edge_matrix_t,
                                       data_loader.dataset.affectiveness_t,
                                       data_loader.dataset.embedding_concept_t,
                                       self.model.device)
        ##############

        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = config.init_obj('lr_scheduler',
                                            torch.optim.lr_scheduler,
                                            self.optimizer)
        self.log_step = 200
        self.train_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
        self.valid_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
Example #24
    def __init__(self,
                 model,
                 criterion,
                 metric_ftns,
                 optimizer,
                 config,
                 device,
                 data_loader,
                 valid_data_loader=None,
                 lr_scheduler=None,
                 len_epoch=None,
                 logging=True,
                 use_vgg_loss=False):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.device = device
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))
        self.logging = logging

        # vgg loss
        vgg16 = models.vgg16(pretrained=True).features
        if torch.cuda.is_available():
            vgg16.cuda(device=device)
        self.vgg_loss = create_loss_model(vgg16,
                                          8,
                                          use_cuda=torch.cuda.is_available(),
                                          device=device)
        self.use_vgg_loss = use_vgg_loss

        self.train_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
        self.valid_metrics = MetricTracker(
            'loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
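create_loss_model(vgg16, 8, ...) is not defined in this excerpt; the usual construction truncates the VGG16 feature stack at the given layer index and compares activations between output and target. A minimal sketch of that idea (MSE as the comparison is an assumption):

import torch.nn as nn
from torchvision import models

class VGGPerceptualLoss(nn.Module):
    """Compare images in the feature space of the first n_layers VGG16 layers."""

    def __init__(self, n_layers=8):
        super().__init__()
        features = models.vgg16(pretrained=True).features[:n_layers]
        for p in features.parameters():
            p.requires_grad = False  # frozen feature extractor
        self.features = features.eval()
        self.mse = nn.MSELoss()

    def forward(self, output, target):
        return self.mse(self.features(output), self.features(target))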
Example #25
    def __init__(self,
                 model,
                 criterion,
                 metric_ftns,
                 optimizer_G,
                 optimizer_D,
                 config,
                 data_loader,
                 valid_data_loader=None,
                 lr_scheduler_G=None,
                 lr_scheduler_D=None,
                 len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer_G,
                         optimizer_D, config)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader.dataset)
            # self.len_epoch = len(self.data_loader.loader_)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.lr_scheduler_G = lr_scheduler_G
        self.lr_scheduler_D = lr_scheduler_D
        self.log_step = int(data_loader.batch_size) * 10

        self.train_metrics = MetricTracker(
            'loss_G',
            'loss_D',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
        self.valid_metrics = MetricTracker(
            'IOU',
            'CE',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)

        # AE reconstruction loss
        # self.AE_loss = F.cross_entropy

        # optimizer
        self.Generator_opt = optimizer_G
        self.Discriminator_opt = optimizer_D
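With separate optimizer_G/optimizer_D as above, training alternates a discriminator step on real and detached fake batches with a generator step through the discriminator. A compact sketch using a BCE-with-logits objective (the model interfaces are assumptions):

import torch
import torch.nn.functional as F

def gan_step(G, D, opt_G, opt_D, real, noise):
    """One discriminator update, then one generator update (equal batch sizes assumed)."""
    ones = torch.ones(real.size(0), 1, device=real.device)
    zeros = torch.zeros(real.size(0), 1, device=real.device)

    # discriminator: push D(real) toward 1 and D(fake) toward 0
    opt_D.zero_grad()
    fake = G(noise).detach()  # detached so this step only updates D
    loss_D = (F.binary_cross_entropy_with_logits(D(real), ones) +
              F.binary_cross_entropy_with_logits(D(fake), zeros))
    loss_D.backward()
    opt_D.step()

    # generator: push D(G(noise)) toward 1
    opt_G.zero_grad()
    loss_G = F.binary_cross_entropy_with_logits(D(G(noise)), ones)
    loss_G.backward()
    opt_G.step()
    return loss_D.item(), loss_G.item()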
Example #26
    def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader,
                 valid_data_loader=None, lr_scheduler=None, len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))

        self.track_loss = ['loss', 'recon', 'kld', 'lmse', 'contrast', 'cycle', 'cycle_mse', 'cycle_ce', 'pseudo', 'klc']

        self.train_metrics = MetricTracker(*self.track_loss, *[m.__name__ for m in self.metric_ftns], writer=self.writer)
        self.valid_metrics = MetricTracker(*self.track_loss, *[m.__name__ for m in self.metric_ftns], writer=self.writer)

        self.pitch_map = {i: n for n, i in enumerate(data_loader.dataset.pitch_map)}
        self.dynamic_map = {i: n for n, i in enumerate(data_loader.dataset.dynamic_map)}
        self.pitchclass_map = {i: n for n, i in enumerate(data_loader.dataset.pitchclass_map)}
        self.tf_map = {v: data_loader.dataset.family_map[k] for k,v in data_loader.dataset.instrument_map.items()}

        self.plot_step = 25

        self.recon_sample = np.random.choice(valid_data_loader.sampler.indices, size=10, replace=False)
        pitches = np.random.choice(82, size=len(self.recon_sample))
        self.sample_to_pitch = {k: v for k, v in zip(self.recon_sample, pitches)}

        self.spec_ext = ExtractSpectrogram(sr=SR, n_fft=NFFT, hop_length=HOP, n_mels=NMEL, mode='mel')
        self.x_max, self.x_min = 9.7666, -36.0437  # pre-computed spectrogram value range used for normalization

        self.init_temp = self.model.temperature
        self.min_temp = self.model.min_temperature
        self.decay_rate = self.model.decay_rate

        self.pseudo_train = config['trainer']['pseudo_train']
        self.labeled = config['trainer']['labeled']
        self.labeled_sample = np.random.choice(data_loader.sampler.indices, size=int(len(data_loader.sampler.indices) * self.labeled), replace=False)

        self.freeze_encoder = config['trainer']['freeze_encoder']
        self.pitch_shift = config['trainer']['pitch_shift']
Example #27
    def __init__(self,
                 model,
                 criterion: TrackingLoss,
                 optimizer,
                 config,
                 device,
                 data_loader,
                 train_metrics: List[ActiveMetric] = None,
                 valid_metrics: List[ActiveMetric] = None,
                 valid_data_loader=None,
                 lr_schedulers=None,
                 len_epoch=None,
                 train_storage_keys=None,
                 valid_storage_keys=None,
                 items_len_key="targets"):
        super().__init__(model, criterion, optimizer, config)
        self.config = config
        self.device = device
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch

        self.train_metrics = train_metrics if train_metrics else []
        self.valid_metrics = valid_metrics if valid_metrics else []
        assert isinstance(self.train_metrics, collections.abc.Collection)
        assert isinstance(self.valid_metrics, collections.abc.Collection)

        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None

        if lr_schedulers is None:
            self.lr_schedulers = []
        else:
            assert isinstance(lr_schedulers, collections.abc.Sequence)
            self.lr_schedulers = lr_schedulers
        self.log_step = int(np.sqrt(data_loader.batch_size))
        self.train_storage_keys = train_storage_keys if train_storage_keys else []
        self.valid_storage_keys = valid_storage_keys if valid_storage_keys else []
        self.items_len_key = items_len_key

        self.tracker = MetricTracker(writer=self.writer)
Example #28
    def test(self, test_loader):
        self.model.eval()
        test_metrics = MetricTracker(
            'test_loss',
            *['test_' + m.__name__ for m in self.metric_ftns],
            writer=self.writer)

        with torch.no_grad():
            for batch_idx, batch in enumerate(test_loader):
                input_variables, input_lengths = getattr(batch, 'source')
                target = getattr(batch, 'target')
                output, _, sequence_info = self.model(
                    input=input_variables,
                    input_lens=input_lengths,
                    target=target)
                loss = self.criterion(output, target)

                # set writer step
                self.writer.set_step(
                    (self.epochs - 1) * len(self.valid_data_loader) +
                    batch_idx, 'test')

                # update test metrics
                test_metrics.update('test_loss', loss.item())
                for metric in self.metric_ftns:
                    test_metrics.update('test_' + metric.__name__,
                                        metric(output, target, sequence_info))

        for name, p in self.model.named_parameters():
            self.writer.add_histogram('test_' + name, p, bins='auto')
        for key, value in test_metrics.result().items():
            self.logger.info('    {:15s}: {}'.format(str(key), value))
Example #29
    def __init__(self,
                 model,
                 criterion,
                 metric_ftns,
                 optimizer,
                 config,
                 data_loader,
                 trainable_params,
                 valid_data_loader=None,
                 lr_scheduler=None,
                 len_epoch=None):
        super().__init__(model, criterion, metric_ftns, optimizer, config)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.log_step = int(np.sqrt(data_loader.batch_size))
        self.losses_num = len(self.criterion)
        self.max_empirical_losses = self._compute_max_expirical_losses()
        copsolver = AnalyticalSolver()
        self.common_descent_vector = MultiObjectiveCDV(
            copsolver=copsolver,
            max_empirical_losses=self.max_empirical_losses,
            normalized=True)
        self.trainable_params = trainable_params
        self.opt_losses = self.config['opt_losses']

        self.train_metrics = MetricTracker(
            'loss',
            'weighted_loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
        self.valid_metrics = MetricTracker(
            'loss',
            'weighted_loss',
            *[m.__name__ for m in self.metric_ftns],
            writer=self.writer)
Example #30
 def __init__(self, model, model_text, criterion, criterion_ret,
              metric_ftns, optimizer, config,
              data_loader, font_type,
              valid_data_loader=None, lr_scheduler=None, len_epoch=None):
     super().__init__(model, model_text, criterion, criterion_ret, metric_ftns, optimizer, config)
     self.config = config
     self.data_loader = data_loader
     self.font_type = font_type
     if len_epoch is None:
         # epoch-based training
         self.len_epoch = len(self.data_loader)
     else:
         # iteration-based training
         self.data_loader = inf_loop(data_loader)
         self.len_epoch = len_epoch
     self.valid_data_loader = valid_data_loader
     self.do_validation = self.valid_data_loader is not None
     self.lr_scheduler = lr_scheduler
     self.log_step = int(np.sqrt(data_loader.batch_size))
     self.no_tasks = len(_FACTORS_IN_ORDER)
     list_metrics = []
     for m in self.metric_ftns:
         for i in range(0, self.no_tasks):
             metric_task = f"{m.__name__}_{_FACTORS_IN_ORDER[i]}"
             list_metrics.append(metric_task)
     list_losses = []
     for i in range(0, self.no_tasks):
         list_losses.append(f"loss_{_FACTORS_IN_ORDER[i]}")
     self.train_metrics = MetricTracker('loss_classification', 'accuracy_retrieval',
                                        'loss_floor_hue', 'loss_wall_hue', 'loss_object_hue',
                                        'loss_retrieval', 'loss_tot', 'loss_scale', 'loss_shape',
                                        'loss_orientation', 'accuracy_floor_hue',
                                        'accuracy_wall_hue', 'accuracy_object_hue',
                                        'accuracy_scale', 'accuracy_shape',
                                        'accuracy_orientation', 'accuracy',
                                        writer=self.writer)
     self.valid_metrics = MetricTracker('loss_classification', 'accuracy_retrieval',
                                        'loss_floor_hue', 'loss_wall_hue', 'loss_object_hue',
                                        'loss_retrieval', 'loss_tot', 'loss_scale', 'loss_shape',
                                        'loss_orientation', 'accuracy_floor_hue',
                                        'accuracy_wall_hue', 'accuracy_object_hue',
                                        'accuracy_scale', 'accuracy_shape',
                                        'accuracy_orientation', 'accuracy',
                                        writer=self.writer)
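The per-factor keys above ('loss_floor_hue', 'accuracy_scale', and so on) suggest one classification head and one loss per factor, summed into 'loss_tot'. A minimal sketch of that aggregation (the dict-of-logits interface and factor list are assumptions based on the keys):

import torch.nn.functional as F

FACTORS = ['floor_hue', 'wall_hue', 'object_hue', 'scale', 'shape', 'orientation']

def multitask_loss(outputs, targets, tracker):
    """Sum per-factor cross-entropy losses, recording each under 'loss_<factor>'."""
    total = 0.0
    for factor in FACTORS:
        loss = F.cross_entropy(outputs[factor], targets[factor])
        tracker.update(f'loss_{factor}', loss.item())
        total = total + loss
    tracker.update('loss_tot', total.item())
    return total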