Example #1
0
    def do_test(self, model, dataloader, mode="VAL"):
        """Run one evaluation pass over `dataloader`.

        Returns the metric dict from `self.metrics`, with the mean batch
        loss added under the "Loss" key (rounded to 4 decimals).
        """
        model.eval()
        total_loss = 0.0
        preds, truths = [], []
        with torch.no_grad(), tqdm(dataloader) as progress:
            for batch in progress:
                device = self.args.device
                vision = batch['vision'].to(device)
                audio = batch['audio'].to(device)
                text = batch['text'].to(device)
                labels = batch['labels']['M'].to(device)
                # Classification expects a flat LongTensor of class ids;
                # regression expects an (N, 1) float target.
                if self.args.train_mode == 'classification':
                    labels = labels.view(-1).long()
                else:
                    labels = labels.view(-1, 1)
                logits = model(text, audio, vision)['M']
                total_loss += self.criterion(logits, labels).item()
                preds.append(logits.cpu())
                truths.append(labels.cpu())
        avg_loss = total_loss / len(dataloader)
        eval_results = self.metrics(torch.cat(preds), torch.cat(truths))
        eval_results["Loss"] = round(avg_loss, 4)

        logger.info("%s-(%s) >> %s" %
                    (mode, self.args.modelName, dict_to_str(eval_results)))
        return eval_results
Example #2
0
 def do_test(self, model, dataloader, mode="VAL"):
     """Evaluate `model` on every task in `self.args.tasks`.

     Computes a weighted multi-task loss (each task's weight is read from
     the attribute of `self.args` named after the task) and per-task
     metrics. Returns a dict mapping each task name to its metric dict.
     """
     model.eval()
     y_pred = {'M': [], 'T': [], 'A': [], 'V': []}
     y_true = {'M': [], 'T': [], 'A': [], 'V': []}
     eval_loss = 0.0
     with torch.no_grad():
         with tqdm(dataloader) as td:
             for batch_data in td:
                 vision = batch_data['vision'].to(self.args.device)
                 audio = batch_data['audio'].to(self.args.device)
                 text = batch_data['text'].to(self.args.device)
                 labels = batch_data['labels']
                 for k in labels.keys():
                     labels[k] = labels[k].to(self.args.device).view(-1, 1)
                 outputs = model(text, audio, vision)
                 loss = 0.0
                 for m in self.args.tasks:
                     # getattr replaces the original eval('self.args.' + m):
                     # same attribute lookup, no arbitrary code execution.
                     loss += getattr(self.args, m) * self.criterion(
                         outputs[m], labels[m])
                 eval_loss += loss.item()
                 for m in self.args.tasks:
                     y_pred[m].append(outputs[m].cpu())
                     # Ground truth always comes from the 'M' label for
                     # every task (matches the training-side bookkeeping).
                     y_true[m].append(labels['M'].cpu())
     eval_loss = eval_loss / len(dataloader)
     print(mode + "-(%s)" % self.args.modelName +
           " >> loss: %.4f " % eval_loss)
     return_res = {}
     for m in self.args.tasks:
         pred, true = torch.cat(y_pred[m]), torch.cat(y_true[m])
         results = self.metrics(pred, true)
         print('%s: >> ' % (m) + dict_to_str(results))
         return_res[m] = results
     return return_res
Example #3
0
 def do_test(self, model, dataloader, mode="VAL"):
     """Evaluate `model` on the single task named by `self.args.tasks`.

     Returns {task_name: metrics_dict}.
     """
     model.eval()
     task = self.args.tasks
     device = self.args.device
     running_loss = 0.0
     preds, truths = [], []
     with torch.no_grad():
         with tqdm(dataloader) as progress:
             for batch in progress:
                 vision = batch['vision'].to(device)
                 audio = batch['audio'].to(device)
                 text = batch['text'].to(device)
                 labels = batch['labels'][task].to(device).view(-1, 1)
                 outputs = model(text, audio, vision)
                 running_loss += self.criterion(outputs[task], labels).item()
                 preds.append(outputs[task].cpu())
                 truths.append(labels.cpu())
     eval_loss = running_loss / len(dataloader)
     print(mode + "-(%s)" % self.args.modelName +
           " >> loss: %.4f " % eval_loss)
     results = self.metrics(torch.cat(preds), torch.cat(truths))
     print('%s: >> ' % (task) + dict_to_str(results))
     return {task: results}
Example #4
0
 def do_test(self, model, dataloader, mode="VAL"):
     """Evaluate `model`, dispatching on `self.args.output_mode`.

     Returns {task_name: metrics_dict}; the metric dict also carries the
     mean batch loss under 'Loss'.

     Raises:
         ValueError: if `self.args.output_mode` is neither
             'classification' nor 'regression'. (Previously an unknown
             mode fell through and crashed with a NameError on `loss`.)
     """
     model.eval()
     y_pred, y_true = [], []
     eval_loss = 0.0
     with torch.no_grad():
         with tqdm(dataloader) as td:
             for batch_data in td:
                 vision = batch_data['vision'].to(self.args.device)
                 audio = batch_data['audio'].to(self.args.device)
                 text = batch_data['text'].to(self.args.device)
                 labels = batch_data['labels']['M'].to(self.args.device).view(-1, 1)
                 outputs = model(text, audio, vision)
                 # assumes the model returns logits as outputs[0] — TODO confirm
                 logits = outputs[0].squeeze()
                 if self.args.output_mode == "classification":
                     loss = self.classification_criterion(logits.view(-1, self.args.num_labels), labels.view(-1))
                 elif self.args.output_mode == "regression":
                     loss = self.regression_criterion(logits.view(-1), labels.view(-1))
                 else:
                     raise ValueError(
                         "Unknown output_mode: %r" % self.args.output_mode)
                 eval_loss += loss.item()
                 y_pred.append(logits.cpu())
                 y_true.append(labels.cpu())
     eval_loss = eval_loss / len(dataloader)
     print(mode+"-(%s)" % self.args.modelName + " >> loss: %.4f " % eval_loss)
     pred, true = torch.cat(y_pred), torch.cat(y_true)
     test_results = self.metrics(pred, true, exclude_zero=self.args.excludeZero)
     print('%s-%s: >> ' %(mode, self.args.tasks) + dict_to_str(test_results))
     test_results['Loss'] = eval_loss
     tmp = {
         self.args.tasks: test_results
     }
     return tmp
Example #5
0
 def do_train(self, model, dataloader):
     """Train `model` until `self.args.early_stop` epochs pass without a
     new best validation score on `self.args.KeyEval`.

     The best checkpoint is written to
     `<model_save_path>/<modelName>-<datasetName>-<tasks>.pth`.
     """
     optimizer = optim.Adam(model.parameters(),
                            lr=self.args.learning_rate,
                            weight_decay=self.args.weight_decay)
     # initialize results
     # NOTE(review): assumes KeyEval is a higher-is-better metric — no
     # 'Loss'-style minimization branch here; confirm against callers.
     best_acc = 0
     epochs, best_epoch = 0, 0
     # loop until early stop
     while True:
         epochs += 1
         # train (removed unused `losses` accumulator)
         y_pred, y_true = [], []
         model.train()
         train_loss = 0.0
         with tqdm(dataloader['train']) as td:
             for batch_data in td:
                 vision = batch_data['vision'].to(self.args.device)
                 audio = batch_data['audio'].to(self.args.device)
                 text = batch_data['text'].to(self.args.device)
                 labels = batch_data['labels'][self.args.tasks].to(
                     self.args.device).view(-1, 1)
                 # clear gradient
                 optimizer.zero_grad()
                 # forward
                 outputs = model(text, audio, vision)
                 # compute loss
                 loss = self.criterion(outputs[self.args.tasks], labels)
                 # backward
                 loss.backward()
                 # update
                 optimizer.step()
                 # store results
                 train_loss += loss.item()
                 y_pred.append(outputs[self.args.tasks].cpu())
                 y_true.append(labels.cpu())
         train_loss = train_loss / len(dataloader['train'])
         print("TRAIN-(%s) (%d/%d/%d)>> loss: %.4f " % (self.args.modelName, \
                     epochs - best_epoch, epochs, self.args.cur_time, train_loss))
         pred, true = torch.cat(y_pred), torch.cat(y_true)
         train_results = self.metrics(pred, true)
         print('%s: >> ' % (self.args.tasks) + dict_to_str(train_results))
         # validation
         val_results = self.do_test(model, dataloader['valid'], mode="VAL")
         val_acc = val_results[self.args.tasks][self.args.KeyEval]
         # save best model
         if val_acc > best_acc:
             best_acc, best_epoch = val_acc, epochs
             model_path = os.path.join(self.args.model_save_path,\
                                 f'{self.args.modelName}-{self.args.datasetName}-{self.args.tasks}.pth')
             if os.path.exists(model_path):
                 os.remove(model_path)
             # save model on CPU so the checkpoint is device-agnostic
             torch.save(model.cpu().state_dict(), model_path)
             model.to(self.args.device)
         # early stop
         if epochs - best_epoch >= self.args.early_stop:
             return
Example #6
0
 def do_train(self, model, dataloader):
     """Train `model` with early stopping on `self.args.KeyEval`.

     The key metric is minimized when it is 'Loss' and maximized
     otherwise; the best weights are saved to
     `self.args.model_save_path`.
     """
     optimizer = optim.Adam(model.parameters(), lr=self.args.learning_rate, weight_decay=self.args.weight_decay)
     # initialize results
     epochs, best_epoch = 0, 0
     min_or_max = 'min' if self.args.KeyEval in ['Loss'] else 'max'
     best_valid = 1e8 if min_or_max == 'min' else 0
     # loop until early stop
     while True:
         epochs += 1
         # train (removed unused `losses` accumulator)
         y_pred, y_true = [], []
         model.train()
         train_loss = 0.0
         with tqdm(dataloader['train']) as td:
             for batch_data in td:
                 vision = batch_data['vision'].to(self.args.device)
                 audio = batch_data['audio'].to(self.args.device)
                 text = batch_data['text'].to(self.args.device)
                 labels = batch_data['labels']['M'].to(self.args.device)
                 # classification wants flat class ids; regression (N, 1)
                 if self.args.train_mode == 'classification':
                     labels = labels.view(-1).long()
                 else:
                     labels = labels.view(-1, 1)
                 # clear gradient
                 optimizer.zero_grad()
                 # forward
                 outputs = model(text, audio, vision)['M']
                 # compute loss
                 loss = self.criterion(outputs, labels)
                 # backward
                 loss.backward()
                 # update
                 optimizer.step()
                 # store results
                 train_loss += loss.item()
                 y_pred.append(outputs.cpu())
                 y_true.append(labels.cpu())
         train_loss = train_loss / len(dataloader['train'])

         pred, true = torch.cat(y_pred), torch.cat(y_true)
         train_results = self.metrics(pred, true)
         logger.info("TRAIN-(%s) (%d/%d/%d)>> loss: %.4f %s" % (self.args.modelName, \
                     epochs - best_epoch, epochs, self.args.cur_time, train_loss, dict_to_str(train_results)))
         # validation
         val_results = self.do_test(model, dataloader['valid'], mode="VAL")
         cur_valid = val_results[self.args.KeyEval]
         # save best model (1e-6 epsilon keeps ties from counting as better)
         isBetter = cur_valid <= (best_valid - 1e-6) if min_or_max == 'min' else cur_valid >= (best_valid + 1e-6)
         if isBetter:
             best_valid, best_epoch = cur_valid, epochs
             # save model on CPU so the checkpoint is device-agnostic
             torch.save(model.cpu().state_dict(), self.args.model_save_path)
             model.to(self.args.device)
         # early stop
         if epochs - best_epoch >= self.args.early_stop:
             return
Example #7
0
    def do_test(self, model, dataloader, mode="VAL", need_details=True):
        """Evaluate `model` on `dataloader` and return a metric dict.

        When `need_details` is True, the returned dict additionally carries
        per-sample ids ('Ids'), predicted class indices ('SResults'),
        concatenated intermediate features ('Features': Feature_T/A/V/M)
        and the raw integer labels ('Labels').
        """
        model.eval()
        y_pred, y_true = [], []
        eval_loss = 0.0
        if need_details:
            # per-sample bookkeeping, only allocated when requested
            ids, sample_results = [], []
            all_labels = []
            features = {
                "Feature_T": [],
                "Feature_A": [],
                "Feature_V": [],
                "Feature_M": [],
            }
        with torch.no_grad():
            with tqdm(dataloader) as td:
                for batch_data in td:
                    vision = batch_data['vision'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    text = batch_data['text'].to(self.args.device)
                    # classification targets: flat LongTensor of class ids
                    labels = batch_data['labels']["M"].to(
                        self.args.device).view(-1).long()
                    outputs = model(text, audio, vision)

                    if need_details:
                        ids.extend(batch_data['id'])
                        for item in features.keys():
                            features[item].append(
                                outputs[item].cpu().detach().numpy())
                        all_labels.extend(labels.cpu().detach().tolist())
                        # argmax over class logits -> predicted class index
                        preds = outputs["M"].cpu().detach().numpy()
                        test_preds_i = np.argmax(preds, axis=1)
                        sample_results.extend(test_preds_i)

                    logits = outputs['M']
                    loss = self.criterion(logits, labels)
                    eval_loss += loss.item()
                    y_pred.append(outputs["M"].detach().cpu())
                    y_true.append(labels.detach().cpu())
        # mean of per-batch losses
        eval_loss = eval_loss / len(dataloader)

        pred, true = torch.cat(y_pred), torch.cat(y_true)
        results = self.metrics(pred, true)
        print(mode+"-(%s)" % self.args.modelName + " >> loss: %.4f " % \
                eval_loss + dict_to_str(results))
        results["Loss"] = round(eval_loss, 4)

        if need_details:
            results["Ids"] = ids
            results["SResults"] = sample_results
            for k in features.keys():
                # stack per-batch arrays into a single array per modality
                features[k] = np.concatenate(features[k], axis=0)
            results['Features'] = features
            results['Labels'] = all_labels

        return results
Example #8
0
    def do_test(self, model, dataloader, mode="VAL"):
        """Evaluate `model` on the fused ('M') output only.

        Unaligned data forwards per-sample audio/vision lengths to the
        model; aligned data passes 0 placeholders. Returns
        {'M': metrics_dict} with the mean batch loss added under 'Loss'.
        """
        model.eval()
        y_pred = {'M': [], 'T': [], 'A': [], 'V': []}
        y_true = {'M': [], 'T': [], 'A': [], 'V': []}
        eval_loss = 0.0
        # removed unused `criterion = nn.L1Loss()` (only referenced by dead,
        # commented-out code) — the loss actually used is self.weighted_loss
        with torch.no_grad():
            with tqdm(dataloader) as td:
                for batch_data in td:
                    vision = batch_data['vision'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    text = batch_data['text'].to(self.args.device)
                    if not self.args.aligned:
                        audio_lengths = batch_data['audio_lengths'].to(
                            self.args.device)
                        vision_lengths = batch_data['vision_lengths'].to(
                            self.args.device)
                    else:
                        # aligned data: length placeholders are unused
                        audio_lengths, vision_lengths = 0, 0

                    labels_m = batch_data['labels']['M'].to(
                        self.args.device).view(-1)
                    outputs = model(text, (audio, audio_lengths),
                                    (vision, vision_lengths))
                    loss = self.weighted_loss(outputs['M'], labels_m)
                    eval_loss += loss.item()
                    y_pred['M'].append(outputs['M'].cpu())
                    y_true['M'].append(labels_m.cpu())
        eval_loss = eval_loss / len(dataloader)
        print(mode + "-(%s)" % self.args.modelName +
              " >> loss: %.4f " % eval_loss)
        ret_res = {}
        for m in ['M']:
            pred, true = torch.cat(y_pred[m]), torch.cat(y_true[m])
            results = self.metrics(pred, true)
            print('%s: >> ' % (m) + dict_to_str(results))
            ret_res[m] = results
            ret_res[m]['Loss'] = eval_loss
        return ret_res
Example #9
0
    def do_train(self, model, dataloader):
        """Train with per-subnet weight decay and per-group learning rates.

        Runs until `self.args.early_stop` epochs pass without improving
        `self.args.KeyEval` on the validation split; the best weights are
        saved to `self.args.model_save_path`.
        """
        optimizer = optim.Adam(
            [{
                "params": list(model.Model.text_subnet.parameters()),
                "weight_decay": self.args.text_weight_decay
            }, {
                "params": list(model.Model.audio_subnet.parameters()),
                "weight_decay": self.args.audio_weight_decay
            }, {
                "params": list(model.Model.video_subnet.parameters()),
                "weight_decay": self.args.video_weight_decay
            }, {
                # NOTE(review): relies on the ordering of model.parameters();
                # confirm the first five parameters are the fusion factors
                # this slicing intends to target.
                "params": list(model.parameters())[:3],
                "lr": self.args.factor_lr
            }, {
                "params": list(model.parameters())[3:5],
                "lr": 0.0
            }],
            lr=self.args.learning_rate,
            weight_decay=self.args.weight_decay)
        # initialize results
        epochs, best_epoch = 0, 0
        min_or_max = 'min' if self.args.KeyEval in ['Loss'] else 'max'
        best_valid = 1e8 if min_or_max == 'min' else 0
        # loop until early stop
        while True:
            epochs += 1
            # train (removed unused `losses` accumulator)
            y_pred = {'M': [], 'T': [], 'A': [], 'V': []}
            y_true = {'M': [], 'T': [], 'A': [], 'V': []}
            model.train()
            train_loss = 0.0
            with tqdm(dataloader['train']) as td:
                for batch_data in td:
                    vision = batch_data['vision'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    text = batch_data['text'].to(self.args.device)
                    labels = batch_data['labels']
                    for k in labels.keys():
                        if self.args.train_mode == 'classification':
                            labels[k] = labels[k].to(
                                self.args.device).view(-1).long()
                        else:
                            labels[k] = labels[k].to(self.args.device).view(
                                -1, 1)
                    # clear gradient
                    optimizer.zero_grad()
                    # forward
                    outputs = model(text, audio, vision)
                    # compute weighted multi-task loss; getattr replaces the
                    # original eval('self.args.' + m) — same lookup, safer
                    loss = 0.0
                    for m in self.args.tasks:
                        loss += getattr(self.args, m) * self.criterion(
                            outputs[m], labels[m])
                    # backward
                    loss.backward()
                    # update
                    optimizer.step()
                    # store results (truths are always the 'M' labels)
                    train_loss += loss.item()
                    for m in self.args.tasks:
                        y_pred[m].append(outputs[m].cpu())
                        y_true[m].append(labels['M'].cpu())
            train_loss = train_loss / len(dataloader['train'])

            logger.info("TRAIN-(%s) (%d/%d/%d)>> loss: %.4f " % (self.args.modelName, \
                        epochs - best_epoch, epochs, self.args.cur_time, train_loss))
            for m in self.args.tasks:
                pred, true = torch.cat(y_pred[m]), torch.cat(y_true[m])
                train_results = self.metrics(pred, true)
                logger.info('%s: >> ' % (m) + dict_to_str(train_results))
            # validation
            val_results = self.do_test(model, dataloader['valid'], mode="VAL")
            cur_valid = val_results[self.args.KeyEval]
            # save best model (1e-6 epsilon keeps ties from counting)
            isBetter = cur_valid <= (
                best_valid - 1e-6) if min_or_max == 'min' else cur_valid >= (
                    best_valid + 1e-6)
            if isBetter:
                best_valid, best_epoch = cur_valid, epochs
                # save model on CPU so the checkpoint is device-agnostic
                torch.save(model.cpu().state_dict(), self.args.model_save_path)
                model.to(self.args.device)
            # early stop
            if epochs - best_epoch >= self.args.early_stop:
                return
Example #10
0
    def do_test(self, model, dataloader, mode="VAL", need_details=False):
        """Evaluate on every task; return the first task's metric dict.

        Per-task metrics are printed, but only the first task's dict (plus
        the mean batch loss under 'Loss') is returned. When `need_details`
        is True, ids, predicted class indices, stacked intermediate
        features and raw labels are attached to the returned dict.
        """
        model.eval()
        y_pred = {'M': [], 'T': [], 'A': [], 'V': []}
        y_true = {'M': [], 'T': [], 'A': [], 'V': []}
        eval_loss = 0.0
        if need_details:
            # per-sample bookkeeping, only allocated when requested
            ids, sample_results = [], []
            all_labels = []
            features = {
                "Feature_T": [],
                "Feature_A": [],
                "Feature_V": [],
                "Feature_M": [],
            }
        with torch.no_grad():
            with tqdm(dataloader) as td:
                for batch_data in td:
                    vision = batch_data['vision'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    text = batch_data['text'].to(self.args.device)
                    labels = batch_data['labels']
                    for k in labels.keys():
                        labels[k] = labels[k].to(
                            self.args.device).view(-1).long()
                    outputs = model(text, audio, vision)

                    if need_details:
                        ids.extend(batch_data['id'])
                        for item in features.keys():
                            features[item].append(
                                outputs[item].cpu().detach().numpy())
                        all_labels.extend(labels['M'].cpu().detach().tolist())
                        # argmax over class logits -> predicted class index
                        preds = outputs["M"].cpu().detach().numpy()
                        test_preds_i = np.argmax(preds, axis=1)
                        sample_results.extend(test_preds_i)

                    loss = 0.0
                    for m in self.args.tasks:
                        # getattr replaces the original eval('self.args.' + m):
                        # same attribute lookup, no arbitrary code execution
                        loss += getattr(self.args, m) * self.criterion(
                            outputs[m], labels[m])
                    eval_loss += loss.item()
                    for m in self.args.tasks:
                        y_pred[m].append(outputs[m].detach().cpu())
                        y_true[m].append(labels['M'].detach().cpu())
        eval_loss = eval_loss / len(dataloader)
        print(mode + "-(%s)" % self.args.modelName +
              " >> loss: %.4f " % eval_loss)
        for i, m in enumerate(self.args.tasks):
            pred, true = torch.cat(y_pred[m]), torch.cat(y_true[m])
            results = self.metrics(pred, true)
            print('%s: >> ' % (m) + dict_to_str(results))
            if i == 0:
                # only the first task's results are returned
                eval_results = results
                eval_results["Loss"] = round(eval_loss, 4)
        if need_details:
            eval_results["Ids"] = ids
            eval_results["SResults"] = sample_results
            for k in features.keys():
                features[k] = np.concatenate(features[k], axis=0)
            eval_results['Features'] = features
            eval_results['Labels'] = all_labels
        return eval_results
Example #11
0
    def do_train(self, model, dataloader):
        """Train with gradient accumulation and auxiliary similarity losses.

        Gradients are accumulated over `self.args.update_epochs`
        mini-batches before each optimizer step. The total loss combines
        the task loss with difference, reconstruction and similarity terms
        (CMD or domain-adversarial, chosen by `self.args.use_cmd_sim`).
        Training stops once `self.args.early_stop` epochs pass without
        improving `self.args.KeyEval` on the validation split; the best
        weights are saved to `self.args.model_save_path`.
        """
        # presumably read by the get_*_loss helpers below — verify
        self.model = model
        optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                      model.parameters()),
                               lr=self.args.learning_rate)
        # initilize results
        epochs, best_epoch = 0, 0
        # 'Loss' is minimized; any other key metric is maximized
        min_or_max = 'min' if self.args.KeyEval in ['Loss'] else 'max'
        best_valid = 1e8 if min_or_max == 'min' else 0
        while True:
            epochs += 1
            # train
            y_pred, y_true = [], []
            losses = []
            model.train()
            train_loss = 0.0
            # countdown of batches remaining before the next optimizer step
            left_epochs = self.args.update_epochs
            with tqdm(dataloader['train']) as td:
                for batch_data in td:
                    # using accumulated gradients
                    if left_epochs == self.args.update_epochs:
                        optimizer.zero_grad()
                    left_epochs -= 1
                    text = batch_data['text'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    vision = batch_data['vision'].to(self.args.device)
                    labels = batch_data['labels']['M'].to(self.args.device)
                    # classification wants flat class ids; regression (N, 1)
                    if self.args.train_mode == 'classification':
                        labels = labels.view(-1).long()
                    else:
                        labels = labels.view(-1, 1)
                    # forward
                    outputs = model(text, audio, vision)['M']
                    # compute loss
                    cls_loss = self.criterion(outputs, labels)
                    diff_loss = self.get_diff_loss()
                    domain_loss = self.get_domain_loss()
                    recon_loss = self.get_recon_loss()
                    cmd_loss = self.get_cmd_loss()

                    if self.args.use_cmd_sim:
                        similarity_loss = cmd_loss
                    else:
                        similarity_loss = domain_loss

                    loss = cls_loss + \
                           self.args.diff_weight * diff_loss + \
                           self.args.sim_weight * similarity_loss + \
                           self.args.recon_weight * recon_loss
                    # backward
                    loss.backward()
                    # grad_clip == -1.0 disables clipping
                    if self.args.grad_clip != -1.0:
                        torch.nn.utils.clip_grad_value_([
                            param for param in model.parameters()
                            if param.requires_grad
                        ], self.args.grad_clip)
                    # store results
                    train_loss += loss.item()
                    y_pred.append(outputs.cpu())
                    y_true.append(labels.cpu())
                    if not left_epochs:
                        optimizer.step()
                        left_epochs = self.args.update_epochs
                # NOTE(review): left_epochs is reset right after every inner
                # step, so it can never be 0 here — this flush never fires
                # and a trailing partial accumulation is silently discarded
                # (zeroed at the start of the next epoch). Confirm intent.
                if not left_epochs:
                    # update
                    optimizer.step()
            train_loss = train_loss / len(dataloader['train'])

            pred, true = torch.cat(y_pred), torch.cat(y_true)
            train_results = self.metrics(pred, true)
            logger.info("TRAIN-(%s) (%d/%d/%d)>> loss: %.4f %s" % (self.args.modelName, \
                        epochs - best_epoch, epochs, self.args.cur_time, train_loss, dict_to_str(train_results)))
            # validation
            val_results = self.do_test(model, dataloader['valid'], mode="VAL")
            cur_valid = val_results[self.args.KeyEval]
            # save best model (1e-6 epsilon keeps ties from counting)
            isBetter = cur_valid <= (
                best_valid - 1e-6) if min_or_max == 'min' else cur_valid >= (
                    best_valid + 1e-6)
            # save best model
            if isBetter:
                best_valid, best_epoch = cur_valid, epochs
                # save model
                torch.save(model.cpu().state_dict(), self.args.model_save_path)
                model.to(self.args.device)
            # early stop
            if epochs - best_epoch >= self.args.early_stop:
                return
Example #12
0
 def do_train(self, model, dataloader):
     """Train with per-subnet weight decay until early stopping.

     The validation score is read as
     `val_results[tasks[0]][KeyEval]`; the best checkpoint is written to
     `<model_save_path>/<modelName>-<datasetName>-<tasks>.pth`.
     """
     optimizer = optim.Adam(
         [{
             "params": list(model.Model.text_subnet.parameters()),
             "weight_decay": self.args.text_weight_decay
         }, {
             "params": list(model.Model.audio_subnet.parameters()),
             "weight_decay": self.args.audio_weight_decay
         }, {
             "params": list(model.Model.video_subnet.parameters()),
             "weight_decay": self.args.video_weight_decay
         }],
         lr=self.args.learning_rate)
     # initialize results
     # NOTE(review): assumes KeyEval is a higher-is-better metric — no
     # 'Loss'-style minimization branch here; confirm against callers.
     best_acc = 0
     epochs, best_epoch = 0, 0
     # loop until early stop
     while True:
         epochs += 1
         # train (removed unused `losses` accumulator)
         y_pred = {'M': [], 'T': [], 'A': [], 'V': []}
         y_true = {'M': [], 'T': [], 'A': [], 'V': []}
         model.train()
         train_loss = 0.0
         with tqdm(dataloader['train']) as td:
             for batch_data in td:
                 vision = batch_data['vision'].to(self.args.device)
                 audio = batch_data['audio'].to(self.args.device)
                 text = batch_data['text'].to(self.args.device)
                 labels = batch_data['labels']
                 for k in labels.keys():
                     labels[k] = labels[k].to(self.args.device).view(-1, 1)
                 # clear gradient
                 optimizer.zero_grad()
                 # forward
                 outputs = model(text, audio, vision)
                 # compute weighted multi-task loss; getattr replaces the
                 # original eval('self.args.' + m) — same lookup, safer
                 loss = 0.0
                 for m in self.args.tasks:
                     loss += getattr(self.args, m) * self.criterion(
                         outputs[m], labels[m])
                 # backward
                 loss.backward()
                 # update
                 optimizer.step()
                 # store results (truths are always the 'M' labels)
                 train_loss += loss.item()
                 for m in self.args.tasks:
                     y_pred[m].append(outputs[m].cpu())
                     y_true[m].append(labels['M'].cpu())
         train_loss = train_loss / len(dataloader['train'])
         print("TRAIN-(%s) (%d/%d/%d)>> loss: %.4f " % (self.args.modelName, \
                     epochs - best_epoch, epochs, self.args.cur_time, train_loss))
         for m in self.args.tasks:
             pred, true = torch.cat(y_pred[m]), torch.cat(y_true[m])
             train_results = self.metrics(pred, true)
             print('%s: >> ' % (m) + dict_to_str(train_results))
         # validation
         val_results = self.do_test(model, dataloader['valid'], mode="VAL")
         val_acc = val_results[self.args.tasks[0]][self.args.KeyEval]
         # save best model
         if val_acc > best_acc:
             best_acc, best_epoch = val_acc, epochs
             model_path = os.path.join(self.args.model_save_path,\
                                 f'{self.args.modelName}-{self.args.datasetName}-{self.args.tasks}.pth')
             if os.path.exists(model_path):
                 os.remove(model_path)
             # save model on CPU so the checkpoint is device-agnostic
             torch.save(model.cpu().state_dict(), model_path)
             model.to(self.args.device)
         # early stop
         if epochs - best_epoch >= self.args.early_stop:
             return
Example #13
0
    def do_train(self, model, dataloader):
        """Train with Adam until early stopping; return per-epoch results.

        Collects metric dicts for train/valid/test at every epoch and
        returns them as {'train': [...], 'valid': [...], 'test': [...]}
        once `self.args.early_stop` epochs pass without improving
        `self.args.KeyEval` on the validation split.
        """
        print("step into do_train ...")
        optimizer = optim.Adam(model.parameters(), lr=self.args.learning_rate, weight_decay=self.args.weight_decay)
        # initialize results
        epochs, best_epoch = 0, 0
        epoch_results = {
            'train': [],
            'valid': [],
            'test': []
        }
        # 'Loss' is minimized; any other key metric is maximized
        min_or_max = 'min' if self.args.KeyEval in ['Loss'] else 'max'
        best_valid = 1e8 if min_or_max == 'min' else 0
        # loop until early stop
        while True:
            epochs += 1
            # train (removed unused `losses` accumulator)
            y_pred, y_true = [], []
            model.train()
            train_loss = 0.0
            with tqdm(dataloader['train']) as td:
                for batch_data in td:
                    vision = batch_data['vision'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    text = batch_data['text'].to(self.args.device)
                    # classification targets: flat LongTensor of class ids
                    labels = batch_data['labels']["M"].to(self.args.device).view(-1).long()

                    # clear gradient
                    optimizer.zero_grad()
                    # forward
                    outputs = model(text, audio, vision)
                    # compute loss
                    loss = self.criterion(outputs["M"], labels)
                    # backward
                    loss.backward()
                    # update
                    optimizer.step()
                    # store results
                    train_loss += loss.item()
                    y_pred.append(outputs["M"].detach().cpu())
                    y_true.append(labels.detach().cpu())
            train_loss = train_loss / len(dataloader['train'])

            pred, true = torch.cat(y_pred), torch.cat(y_true)
            train_results = self.metrics(pred, true)
            train_results["Loss"] = round(train_loss, 4)
            epoch_results['train'].append(train_results)

            print("TRAIN-(%s) (%d/%d)>> loss: %.4f " % (self.args.modelName, \
                epochs - best_epoch, epochs, train_loss) + dict_to_str(train_results))

            # validation
            val_results = self.do_test(model, dataloader['valid'], mode="VAL")
            test_results = self.do_test(model, dataloader['test'], mode="TEST")
            epoch_results['valid'].append(val_results)
            epoch_results['test'].append(test_results)

            cur_valid = val_results[self.args.KeyEval]
            # save best model (no epsilon: ties count as improvement, so the
            # latest equally-good epoch's weights are kept)
            isBetter = cur_valid <= best_valid if min_or_max == 'min' else cur_valid >= best_valid
            if isBetter:
                best_valid, best_epoch = cur_valid, epochs
                # save model on CPU so the checkpoint is device-agnostic
                torch.save(model.cpu().state_dict(), self.args.model_save_path)
                model.to(self.args.device)
            # early stop
            if epochs - best_epoch >= self.args.early_stop:
                return epoch_results
Example #14
0
 def do_train(self, model, dataloader):
     """Fine-tune `model` with AdamW and early stopping.

     Args:
         model: the multimodal model; called as model(text, audio, vision),
             with the task output taken from outputs[0].
         dataloader: dict with 'train'/'valid'/'test' iterables yielding
             batches with 'text', 'audio', 'vision' and 'labels'['M'].

     Selects the best checkpoint by self.args.KeyEval on the validation set
     ('min' objective when KeyEval is 'Loss', 'max' otherwise) and stops when
     no improvement is seen for self.args.early_stop epochs.
     """
     # BERT-style decay split: biases and LayerNorm parameters get no weight decay.
     param_optimizer = list(model.named_parameters())
     no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
     optimizer_grouped_parameters = [
         {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
         {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
     ]
     optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate)
     # initialize results
     epochs, best_epoch = 0, 0
     min_or_max = 'min' if self.args.KeyEval in ['Loss'] else 'max'
     best_valid = 1e8 if min_or_max == 'min' else 0
     while(epochs - best_epoch < self.args.early_stop): 
         epochs += 1
         # train
         y_pred, y_true = [], []
         model.train()
         train_loss = 0.0
         # Gradient accumulation: one optimizer step every `update_epochs` batches.
         left_epochs = self.args.update_epochs
         with tqdm(dataloader['train']) as td:
             for batch_data in td:
                 if left_epochs == self.args.update_epochs:
                     optimizer.zero_grad()
                 left_epochs -= 1
                 text = batch_data['text'].to(self.args.device)
                 audio = batch_data['audio'].to(self.args.device)
                 vision = batch_data['vision'].to(self.args.device)
                 labels = batch_data['labels']['M'].squeeze().to(self.args.device)
                 # forward
                 outputs = model(text, audio, vision)
                 logits = outputs[0].squeeze()
                 # compute loss
                 if self.args.output_mode == "classification":
                     loss = self.classification_criterion(logits.view(-1, self.args.num_labels), labels.view(-1))
                 elif self.args.output_mode == "regression":
                     loss = self.regression_criterion(logits.view(-1), labels.view(-1))
                 # backward (gradients accumulate until the counter hits zero)
                 loss.backward()
                 # store results
                 train_loss += loss.item()
                 y_pred.append(logits.cpu())
                 y_true.append(labels.cpu())
                 if not left_epochs:
                     optimizer.step()
                     left_epochs = self.args.update_epochs
             # BUGFIX: flush a trailing partial accumulation. The previous check
             # (`if not left_epochs`) was never true here — the counter is reset
             # to update_epochs inside the loop whenever it reaches 0 — so
             # gradients from the final incomplete group were silently dropped.
             if left_epochs != self.args.update_epochs:
                 optimizer.step()
         train_loss = train_loss / len(dataloader['train'])
         print("TRAIN-(%s) (%d/%d/%d)>> loss: %.4f " % (self.args.modelName, \
                     epochs-best_epoch, epochs, self.args.cur_time, train_loss))
         pred, true = torch.cat(y_pred), torch.cat(y_true)
         train_results = self.metrics(pred, true, exclude_zero=self.args.excludeZero)
         print('%s: >> ' %(self.args.tasks) + dict_to_str(train_results))
         # validation
         val_results = self.do_test(model, dataloader['valid'], mode="VAL")
         cur_valid = val_results[self.args.tasks[0]][self.args.KeyEval]
         # save best model
         isBetter = cur_valid <= best_valid if min_or_max == 'min' else cur_valid >= best_valid
         if isBetter:
             best_valid, best_epoch = cur_valid, epochs
             model_path = os.path.join(self.args.model_save_path,\
                                 f'{self.args.modelName}-{self.args.datasetName}-{self.args.tasks}.pth')
             if os.path.exists(model_path):
                 os.remove(model_path)
             # save model (move to CPU for a device-agnostic checkpoint, then back)
             torch.save(model.cpu().state_dict(), model_path)
             model.to(self.args.device)
             print('save model in %s...' % model_path)
             self.do_test(model, dataloader['test'], mode="TEST")
Example #15
0
    def do_train(self, model, dataloader):
        """Train with per-modality parameter groups and self-supervised label refinement.

        Args:
            model: wrapper whose `Model` attribute exposes `text_model`,
                `audio_model` and `video_model` submodules; called as
                model(text, (audio, audio_lengths), (vision, vision_lengths)).
            dataloader: dict with 'train'/'valid'/'test' iterables.

        Checkpoints the best model by self.args.KeyEval on the validation set
        and returns after self.args.early_stop epochs without improvement,
        optionally dumping the per-epoch refined labels to a pickle file.
        """
        # Per-modality optimizer groups: BERT parameters use the standard
        # no-decay split (biases / LayerNorm), each modality gets its own
        # learning rate and weight decay from self.args.
        bert_no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        bert_params = list(model.Model.text_model.named_parameters())
        audio_params = list(model.Model.audio_model.named_parameters())
        video_params = list(model.Model.video_model.named_parameters())

        bert_params_decay = [
            p for n, p in bert_params
            if not any(nd in n for nd in bert_no_decay)
        ]
        bert_params_no_decay = [
            p for n, p in bert_params if any(nd in n for nd in bert_no_decay)
        ]
        audio_params = [p for n, p in audio_params]
        video_params = [p for n, p in video_params]
        # Everything not belonging to the three modality encoders (fusion etc.).
        model_params_other = [p for n, p in list(model.Model.named_parameters()) if 'text_model' not in n and \
                                'audio_model' not in n and 'video_model' not in n]

        optimizer_grouped_parameters = [{
            'params': bert_params_decay,
            'weight_decay': self.args.weight_decay_bert,
            'lr': self.args.learning_rate_bert
        }, {
            'params': bert_params_no_decay,
            'weight_decay': 0.0,
            'lr': self.args.learning_rate_bert
        }, {
            'params': audio_params,
            'weight_decay': self.args.weight_decay_audio,
            'lr': self.args.learning_rate_audio
        }, {
            'params': video_params,
            'weight_decay': self.args.weight_decay_video,
            'lr': self.args.learning_rate_video
        }, {
            'params': model_params_other,
            'weight_decay': self.args.weight_decay_other,
            'lr': self.args.learning_rate_other
        }]
        optimizer = optim.Adam(optimizer_grouped_parameters)

        saved_labels = {}
        # Seed the unimodal label maps from the multimodal ground truth.
        logger.info("Init labels...")
        with tqdm(dataloader['train']) as td:
            for batch_data in td:
                labels_m = batch_data['labels']['M'].view(-1).to(
                    self.args.device)
                indexes = batch_data['index'].view(-1)
                self.init_labels(indexes, labels_m)

        # initialize results
        logger.info("Start training...")
        epochs, best_epoch = 0, 0
        min_or_max = 'min' if self.args.KeyEval in ['Loss'] else 'max'
        best_valid = 1e8 if min_or_max == 'min' else 0
        # loop until early stop
        while True:
            epochs += 1
            # train
            y_pred = {'M': [], 'T': [], 'A': [], 'V': []}
            y_true = {'M': [], 'T': [], 'A': [], 'V': []}
            model.train()
            train_loss = 0.0
            # Gradient accumulation: one optimizer step every `update_epochs` batches.
            left_epochs = self.args.update_epochs
            ids = []
            with tqdm(dataloader['train']) as td:
                for batch_data in td:
                    if left_epochs == self.args.update_epochs:
                        optimizer.zero_grad()
                    left_epochs -= 1

                    vision = batch_data['vision'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    text = batch_data['text'].to(self.args.device)
                    indexes = batch_data['index'].view(-1)
                    cur_id = batch_data['id']
                    ids.extend(cur_id)

                    if not self.args.need_data_aligned:
                        audio_lengths = batch_data['audio_lengths'].to(
                            self.args.device)
                        vision_lengths = batch_data['vision_lengths'].to(
                            self.args.device)
                    else:
                        # aligned data carries no per-sample lengths
                        audio_lengths, vision_lengths = 0, 0

                    # forward
                    outputs = model(text, (audio, audio_lengths),
                                    (vision, vision_lengths))
                    # store results (targets come from the refined label maps)
                    for m in self.args.tasks:
                        y_pred[m].append(outputs[m].cpu())
                        y_true[m].append(
                            self.label_map[self.name_map[m]][indexes].cpu())
                    # compute loss: weighted sum over all tasks
                    loss = 0.0
                    for m in self.args.tasks:
                        loss += self.weighted_loss(outputs[m], self.label_map[self.name_map[m]][indexes], \
                                                    indexes=indexes, mode=self.name_map[m])
                    # backward (gradients accumulate until the counter hits zero)
                    loss.backward()
                    train_loss += loss.item()
                    # update features (detach: bookkeeping must not keep the graph alive)
                    f_fusion = outputs['Feature_f'].detach()
                    f_text = outputs['Feature_t'].detach()
                    f_audio = outputs['Feature_a'].detach()
                    f_vision = outputs['Feature_v'].detach()
                    # label refinement only after the first epoch has built statistics
                    if epochs > 1:
                        self.update_labels(f_fusion, f_text, f_audio, f_vision,
                                           epochs, indexes, outputs)

                    self.update_features(f_fusion, f_text, f_audio, f_vision,
                                         indexes)
                    self.update_centers()

                    # update parameters
                    if not left_epochs:
                        # update
                        optimizer.step()
                        left_epochs = self.args.update_epochs
                # BUGFIX: flush a trailing partial accumulation. The previous
                # check (`if not left_epochs`) was never true here — the counter
                # is reset to update_epochs inside the loop whenever it reaches
                # 0 — so gradients from the final incomplete group were dropped.
                if left_epochs != self.args.update_epochs:
                    optimizer.step()
            train_loss = train_loss / len(dataloader['train'])
            logger.info("TRAIN-(%s) (%d/%d/%d)>> loss: %.4f " % (self.args.modelName, \
                        epochs-best_epoch, epochs, self.args.cur_time, train_loss))
            for m in self.args.tasks:
                pred, true = torch.cat(y_pred[m]), torch.cat(y_true[m])
                train_results = self.metrics(pred, true)
                logger.info('%s: >> ' % (m) + dict_to_str(train_results))
            # validation
            val_results = self.do_test(model, dataloader['valid'], mode="VAL")
            cur_valid = val_results[self.args.KeyEval]
            # save best model (1e-6 margin avoids churning on float noise)
            isBetter = cur_valid <= (
                best_valid - 1e-6) if min_or_max == 'min' else cur_valid >= (
                    best_valid + 1e-6)
            if isBetter:
                best_valid, best_epoch = cur_valid, epochs
                # save model (move to CPU for a device-agnostic checkpoint, then back)
                torch.save(model.cpu().state_dict(), self.args.model_save_path)
                model.to(self.args.device)
            # save labels
            if self.args.save_labels:
                tmp_save = {
                    k: v.cpu().numpy()
                    for k, v in self.label_map.items()
                }
                tmp_save['ids'] = ids
                saved_labels[epochs] = tmp_save
            # early stop
            if epochs - best_epoch >= self.args.early_stop:
                if self.args.save_labels:
                    with open(
                            os.path.join(
                                self.args.res_save_dir,
                                f'{self.args.modelName}-{self.args.datasetName}-labels.pkl'
                            ), 'wb') as df:
                        plk.dump(saved_labels, df, protocol=4)
                return
Example #16
0
    def do_train(self, model, dataloader):
        """Train `model` with Adam, gradient clipping and an LR plateau scheduler.

        Args:
            model: called as model(text, audio, vision); its 'M' output feeds
                self.criterion against the long-cast 'M' labels.
            dataloader: dict with 'train'/'valid'/'test' iterables.

        Returns:
            epoch_results: dict of per-epoch metric dicts for
            'train'/'valid'/'test', returned when early stopping triggers.
        """
        optimizer = optim.Adam(model.parameters(), lr=self.args.learning_rate)
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, verbose=True, patience=self.args.patience)
        # initialize results
        epochs, best_epoch = 0, 0
        epoch_results = {
            'train': [],
            'valid': [],
            'test': []
        }
        min_or_max = 'min' if self.args.KeyEval in ['Loss'] else 'max'
        best_valid = 1e8 if min_or_max == 'min' else 0
        # loop until early stop
        while True: 
            epochs += 1
            # train
            y_pred, y_true = [], []
            model.train()
            train_loss = 0.0
            # Gradient accumulation: one optimizer step every `update_epochs` batches.
            left_epochs = self.args.update_epochs
            with tqdm(dataloader['train']) as td:
                for batch_data in td:
                    if left_epochs == self.args.update_epochs:
                        optimizer.zero_grad()
                    left_epochs -= 1
                    vision = batch_data['vision'].to(self.args.device)
                    audio = batch_data['audio'].to(self.args.device)
                    text = batch_data['text'].to(self.args.device)
                    labels = batch_data['labels']["M"].to(self.args.device).view(-1).long()
                    # forward
                    outputs = model(text, audio, vision)
                    # compute loss
                    loss = self.criterion(outputs["M"], labels)
                    # backward (gradients accumulate until the counter hits zero)
                    loss.backward()
                    # store results
                    train_loss += loss.item()
                    y_pred.append(outputs['M'].detach().cpu())
                    y_true.append(labels.detach().cpu())
                    # BUGFIX: step only at accumulation boundaries. The previous
                    # code called optimizer.step() unconditionally every batch
                    # AND again when the counter hit zero, so the optimizer
                    # stepped on partially-accumulated gradients each batch and
                    # twice on the same gradients at boundaries (twice per batch
                    # when update_epochs == 1). Clipping now happens right
                    # before each step so the full accumulated gradient is bounded.
                    if not left_epochs:
                        nn.utils.clip_grad_value_([param for param in model.parameters() if param.requires_grad], self.args.grad_clip)
                        optimizer.step()
                        left_epochs = self.args.update_epochs
                # Flush a trailing partial accumulation (the old `if not
                # left_epochs` check was never true here, dropping those gradients).
                if left_epochs != self.args.update_epochs:
                    nn.utils.clip_grad_value_([param for param in model.parameters() if param.requires_grad], self.args.grad_clip)
                    optimizer.step()
            train_loss = train_loss / len(dataloader['train'])

            pred, true = torch.cat(y_pred), torch.cat(y_true)
            train_results = self.metrics(pred, true)
            train_results["Loss"] = round(train_loss, 4)
            epoch_results['train'].append(train_results)
            print("TRAIN-(%s) (%d/%d)>> loss: %.4f " % (self.args.modelName, \
                epochs - best_epoch, epochs, train_loss) + dict_to_str(train_results))
            # validation
            val_results = self.do_test(model, dataloader['valid'], mode="VAL")
            test_results = self.do_test(model, dataloader['test'], mode="TEST")
            epoch_results['valid'].append(val_results)
            epoch_results['test'].append(test_results)

            cur_valid = val_results[self.args.KeyEval]
            # scheduler tracks validation loss regardless of KeyEval
            scheduler.step(val_results['Loss'])
            # save best model
            isBetter = cur_valid <= best_valid if min_or_max == 'min' else cur_valid >= best_valid
            if isBetter:
                best_valid, best_epoch = cur_valid, epochs
                # save model (move to CPU for a device-agnostic checkpoint, then back)
                torch.save(model.cpu().state_dict(), self.args.model_save_path)
                model.to(self.args.device)
            # early stop
            if epochs - best_epoch >= self.args.early_stop:
                return epoch_results