Example #1
    def finalize_metrics(self, ks=(1, 5)):
        """
        Calculate and log the final ensembled metrics.
        ks (tuple): list of top-k values for topk_accuracies. For example,
            ks = (1, 5) corresponds to top-1 and top-5 accuracy.
        """
        if not all(self.clip_count == self.num_clips):
            logger.warning("clip count {} ~= num clips {}".format(
                ", ".join([
                    "{}: {}".format(i, k)
                    for i, k in enumerate(self.clip_count.tolist())
                ]),
                self.num_clips,
            ))

        stats = {"split": "test_final"}
        if self.multi_label:
            map = get_map(self.video_preds.cpu().numpy(),
                          self.video_labels.cpu().numpy())
            stats["map"] = map
        else:
            num_topks_correct = metrics.topks_correct(self.video_preds,
                                                      self.video_labels, ks)
            topks = [(x / self.video_preds.size(0)) * 100.0
                     for x in num_topks_correct]
            assert len({len(ks), len(topks)}) == 1
            for k, topk in zip(ks, topks):
                stats["top{}_acc".format(k)] = "{:.{prec}f}".format(topk,
                                                                    prec=2)
        logging.log_json_stats(stats)
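
Every example on this page calls metrics.topks_correct(preds, labels, ks). The sketch below is a plausible reconstruction inferred only from these call sites (preds is an (N, C) score tensor, labels an (N,) index tensor, and the helper returns one correct-prediction count per k); the actual implementation in the source repositories may differ.

import torch

def topks_correct(preds, labels, ks):
    """For each k in ks, count samples whose true label is among the
    top-k scoring classes. A hypothetical sketch, not the source code."""
    max_k = max(ks)
    # Indices of the max_k highest-scoring classes per sample: (N, max_k).
    _, top_idx = preds.topk(max_k, dim=1, largest=True, sorted=True)
    # Compare each top-k column against the true label.
    hits = top_idx.eq(labels.view(-1, 1))
    # A sample is correct at k if any of its first k columns hit.
    return [hits[:, :k].any(dim=1).float().sum() for k in ks]
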
Example #2
def eval_epoch(val_loader, model, epoch, cfg):
    '''Evaluate the model on the val set.

    Args:
      val_loader (loader): data loader to provide validation data.
      model (model): model to evaluate the performance.
      epoch (int): number of the current epoch of training.
      cfg (CfgNode): configs. Details can be found in config/defaults.py
    '''
    if is_master_proc():
        log.info('Testing..')

    model.eval()
    test_loss = 0.0
    correct = total = 0.0
    for batch_idx, (inputs, labels) in enumerate(val_loader):
        inputs, labels = inputs.cuda(non_blocking=True), labels.cuda()
        outputs = model(inputs)
        loss = F.cross_entropy(outputs, labels, reduction='mean')

        # Gather all predictions across all devices.
        if cfg.NUM_GPUS > 1:
            loss = all_reduce([loss])[0]
            outputs, labels = all_gather([outputs, labels])

        # Accuracy.
        batch_correct = topks_correct(outputs, labels, (1, ))[0]
        correct += batch_correct.item()
        total += labels.size(0)

        if is_master_proc():
            test_loss += loss.item()
            test_acc = correct / total
            log.info('Loss: %.3f | Acc: %.3f' % (test_loss /
                                                 (batch_idx + 1), test_acc))
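
Note that the validation loop above runs the forward pass with autograd enabled. A minimal sketch, not from the source, of the usual inference-mode variant under the same loader and model interface:

import torch

@torch.no_grad()  # skip building autograd graphs during evaluation
def evaluate(model, loader):
    model.eval()
    correct = total = 0
    for inputs, labels in loader:
        outputs = model(inputs.cuda(non_blocking=True))
        correct += (outputs.argmax(dim=1) == labels.cuda()).sum().item()
        total += labels.size(0)
    return correct / total
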
Example #3
def valid_epoch_wo_outlier(model, in_loader, loss_func, cur_epoch):
    global global_cfg
    model.eval()
    avg_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        # Data to GPU
        data = in_set[0]
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()

        # Forward propagation and calculate loss
        (g_logits, h_logits, f_logits) = model(data, cur_epoch)
        loss = F.cross_entropy(g_logits[:len(targets)], targets)

        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(g_logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # Add additional metrics!!

        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct

    summary = {
        'avg_loss': avg_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'epoch': cur_epoch,
    }

    return summary
Example #4
def valid_epoch_w_outlier(model, in_loader, out_loader, loss_func, detector_func, cur_epoch):
    global global_cfg  
    model.eval()
    avg_loss = 0
    correct = 0
    total = 0
    max_iter = 0
    avg_auroc = 0
    avg_aupr = 0
    avg_fpr = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):        
        # Data to GPU
        data = torch.cat((in_set[0], out_set[0]), 0)
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()
        
        # Forward propagation and calculate loss and confidence
        logits = model(data)
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        confidences_dict = detector_func(logits, targets, global_cfg['detector'])
        confidences = confidences_dict['confidences']
        
        ## METRICS ##
        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))
        [top1_correct] = num_topks_correct
        
        # Calculate OOD metrics (auroc, aupr, fpr)
        (auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)
        
        # Add additional metrics!!!
        
        ## Update stats ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
        max_iter += 1
        avg_auroc += auroc
        avg_aupr += aupr
        avg_fpr += fpr
        
    
    summary = {
        'avg_loss': avg_loss / total,
        'classifier_acc': correct / total,
        'AUROC': avg_auroc / max_iter,
        'AUPR' : avg_aupr / max_iter,
        'FPR95': avg_fpr / max_iter,
        'epoch': cur_epoch,
    }
    
    return summary
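
The OOD numbers above come from metrics.get_ood_measures(confidences, targets), which is not shown in the source. A hypothetical sketch, assuming confidences scores the in-distribution samples in its first len(targets) rows and the outliers after them (as the slicing in Example #11 suggests):

import numpy as np
from sklearn import metrics as sk

def get_ood_measures(confidences, targets):
    """Hypothetical reconstruction: AUROC, AUPR, and FPR at 95% TPR,
    treating inliers as the positive class."""
    n_in = len(targets)
    scores = confidences.detach().cpu().numpy().ravel()
    y_true = np.concatenate([np.ones(n_in), np.zeros(len(scores) - n_in)])
    auroc = sk.roc_auc_score(y_true, scores)
    aupr = sk.average_precision_score(y_true, scores)
    # FPR at the first threshold where TPR reaches 95%.
    fpr, tpr, _ = sk.roc_curve(y_true, scores)
    fpr95 = fpr[np.searchsorted(tpr, 0.95)]
    return auroc, aupr, fpr95
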
Example #5
def train_epoch_wo_outlier(model, optimizer, in_loader, loss_func, cur_epoch, op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        #TODO: Dimension of in_set and out_set should be checked!
        # Data to GPU
        data = in_set[0]
        targets = in_set[1]
        if cur_iter == 0:
            writer.add_image('in_dist target {}'.format(targets[0]), data[0], cur_epoch)
        data, targets = data.cuda(), targets.cuda()

        # Adjust Learning rate
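        # Note: cur_iter is divided by the dataset size here rather than by
        # the number of batches; compare Example #7, which advances the
        # schedule with batch_idx / num_batches.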
        lr = optim.get_lr_at_epoch(op_cfg, cur_epoch + float(cur_iter) / in_data_size)
        optim.set_lr(optimizer, lr)
        
        # Forward propagation and calculate loss
        logits = model(data)
        
        (ava_logits, ova_logits) = logits
        
        #print(logits.size())
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        
        logits = F.softmax(ava_logits, dim=1)     
        
        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        
        
        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))
        [top1_correct] = num_topks_correct
        
        # Add additional metrics!!!
        
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
    
    summary = {
        'avg_loss': avg_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }
    
    return summary
Example #6
def valid_epoch_wo_outlier(model,
                           in_loader,
                           loss_func,
                           cur_epoch,
                           logfile2,
                           attack_in=None):
    global global_cfg
    model.eval()
    avg_loss = 0
    correct = 0
    total = 0

    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        # Data to GPU
        data = in_set[0]
        targets = in_set[1].cuda()

        if attack_in is not None:
            adv_inputs = attack_in.perturb(data.cuda(), targets)
            data = adv_inputs.cuda()
        else:
            data, targets = data.cuda(), targets.cuda()

        # Forward propagation and calculate loss
        (g_logits, h_logits, f_logits) = model(data)
        loss = F.cross_entropy(g_logits[:len(targets)], targets)

        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(g_logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # Add additional metrics!!!
        metrics.show_wrong_samples_targets(g_logits[:len(targets)], targets,
                                           logfile2)

        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)

    logfile2.write(
        "Dataset size [{}] | Total samples [{}] | Correct samples [{}]\n".
        format(in_data_size, total, correct))
    summary = {
        'avg_loss': avg_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'epoch': cur_epoch,
    }

    return summary
Example #7
def train_epoch(train_loader, model, optimizer, epoch, cfg):
    '''Epoch training.

    Args:
      train_loader (DataLoader): training data loader.
      model (model): the video model to train.
      optimizer (optim): the optimizer to perform optimization on the model's parameters.
      epoch (int): current epoch of training.
      cfg (CfgNode): configs. Details can be found in config/defaults.py
    '''
    if is_master_proc():
        log.info('Epoch: %d' % epoch)

    model.train()
    num_batches = len(train_loader)
    train_loss = 0.0
    correct = total = 0.0
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.cuda(non_blocking=True), labels.cuda()

        # Update lr.
        lr = get_epoch_lr(cfg, epoch + float(batch_idx) / num_batches)
        set_lr(optimizer, lr)

        # Forward.
        outputs = model(inputs)
        loss = F.cross_entropy(outputs, labels, reduction='mean')

        # Backward.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Gather all predictions across all devices.
        if cfg.NUM_GPUS > 1:
            loss = all_reduce([loss])[0]
            outputs, labels = all_gather([outputs, labels])

        # Accuracy.
        batch_correct = topks_correct(outputs, labels, (1, ))[0]
        correct += batch_correct.item()
        total += labels.size(0)

        if is_master_proc():
            train_loss += loss.item()
            train_acc = correct / total
            log.info('Loss: %.3f | Acc: %.3f | LR: %.3f' %
                     (train_loss / (batch_idx + 1), train_acc, lr))
Example #8
def valid_epoch_wo_outlier(model, in_loader, loss_func, cur_epoch, logfile2):
    global global_cfg
    model.eval()
    avg_loss = 0
    correct = 0
    total = 0

    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        # Data to GPU
        data = in_set[0]
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()

        # Forward propagation and calculate loss
        logits = model(data)
        (ava_logits, ova_logits) = logits

        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        #logits = F.softmax(ava_logits, dim=1) * torch.sigmoid(ova_logits)
        logits = F.softmax(ava_logits, dim=1)
        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # Add additional metrics!!!
        metrics.show_wrong_samples_targets(logits[:len(targets)], targets,
                                           logfile2)

        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)

    logfile2.write(
        "Dataset size [{}] | Total samples [{}] | Correct samples [{}]\n".
        format(in_data_size, total, correct))
    summary = {
        'avg_loss': avg_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'epoch': cur_epoch,
    }

    return summary
Example #9
def train_epoch_w_outlier(model, optimizer, in_loader, out_loader, loss_func, detector_func, cur_epoch, op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    correct = 0
    total = 0
    in_data_size = len(in_loader.dataset)
    out_loader.dataset.offset = np.random.randint(len(out_loader.dataset))
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):
        #TODO: Dimension of in_set and out_set should be checked!
        
        # Data to GPU
        data = torch.cat((in_set[0], out_set[0]), 0)
        targets = in_set[1]
        if cur_iter == 0:
            writer.add_image('in_dist sample, target:[{}]'.format(targets[0]), in_set[0][0], cur_epoch)
            writer.add_image('out_dist sample', out_set[0][0], cur_epoch)
        data, targets = data.cuda(), targets.cuda()
        
        # Adjust Learning rate
        lr = optim.get_lr_at_epoch(op_cfg, cur_epoch + float(cur_iter) / in_data_size)
        optim.set_lr(optimizer, lr)
        
        # Forward propagation and calculate loss and confidence
        logits = model(data)
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        confidences_dict = detector_func(logits, targets, global_cfg['detector'])
        confidences = confidences_dict['confidences']

        
        # Back propagation 
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        ## METRICS ##
        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))
        [top1_correct] = num_topks_correct
        
        # Calculate OOD metrics (auroc, aupr, fpr)
        #(auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)
        
        # Add additional metrics!!!
        
        
        ## UPDATE STATS ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
    
    summary = {
        'avg_loss': avg_loss / total,
        'classifier_acc': correct / total,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }
    
    return summary
Example #10
def valid_epoch_wo_outlier(model, in_loader, cur_epoch):
    global global_cfg
    model.eval()
    avg_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, (x_tf_0, x_tf_90, x_tf_180, x_tf_270, targets) in enumerate(in_loader):
        
        batch_size = x_tf_0.shape[0]
        
        assert x_tf_0.shape[0] == \
            x_tf_90.shape[0] == \
            x_tf_180.shape[0] == \
            x_tf_270.shape[0] == \
            targets.shape[0]
#             x_tf_trans.shape[0] == \
#             target_trans_x.shape[0] == \
#             target_trans_y.shape[0] == \
            
        
        batch = np.concatenate((
            x_tf_0,
            x_tf_90,
            x_tf_180,
            x_tf_270
            #x_tf_trans
        ), 0)
        batch = torch.FloatTensor(batch).cuda()
        
        target_rots = torch.cat((
            torch.zeros(batch_size),
            torch.ones(batch_size),
            2 * torch.ones(batch_size),
            3 * torch.ones(batch_size)
        ), 0).long()      
        
        logits, pen = model(batch)
        
        classification_logits = logits[:batch_size]
        rot_logits            = model.rot_head(pen[:4*batch_size])
#         x_trans_logits        = net.x_trans_head(pen[4*batch_size:])
#         y_trans_logits        = net.y_trans_head(pen[4*batch_size:])
        
        classification_loss = F.cross_entropy(classification_logits, targets.cuda())
        rot_loss = F.cross_entropy(rot_logits, target_rots.cuda()) * global_cfg['loss']['rot_weight']
#         x_trans_loss = F.cross_entropy(x_trans_logits, target_trans_x.cuda()) * global_cfg['loss']['trans_weight']
#         y_trans_loss = F.cross_entropy(y_trans_logits, target_trans_y.cuda()) * global_cfg['loss']['trans_weight']
        
        #loss = classification_loss + ((rot_loss + x_trans_loss + y_trans_loss) / 3.0)
        loss = classification_loss + rot_loss
        
        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(logits[:batch_size], targets.cuda(), (1,))
        [top1_correct] = num_topks_correct
        
        # Add additional metrics!!      
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
    
    summary = {
        'avg_loss': avg_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'epoch': cur_epoch,
    }
    
    return summary
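
Examples #10 and #17 expect the loader to yield four rotated views of each image. A hypothetical dataset wrapper producing that tuple (the actual loader is not shown in the source):

import torch
from torch.utils.data import Dataset

class RotationDataset(Dataset):
    """Wraps an (image, label) dataset and emits 0/90/180/270-degree views."""

    def __init__(self, base):
        self.base = base

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        x, y = self.base[idx]  # x: (C, H, W) tensor
        return (x,
                torch.rot90(x, 1, dims=(1, 2)),
                torch.rot90(x, 2, dims=(1, 2)),
                torch.rot90(x, 3, dims=(1, 2)),
                y)
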
Example #11
def valid_epoch_w_outlier(model, in_loader, out_loader, loss_func,
                          detector_func, cur_epoch, logfile2):
    model.eval()
    global global_cfg
    avg_loss = 0
    correct = 0
    total = 0
    max_iter = 0
    avg_auroc = 0
    avg_aupr = 0
    avg_fpr = 0
    inlier_conf = 0
    outlier_conf = 0
    avg_acc = 0
    in_data_size = len(in_loader.dataset)
    inliers_conf = []
    outliers_conf = []
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):
        # Data to GPU
        data = torch.cat((in_set[0], out_set[0]), 0)
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()
        #print("in {} out {}".format(in_set[0].size(), out_set[0].size()))
        # Forward propagation and calculate loss and confidence
        logits = model(data)

        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        confidences_dict = detector_func(logits, targets,
                                         global_cfg['detector'])
        confidences = confidences_dict['confidences']

        ## METRICS ##
        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # Calculate OOD metrics (auroc, aupr, fpr)
        (auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)

        # Add additional metrics!!!
        metrics.show_wrong_samples_targets(logits[:len(targets)], targets,
                                           logfile2)
        acc = metrics.classify_acc_w_ood(logits, targets, confidences)

        ## Update stats ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
        max_iter += 1
        avg_auroc += auroc
        avg_aupr += aupr
        avg_fpr += fpr
        inlier_conf += confidences_dict['inlier_mean']
        outlier_conf += confidences_dict['outlier_mean']
        inliers_conf.append(confidences[:len(targets)].squeeze(1).data.cpu())
        outliers_conf.append(confidences[len(targets):].squeeze(1).data.cpu())
        avg_acc += acc

    summary = {
        'avg_loss': avg_loss / total,
        'classifier_acc': correct / total,
        'AUROC': avg_auroc / max_iter,
        'AUPR': avg_aupr / max_iter,
        'FPR95': avg_fpr / max_iter,
        'inlier_confidence': inlier_conf / max_iter,
        'outlier_confidence': outlier_conf / max_iter,
        'inliers': torch.cat(inliers_conf).numpy(),
        'outliers': torch.cat(outliers_conf).numpy(),
        'acc': avg_acc / max_iter,
        'epoch': cur_epoch,
    }

    return summary
Example #12
def valid_epoch_wo_outlier(model, in_loader, detector_func):
    global global_cfg
    model.eval()

    h_confidences_list = []
    f_confidences_list = []
    targets_list = []
    h_logits_list = []
    total = 0
    correct = 0

    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        # Data to GPU
        data = in_set[0]
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()

        ### Input preprocessing
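        # ODIN-style preprocessing: take the gradient of the max logit with
        # respect to the input, keep only its sign (mapped to {-1, +1}), and
        # rescale each RGB channel by what appear to be CIFAR normalization
        # stds (0.2023, 0.1994, 0.2010) before perturbing the input below.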
        inputs = Variable(data, requires_grad=True)
        _, h_logits = model(inputs)
        loss = torch.max(h_logits, dim=1).values.mean()
        loss.backward()

        gradient = torch.ge(inputs.grad.data, 0)
        gradient = (gradient.float() - 0.5) * 2
        gradient.index_copy_(
            1,
            torch.LongTensor([0]).cuda(),
            gradient.index_select(1,
                                  torch.LongTensor([0]).cuda()) / (0.2023))
        gradient.index_copy_(
            1,
            torch.LongTensor([1]).cuda(),
            gradient.index_select(1,
                                  torch.LongTensor([1]).cuda()) / (0.1994))
        gradient.index_copy_(
            1,
            torch.LongTensor([2]).cuda(),
            gradient.index_select(1,
                                  torch.LongTensor([2]).cuda()) / (0.2010))

        tempInputs = torch.add(data.data,
                               gradient,
                               alpha=-global_cfg['detector']['magnitude'])
        ###

        # Forward propagation on the preprocessed inputs
        with torch.no_grad():
            f_logits, h_logits = model(Variable(tempInputs))

        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        h_confidences_dict = detector_func(h_logits, targets,
                                           global_cfg['detector'])
        f_confidences_dict = detector_func(f_logits, targets,
                                           global_cfg['detector'])

        h_confidences_list.append(h_confidences_dict['confidences'].cpu())
        f_confidences_list.append(f_confidences_dict['confidences'].cpu())
        targets_list.append(targets.cpu())
        h_logits_list.append(h_logits.cpu())

        num_topks_correct = metrics.topks_correct(h_logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct
        top1_correct = top1_correct.item()
        correct += top1_correct
        total += targets.size(0)

    summary = {
        'classifier_acc': correct / total,
        'h_confidences': torch.cat(h_confidences_list, dim=0),  # (Bs, 1)
        'f_confidences': torch.cat(f_confidences_list, dim=0),
        'targets': torch.cat(targets_list, dim=0),  # (Bs,)
        'logits': torch.cat(h_logits_list, dim=0)  # (Bs, num_classes)
    }

    return summary
Example #13
def train_epoch_wo_outlier(model, optimizer, in_loader, loss_func, cur_epoch,
                           op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    avg_sup_loss = 0
    avg_con_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        #TODO: Dimension of in_set and out_set should be checked!

        # Data to GPU
        data = in_set[0]
        if cur_iter == 0:
            writer.add_image('Original', data[0], cur_epoch)

        # Transform imgs
        data = np.transpose((data.numpy() * 255).round().astype(np.uint8),
                            (0, 2, 3, 1))
        images_aug = seq0(images=data)
        data0 = torch.from_numpy(
            np.transpose((np.stack(images_aug, 0).astype(np.float32) / 255.),
                         (0, 3, 1, 2)))
        images_aug = seq1(images=data)
        data1 = torch.from_numpy(
            np.transpose((np.stack(images_aug, 0).astype(np.float32) / 255.),
                         (0, 3, 1, 2)))
        if cur_iter == 0:
            writer.add_image('Transform0', data0[0], cur_epoch)
            writer.add_image('Transform1', data1[0], cur_epoch)

        data = torch.cat((data0, data1), 0)
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()

        # Adjust Learning rate
        lr = optim.get_lr_at_epoch(op_cfg,
                                   cur_epoch + float(cur_iter) / in_data_size)
        optim.set_lr(optimizer, lr)

        # Forward propagation and calculate loss
        (g_logits, h_logits, f_logits) = model(data, cur_epoch)
        # logits: (g, h)
        logits = (g_logits, h_logits)
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        sup_loss = loss_dict['sup_loss']
        con_loss = loss_dict['con_loss']

        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(g_logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # Add additional metrics!!!

        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        avg_sup_loss += sup_loss
        avg_con_loss += con_loss
        correct += top1_correct

    summary = {
        'avg_loss': avg_loss / in_data_size,
        'avg_sup_loss': avg_sup_loss / in_data_size,
        'avg_con_loss': avg_con_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }

    return summary
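
The seq0/seq1 pipelines applied above are not defined in this snippet; the seq(images=...) call signature suggests imgaug augmenters operating on uint8 HWC batches. A hypothetical definition (the actual transforms in the source are unknown):

import imgaug.augmenters as iaa

# Two different augmentation views for the supervised/contrastive branches.
seq0 = iaa.Sequential([
    iaa.Fliplr(0.5),               # horizontal flip with probability 0.5
    iaa.Affine(rotate=(-15, 15)),  # small random rotation
])
seq1 = iaa.Sequential([
    iaa.CropAndPad(percent=(-0.1, 0.1)),  # random crop or pad up to 10%
    iaa.GaussianBlur(sigma=(0.0, 1.0)),   # mild random blur
])
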
Example #14
def valid_epoch_w_outlier(model,
                          in_loader,
                          out_loader,
                          loss_func,
                          detector_func,
                          cur_epoch,
                          logfile2,
                          attack_in=None,
                          attack_out=None):
    model.eval()
    global global_cfg
    avg_loss = 0
    correct = 0
    total = 0
    max_iter = 0
    avg_auroc = 0
    avg_aupr = 0
    avg_fpr = 0
    inlier_conf = 0
    outlier_conf = 0
    avg_acc = 0
    in_data_size = len(in_loader.dataset)
    inliers_conf = []
    outliers_conf = []
    f_logits_list = []
    targets_list = []
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):
        in_data = in_set[0]
        out_data = out_set[0]
        targets = in_set[1].cuda()

        if attack_in is not None:
            adv_inputs = attack_in.perturb(in_data.cuda(), targets)
            in_data = adv_inputs.cuda()

        if attack_out is not None:
            adv_inputs = attack_out.perturb(out_data.cuda())
            out_data = adv_inputs.cuda()

        # Data to GPU
        data = torch.cat((in_data, out_data), 0)
        data = data.cuda()
        #print("in {} out {}".format(in_set[0].size(), out_set[0].size()))
        # Forward propagation and calculate loss and confidence
        (g_logits, h_logits, f_logits) = model(data)
        f_logits_list.append(f_logits.data.cpu())
        ood_num = f_logits.size(0) - len(targets)
        ood_targets = torch.zeros(ood_num)
        ood_targets += g_logits.size(1)
        save_targets = torch.cat(
            (targets.data.cpu(), ood_targets.type(torch.LongTensor)), 0)
        targets_list.append(save_targets)
        loss = F.cross_entropy(g_logits[:len(targets)], targets)

        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        confidences_dict = detector_func(f_logits, targets,
                                         global_cfg['detector'])
        confidences = confidences_dict['confidences']

        ## METRICS ##
        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(g_logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # Calculate OOD metrics (auroc, aupr, fpr)
        (auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)

        # Add additional metrics!!!
        metrics.show_wrong_samples_targets(g_logits[:len(targets)], targets,
                                           logfile2)
        acc = metrics.classify_acc_w_ood(g_logits, targets, confidences)

        ## Update stats ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
        max_iter += 1
        avg_auroc += auroc
        avg_aupr += aupr
        avg_fpr += fpr
        inlier_conf += confidences_dict['inlier_mean']
        outlier_conf += confidences_dict['outlier_mean']
        inliers_conf.append(confidences[:len(targets)].squeeze(1).data.cpu())
        outliers_conf.append(confidences[len(targets):].squeeze(1).data.cpu())
        avg_acc += acc

    summary = {
        'avg_loss': avg_loss / total,
        'classifier_acc': correct / total,
        'AUROC': avg_auroc / max_iter,
        'AUPR': avg_aupr / max_iter,
        'FPR95': avg_fpr / max_iter,
        'inlier_confidence': inlier_conf / max_iter,
        'outlier_confidence': outlier_conf / max_iter,
        'inliers': torch.cat(inliers_conf).numpy(),
        'outliers': torch.cat(outliers_conf).numpy(),
        'acc': avg_acc / max_iter,
        'epoch': cur_epoch,
        'logits': torch.cat(f_logits_list, dim=0),
        'targets': torch.cat(targets_list, dim=0),  # (Bs,)
    }

    return summary
Example #15
def train_epoch(
    train_loader, model, optimizer, train_meter, cur_epoch, cfg, writer=None
):
    """
    Perform the video training for one epoch.
    Args:
        train_loader (loader): video training loader.
        model (model): the video model to train.
        optimizer (optim): the optimizer to perform optimization on the model's
            parameters.
        train_meter (TrainMeter): training meters to log the training performance.
        cur_epoch (int): current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to write Tensorboard logs.
    """
    # Enable train mode.
    model.train()
    train_meter.iter_tic()
    data_size = len(train_loader)

    for cur_iter, (inputs, labels, _, meta) in enumerate(train_loader):
        # Transfer the data to the current GPU device.
        if isinstance(inputs, (list,)):
            for i in range(len(inputs)):
                inputs[i] = inputs[i].cuda(non_blocking=True)
        else:
            inputs = inputs.cuda(non_blocking=True)
        labels = labels.cuda()
        for key, val in meta.items():
            if isinstance(val, (list,)):
                for i in range(len(val)):
                    val[i] = val[i].cuda(non_blocking=True)
            else:
                meta[key] = val.cuda(non_blocking=True)

        # Update the learning rate.
        lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
        optim.set_lr(optimizer, lr)

        if cfg.DETECTION.ENABLE:
            # Compute the predictions.
            preds = model(inputs, meta["boxes"])

        else:
            # Perform the forward pass.
            preds = model(inputs)
        # Explicitly declare reduction to mean.
        loss_fun = losses.get_loss_func(cfg.MODEL.LOSS_FUNC)(reduction="mean")

        # Compute the loss.
        loss = loss_fun(preds, labels)

        # Check for NaN loss.
        misc.check_nan_losses(loss)

        # Perform the backward pass.
        optimizer.zero_grad()
        loss.backward()
        # Update the parameters.
        optimizer.step()

        if cfg.DETECTION.ENABLE:
            if cfg.NUM_GPUS > 1:
                loss = du.all_reduce([loss])[0]
            loss = loss.item()

            train_meter.iter_toc()
            # Update and log stats.
            train_meter.update_stats(None, None, None, loss, lr)
            # write to tensorboard format if available.
            if writer is not None:
                writer.add_scalars(
                    {"Train/loss": loss, "Train/lr": lr},
                    global_step=data_size * cur_epoch + cur_iter,
                )

        else:
            top1_err, top5_err = None, None
            if cfg.DATA.MULTI_LABEL:
                # Gather all the predictions across all the devices.
                if cfg.NUM_GPUS > 1:
                    [loss] = du.all_reduce([loss])
                loss = loss.item()
            else:
                # Compute the errors.
                num_topks_correct = metrics.topks_correct(
                    preds, labels, (1, 5))
                top1_err, top5_err = [
                    (1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
                ]

                # Gather all the predictions across all the devices.
                if cfg.NUM_GPUS > 1:
                    loss, top1_err, top5_err = du.all_reduce(
                        [loss, top1_err, top5_err]
                    )

                # Copy the stats from GPU to CPU (sync point).
                loss, top1_err, top5_err = (
                    loss.item(),
                    top1_err.item(),
                    top5_err.item(),
                )

            train_meter.iter_toc()
            # Update and log stats.
            train_meter.update_stats(
                top1_err, top5_err, loss, lr, inputs[0].size(0) * cfg.NUM_GPUS
            )
            # write to tensorboard format if available.
            if writer is not None:
                writer.add_scalars(
                    {
                        "Train/loss": loss,
                        "Train/lr": lr,
                        "Train/Top1_err": top1_err,
                        "Train/Top5_err": top5_err,
                    },
                    global_step=data_size * cur_epoch + cur_iter,
                )

        train_meter.log_iter_stats(cur_epoch, cur_iter)
        train_meter.iter_tic()

    # Log epoch stats.
    train_meter.log_epoch_stats(cur_epoch)
    train_meter.reset()
Example #16
def eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer=None):
    """
    Evaluate the model on the val set.
    Args:
        val_loader (loader): data loader to provide validation data.
        model (model): model to evaluate the performance.
        val_meter (ValMeter): meter instance to record and calculate the metrics.
        cur_epoch (int): number of the current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            slowfast/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to write Tensorboard logs.
    """

    # Evaluation mode enabled. The running stats would not be updated.
    model.eval()
    val_meter.iter_tic()

    for cur_iter, (inputs, labels, _, meta) in enumerate(val_loader):
        # Transfer the data to the current GPU device.
        if isinstance(inputs, (list,)):
            for i in range(len(inputs)):
                inputs[i] = inputs[i].cuda(non_blocking=True)
        else:
            inputs = inputs.cuda(non_blocking=True)
        labels = labels.cuda()
        for key, val in meta.items():
            if isinstance(val, (list,)):
                for i in range(len(val)):
                    val[i] = val[i].cuda(non_blocking=True)
            else:
                meta[key] = val.cuda(non_blocking=True)

        if cfg.DETECTION.ENABLE:
            # Compute the predictions.
            preds = model(inputs, meta["boxes"])

            preds = preds.cpu()
            ori_boxes = meta["ori_boxes"].cpu()
            metadata = meta["metadata"].cpu()

            if cfg.NUM_GPUS > 1:
                preds = torch.cat(du.all_gather_unaligned(preds), dim=0)
                ori_boxes = torch.cat(
                    du.all_gather_unaligned(ori_boxes), dim=0)
                metadata = torch.cat(du.all_gather_unaligned(metadata), dim=0)

            val_meter.iter_toc()
            # Update and log stats.
            val_meter.update_stats(
                preds.cpu(), ori_boxes.cpu(), metadata.cpu())

        else:
            preds = model(inputs)

            if cfg.DATA.MULTI_LABEL:
                if cfg.NUM_GPUS > 1:
                    preds, labels = du.all_gather([preds, labels])
            else:
                # Compute the errors.
                num_topks_correct = metrics.topks_correct(
                    preds, labels, (1, 5))

                # Combine the errors across the GPUs.
                top1_err, top5_err = [
                    (1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
                ]
                if cfg.NUM_GPUS > 1:
                    top1_err, top5_err = du.all_reduce([top1_err, top5_err])

                # Copy the errors from GPU to CPU (sync point).
                top1_err, top5_err = top1_err.item(), top5_err.item()

                val_meter.iter_toc()
                # Update and log stats.
                val_meter.update_stats(
                    top1_err, top5_err, inputs[0].size(0) * cfg.NUM_GPUS
                )
                # write to tensorboard format if available.
                if writer is not None:
                    writer.add_scalars(
                        {"Val/Top1_err": top1_err, "Val/Top5_err": top5_err},
                        global_step=len(val_loader) * cur_epoch + cur_iter,
                    )

            val_meter.update_predictions(preds, labels)

        val_meter.log_iter_stats(cur_epoch, cur_iter)
        val_meter.iter_tic()

    # Log epoch stats.
    val_meter.log_epoch_stats(cur_epoch)
    # write to tensorboard format if available.
    if writer is not None:
        if cfg.DETECTION.ENABLE:
            writer.add_scalars(
                {"Val/mAP": val_meter.full_map}, global_step=cur_epoch
            )
        all_preds_cpu = [pred.clone().detach().cpu()
                         for pred in val_meter.all_preds]
        all_labels_cpu = [label.clone().detach().cpu()
                          for label in val_meter.all_labels]
        writer.plot_eval(
            preds=all_preds_cpu,
            labels=all_labels_cpu,
            global_step=cur_epoch,
        )

    val_meter.reset()
Example #17
def train_epoch_wo_outlier(model, optimizer, in_loader, attack_in, cur_epoch, op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, (x_tf_0, x_tf_90, x_tf_180, x_tf_270, targets) in enumerate(in_loader):
        
        batch_size = x_tf_0.shape[0]
        
        assert x_tf_0.shape[0] == \
            x_tf_90.shape[0] == \
            x_tf_180.shape[0] == \
            x_tf_270.shape[0] == \
            targets.shape[0]
            #x_tf_trans.shape[0] == \
            #target_trans_x.shape[0] == \
            #target_trans_y.shape[0] == \
            
        batch = np.concatenate((
            x_tf_0,
            x_tf_90,
            x_tf_180,
            x_tf_270
        ), 0)
        batch = torch.FloatTensor(batch).cuda()
        
        target_rots = torch.cat((
            torch.zeros(batch_size),
            torch.ones(batch_size),
            2 * torch.ones(batch_size),
            3 * torch.ones(batch_size)
        ), 0).long()
        
        if attack_in is not None:
            # Process PGD attack
            batch = attack_in.perturb(batch, batch_size, torch.cat((targets, target_rots), 0).cuda())
            batch = batch.cuda()
        
        if cur_iter == 0:
            writer.add_image('Original', batch[0], cur_epoch)
            writer.add_image('Rot90', batch[batch_size], cur_epoch)
            writer.add_image('Rot180', batch[batch_size * 2], cur_epoch)
            writer.add_image('Rot270', batch[batch_size * 3], cur_epoch)
        
        # Adjust Learning rate
        lr = optim.get_lr_at_epoch(op_cfg, cur_epoch + float(cur_iter) / in_data_size)
        optim.set_lr(optimizer, lr)
        
        logits, pen = model(batch)
        
        classification_logits = logits[:batch_size]
        rot_logits            = model.rot_head(pen[:4*batch_size])
        #x_trans_logits        = model.x_trans_head(pen[4*batch_size:])
        #y_trans_logits        = model.y_trans_head(pen[4*batch_size:])
        
        
        classification_loss = F.cross_entropy(classification_logits, targets.cuda())
        rot_loss = F.cross_entropy(rot_logits, target_rots.cuda()) * global_cfg['loss']['rot_weight']
#         x_trans_loss = F.cross_entropy(x_trans_logits, target_trans_x.cuda()) * global_cfg['loss']['trans_weight']
#         y_trans_loss = F.cross_entropy(y_trans_logits, target_trans_y.cuda()) * global_cfg['loss']['trans_weight']
        
        
        #loss = classification_loss + ((rot_loss + x_trans_loss + y_trans_loss) / 3.0)
        loss = classification_loss + rot_loss
        
        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(logits[:batch_size], targets.cuda(), (1,))
        [top1_correct] = num_topks_correct
        
        # Add additional metrics!!!
        
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
    
    summary = {
        'avg_loss': avg_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }
    
    return summary
Example #18
def valid_epoch_wo_outlier(model,
                           in_loader,
                           loss_func,
                           cur_epoch,
                           logfile2,
                           attack_in=None):
    global global_cfg
    model.eval()
    avg_loss = 0
    correct = 0
    total = 0
    features_list = []
    targets_list = []

    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        # Data to GPU
        data = in_set[0]
        targets = in_set[1].cuda()

        if attack_in is not None:
            adv_inputs = attack_in.perturb(data.cuda(), targets)
            data = adv_inputs.cuda()
        else:
            data, targets = data.cuda(), targets.cuda()

        # Forward propagation and calculate loss
        (logits, features) = model(data)
        features_list.append(features.data.cpu())
        targets_list.append(targets.data.cpu())
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']

        # Calculate classifier error on the in-distribution samples
        num_topks_correct = metrics.topks_correct(logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # Add additional metrics!!!
        metrics.show_wrong_samples_targets(logits[:len(targets)], targets,
                                           logfile2)

        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)

    logfile2.write(
        "Dataset size [{}] | Total samples [{}] | Correct samples [{}]\n".
        format(in_data_size, total, correct))
    summary = {
        'avg_loss': avg_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'epoch': cur_epoch,
        'logits': torch.cat(features_list, dim=0),
        'targets': torch.cat(targets_list, dim=0),  # (Bs,)
    }

    return summary