# Assumed imports for this snippet; metrics, optim, and global_cfg are
# project-local, so the exact module paths may differ in the original
# project.
import numpy as np
import torch
import torch.nn.functional as F

import metrics
import optim
from optim import get_lr_at_epoch, set_lr

# global_cfg is module-level state populated by the surrounding setup code,
# e.g. global_cfg = {'loss': {...}, 'detector': {...}}.


def train_epoch_wo_outlier(model, optimizer, in_loader, loss_func, cur_epoch,
                           op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        # TODO: check the dimensions of the in_set batch.
        # Data to GPU
        data = in_set[0]
        targets = in_set[1]
        if cur_iter == 0:
            writer.add_image('in_dist target {}'.format(targets[0]), data[0],
                             cur_epoch)
        data, targets = data.cuda(), targets.cuda()

        # Adjust the learning rate as a function of the fractional epoch.
        # Within-epoch progress is measured in batches (len(in_loader)), not
        # dataset size, or the fractional part would stay near zero.
        lr = get_lr_at_epoch(op_cfg,
                             cur_epoch + float(cur_iter) / len(in_loader))
        set_lr(optimizer, lr)

        # Forward propagation and loss computation
        logits = model(data)

        # Stash the model and raw batch so the loss hook can access them.
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']

        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Top-1 correct count on the in-distribution samples. The slice
        # logits[:len(targets)] is a no-op here, but mirrors the outlier
        # variants below, where outlier logits are appended to the batch.
        num_topks_correct = metrics.topks_correct(logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # TODO: add additional metrics here.

        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct

    summary = {
        # avg_loss accumulates one batch-mean loss per iteration, so it is
        # averaged over batches rather than samples.
        'avg_loss': avg_loss / len(in_loader),
        'classifier_acc': correct / in_data_size,
        'lr': get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }

    return summary
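
# For reference, loss_func is expected to take (logits, targets, loss_cfg)
# and return a dict with at least a 'loss' entry. A minimal sketch assuming
# plain cross-entropy; cross_entropy_loss_func is a hypothetical name, and
# the project's real losses may also use the 'model' and 'data' entries
# stashed in loss_cfg.
def cross_entropy_loss_func(logits, targets, loss_cfg):
    # Only the first len(targets) rows of logits carry labels;
    # loss_cfg['model'] and loss_cfg['data'] are unused in this sketch.
    return {'loss': F.cross_entropy(logits[:len(targets)], targets)}
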
def valid_epoch_wo_outlier(model, in_loader, loss_func, cur_epoch):
    global global_cfg
    model.eval()
    avg_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        # Data to GPU
        data = in_set[0]
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()

        # Forward propagation and loss computation
        logits = model(data)
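        # Per-layer activation minima/maxima from a project-specific helper;
        # the layer indices 1..10 are taken as given by this snippet.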
        mins, maxs = model.get_min_max(data, range(1, 11))

        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'], mins, maxs)
        loss = loss_dict['loss']

        # Top-1 correct count on the in-distribution samples.
        num_topks_correct = metrics.topks_correct(logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # TODO: add additional metrics here.

        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct

    summary = {
        'avg_loss': avg_loss / len(in_loader),
        'classifier_acc': correct / in_data_size,
        'epoch': cur_epoch,
    }

    return summary
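
# Note that valid_epoch_wo_outlier calls loss_func with two extra positional
# arguments (mins, maxs) from model.get_min_max. A compatible hypothetical
# signature, with the extras optional so the same callable also fits
# train_epoch_wo_outlier above:
def minmax_aware_loss_func(logits, targets, loss_cfg, mins=None, maxs=None):
    loss = F.cross_entropy(logits[:len(targets)], targets)
    # A range-based term built from (mins, maxs) would be added to the loss
    # here; the actual criterion is project-specific and not shown above.
    return {'loss': loss}
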
def train_epoch_w_outlier(model, optimizer, in_loader, out_loader, loss_func,
                          detector_func, cur_epoch, op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    correct = 0
    total = 0
    num_batches = 0
    in_data_size = len(in_loader.dataset)
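    # Randomize the outlier dataset's start offset so each epoch pairs
    # in-distribution batches with a different slice of the outlier data.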
    out_loader.dataset.offset = np.random.randint(len(out_loader.dataset))
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):
        # TODO: check the dimensions of the in_set and out_set batches.

        # Build the joint batch: outlier samples are concatenated after the
        # in-distribution samples, and only the latter have labels.
        data = torch.cat((in_set[0], out_set[0]), 0)
        targets = in_set[1]
        if cur_iter == 0:
            writer.add_image('in_dist sample, target:[{}]'.format(targets[0]),
                             in_set[0][0], cur_epoch)
            writer.add_image('out_dist sample', out_set[0][0], cur_epoch)
        data, targets = data.cuda(), targets.cuda()

        # Adjust the learning rate as a function of the fractional epoch
        # (within-epoch progress is measured in batches, not dataset size).
        lr = optim.get_lr_at_epoch(op_cfg,
                                   cur_epoch + float(cur_iter) / len(in_loader))
        optim.set_lr(optimizer, lr)

        # Forward propagation; compute the loss and the OOD confidence scores
        logits = model(data)
        # Stash the model and raw batch for the loss and detector hooks.
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        confidences_dict = detector_func(logits, targets,
                                         global_cfg['detector'])
        confidences = confidences_dict['confidences']

        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        ## METRICS ##
        # Top-1 correct count on the in-distribution samples; the slice
        # drops the outlier logits appended at the end of the batch.
        num_topks_correct = metrics.topks_correct(logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # Calculate OOD metrics (auroc, aupr, fpr)
        #(auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)

        # TODO: add additional metrics here.

        ## UPDATE STATS ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
        num_batches += 1

    summary = {
        # avg_loss accumulates one batch-mean loss per iteration, so it is
        # averaged over batches; accuracy is averaged over in-dist samples.
        'avg_loss': avg_loss / num_batches,
        'classifier_acc': correct / total,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }

    return summary
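
# detector_func is expected to return a dict with a 'confidences' entry
# holding one score per sample of the joint (in-dist + outlier) batch. A
# minimal sketch using the maximum softmax probability (MSP) baseline;
# msp_detector_func is a hypothetical name, and the project's real detector
# is selected via global_cfg['detector'].
def msp_detector_func(logits, targets, detector_cfg):
    confidences = torch.softmax(logits, dim=1).max(dim=1).values
    return {'confidences': confidences}
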
def valid_epoch_w_outlier(model, in_loader, out_loader, loss_func,
                          detector_func, cur_epoch):
    global global_cfg
    model.eval()
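    # Note: no torch.no_grad() here, since the loss and detector hooks
    # receive the model and raw data and may need input gradients
    # (e.g. ODIN-style detectors).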
    avg_loss = 0
    correct = 0
    total = 0
    max_iter = 0
    avg_auroc = 0
    avg_aupr = 0
    avg_fpr = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):
        # Build the joint batch (outliers appended after in-dist samples)
        data = torch.cat((in_set[0], out_set[0]), 0)
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()

        # Forward propagation; compute the loss and the OOD confidence scores
        logits = model(data)
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        confidences_dict = detector_func(logits, targets,
                                         global_cfg['detector'])
        confidences = confidences_dict['confidences']

        ## METRICS ##
        # Top-1 correct count on the in-distribution samples; the slice
        # drops the outlier logits appended at the end of the batch.
        num_topks_correct = metrics.topks_correct(logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = num_topks_correct

        # Calculate OOD metrics (auroc, aupr, fpr)
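        # (targets is presumably used inside the helper to split the joint
        # batch into its in-distribution and OOD halves.)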
        (auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)

        # TODO: add additional metrics here.

        ## UPDATE STATS ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
        max_iter += 1
        avg_auroc += auroc
        avg_aupr += aupr
        avg_fpr += fpr

    summary = {
        # Loss and OOD metrics are averaged over batches (max_iter counts
        # iterations); accuracy is averaged over in-distribution samples.
        'avg_loss': avg_loss / max_iter,
        'classifier_acc': correct / total,
        'AUROC': avg_auroc / max_iter,
        'AUPR': avg_aupr / max_iter,
        'FPR95': avg_fpr / max_iter,
        'epoch': cur_epoch,
    }

    return summary
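
# For completeness, a helper consistent with how metrics.topks_correct is
# used above: one correct-prediction count per requested k. This is an
# assumption about the helper's behavior, not its actual implementation.
def topks_correct(preds, labels, ks):
    # Indices of the max(ks) highest-scoring classes per sample.
    _, top_idx = preds.topk(max(ks), dim=1, largest=True, sorted=True)
    # Compare each top prediction against the ground-truth label.
    correct = top_idx.eq(labels.view(-1, 1).expand_as(top_idx))
    # For each k, count the samples whose label appears in the top k.
    return [correct[:, :k].reshape(-1).float().sum() for k in ks]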