Code Example #1
File: train_sovnni.py  Project: emckwon/OOD-saige
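Note: each example below is a single function lifted from its project's training script, so names such as np, torch, F, optim, metrics, and global_cfg are defined at module level in the original file. A minimal sketch of the imports the OOD-saige examples appear to assume (the project-specific helper paths are guesses, not taken from the source):

import numpy as np
import torch
import torch.nn.functional as F

# Project-level helpers referenced below; exact import paths are assumptions:
#   optim.get_lr_at_epoch(cfg, epoch), optim.set_lr(optimizer, lr)
#   metrics.topks_correct(logits, targets, ks)
#   global_cfg: module-level config dict with 'loss' / 'detector' sections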
def train_epoch_wo_outlier(model, optimizer, in_loader, loss_func, cur_epoch, op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        #TODO: Dimension of in_set and out_set should be checked!
        # Data to GPU
        data = in_set[0]
        targets = in_set[1]
        if cur_iter == 0:
            writer.add_image('in_dist target {}'.format(targets[0]), data[0], cur_epoch)
        data, targets = data.cuda(), targets.cuda()

        # Adjust Learning rate
        lr = optim.get_lr_at_epoch(op_cfg, cur_epoch + float(cur_iter) / in_data_size)
        optim.set_lr(optimizer, lr)
        
        # Forward propagation and calculate loss
        logits = model(data)
        
        (ava_logits, ova_logits) = logits
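        # the SOVNNI-style model appears to return two heads: all-vs-all (ava) and one-vs-all (ova) logits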
        
        #print(logits.size())
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        
        logits = F.softmax(ava_logits, dim=1)     
        
        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        
        
        # Calculate classifier error about in-distribution sample
        num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))
        [top1_correct] = [x for x in num_topks_correct]
        
        # Add additional metrics!!!
        
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
    
    summary = {
        'avg_loss': avg_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }
    
    return summary
Code Example #2
def train_epoch_wo_outlier(model, optimizer, in_loader, attack_in, cur_epoch, op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, (x_tf_0, x_tf_90, x_tf_180, x_tf_270, targets) in enumerate(in_loader):
        
        batch_size = x_tf_0.shape[0]
        
        assert x_tf_0.shape[0] == \
            x_tf_90.shape[0] == \
            x_tf_180.shape[0] == \
            x_tf_270.shape[0] == \
            targets.shape[0]
            #x_tf_trans.shape[0] == \
            #target_trans_x.shape[0] == \
            #target_trans_y.shape[0] == \
            
        batch = np.concatenate((
            x_tf_0,
            x_tf_90,
            x_tf_180,
            x_tf_270
        ), 0)
        batch = torch.FloatTensor(batch).cuda()
        
        target_rots = torch.cat((
            torch.zeros(batch_size),
            torch.ones(batch_size),
            2 * torch.ones(batch_size),
            3 * torch.ones(batch_size)
        ), 0).long()
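        # target_rots: rotation class labels (0..3) for the 0°/90°/180°/270° views stacked in batch above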
        
        if attack_in is not None:
            # Process PGD attack
            batch = attack_in.perturb(batch, batch_size, torch.cat((targets, target_rots), 0).cuda())
            batch = batch.cuda()
        
        if cur_iter == 0:
            writer.add_image('Original', batch[0], cur_epoch)
            writer.add_image('Rot90', batch[batch_size], cur_epoch)
            writer.add_image('Rot180', batch[batch_size * 2], cur_epoch)
            writer.add_image('Rot270', batch[batch_size * 3], cur_epoch)
        
        # Adjust Learning rate
        lr = optim.get_lr_at_epoch(op_cfg, cur_epoch + float(cur_iter) / in_data_size)
        optim.set_lr(optimizer, lr)
        
        logits, pen = model(batch)
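        # 'pen' appears to be the penultimate-layer features, reused below by the auxiliary rotation head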
        
        classification_logits = logits[:batch_size]
        rot_logits            = model.rot_head(pen[:4*batch_size])
        #x_trans_logits        = model.x_trans_head(pen[4*batch_size:])
        #y_trans_logits        = model.y_trans_head(pen[4*batch_size:])
        
        
        classification_loss = F.cross_entropy(classification_logits, targets.cuda())
        rot_loss = F.cross_entropy(rot_logits, target_rots.cuda()) * global_cfg['loss']['rot_weight']
#         x_trans_loss = F.cross_entropy(x_trans_logits, target_trans_x.cuda()) * global_cfg['loss']['trans_weight']
#         y_trans_loss = F.cross_entropy(y_trans_logits, target_trans_y.cuda()) * global_cfg['loss']['trans_weight']
        
        
        #loss = classification_loss + ((rot_loss + x_trans_loss + y_trans_loss) / 3.0)
        loss = classification_loss + rot_loss
        
        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        # Calculate classifier error about in-distribution sample
        num_topks_correct = metrics.topks_correct(logits[:batch_size], targets.cuda(), (1,))
        [top1_correct] = [x for x in num_topks_correct]
        
        # Add additional metrics!!!
        
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
    
    summary = {
        'avg_loss': avg_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }
    
    return summary
Code Example #3
def train_epoch_w_outlier(model, optimizer, in_loader, out_loader, loss_func, detector_func, cur_epoch, op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    correct = 0
    total = 0
    in_data_size = len(in_loader.dataset)
    out_loader.dataset.offset = np.random.randint(len(out_loader.dataset))
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):
        #TODO: Dimension of in_set and out_set should be checked!
        
        # Data to GPU
        data = torch.cat((in_set[0], out_set[0]), 0)
        targets = in_set[1]
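        # class targets exist only for the in-distribution half of the concatenated batch; the outlier half is unlabeled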
        if cur_iter == 0:
            writer.add_image('in_dist sample, target:[{}]'.format(targets[0]), in_set[0][0], cur_epoch)
            writer.add_image('out_dist sample', out_set[0][0], cur_epoch)
        data, targets = data.cuda(), targets.cuda()
        
        # Adjust Learning rate
        lr = optim.get_lr_at_epoch(op_cfg, cur_epoch + float(cur_iter) / in_data_size)
        optim.set_lr(optimizer, lr)
        
        # Forward propagation; calculate loss and confidence
        logits = model(data)
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        confidences_dict = detector_func(logits, targets, global_cfg['detector'])
        confidences = confidences_dict['confidences']

        
        # Back propagation 
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        ## METRICS ##
        # Calculate classifier error about in-distribution sample
        num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))
        [top1_correct] = [x for x in num_topks_correct]
        
        # Calculate OOD metrics (auroc, aupr, fpr)
        #(auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)
        
        # Add additional metrics!!!
        
        
        ## UPDATE STATS ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
    
    summary = {
        'avg_loss': avg_loss / total,
        'classifier_acc': correct / total,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }
    
    return summary
Code Example #4
File: train.py  Project: tamwaiban/Open-PIFuhd
def train_epochs(model, optimizer, cfg, args, train_loader, test_loader,
                 resume_epoch, gallery_id):
    '''Training-epoch method, based on the open-mmlab style.
    Parameters:
        model: model being trained
        optimizer: optimizer for training
        cfg (CfgNode): configs. Details can be found in
            configs/PIFu_Render_People_HG.py
        args: option parameters
        train_loader: train dataloader iterator
        test_loader: test dataloader iterator
        resume_epoch: epoch to resume training from
        gallery_id: dict of directories where gallery results and
            checkpoints are saved
    Return:
        None
    '''
    best_iou = float('-inf')

    for epoch in range(resume_epoch, cfg.num_epoch):
        epoch_start_time = time.time()
        logger.info("training epoch {}".format(epoch))
        model.train()
        #define train_loss
        train_loss = AverageMeter()
        iter_data_time = time.time()
        for idx, data in enumerate(train_loader):
            iter_start_time = time.time()
            #adjust learning rate
            lr_epoch = epoch + idx / len(train_loader)
            optim.adjust_learning_rate(optimizer, lr_epoch, cfg)
            '''For PIFu,
            name: [B] img_name_list
            img: [B, C, H, W]
            calib: [B, 4, 4]
            samples: [B, C(x,y,z), N]
            labels: [B, 1, N]
            '''
            names = data['name']
            img = data['img'].cuda()
            calib = data['calib'].cuda()
            samples = data['samples'].cuda()
            labels = data['labels'].cuda()

            if cfg.use_front:
                front_normal = data['front_normal'].cuda()
                img = torch.cat([img, front_normal], dim=1)
            if cfg.use_back:
                back_normal = data['back_normal'].cuda()
                img = torch.cat([img, back_normal], dim=1)

            if cfg.fine_pifu:
                #collect fine pifu datasets
                crop_query_points = data['crop_query_points'].cuda()
                crop_img = data['crop_img']
                crop_front_normal = data['crop_front_normal']
                crop_back_normal = data['crop_back_normal']
                crop_imgs = torch.cat(
                    [crop_img, crop_front_normal, crop_back_normal],
                    dim=1).cuda()

            bs = img.shape[0]
            if not cfg.fine_pifu:
                preds, loss = model(images=img,
                                    calibs=calib,
                                    points=samples,
                                    labels=labels)
            else:
                preds, loss = model(images=img,
                                    calibs=calib,
                                    points=samples,
                                    labels=labels,
                                    crop_imgs=crop_imgs,
                                    crop_points_query=crop_query_points)
            #distributed learning
            #optimizer step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if args.dist:
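                # average the loss across processes before logging (assuming reduce_tensor performs an all-reduce mean)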
                loss = reduce_tensor(loss)
            train_loss.update(loss.item(), bs)

            #time calculate
            iter_net_time = time.time()
            eta = int(((iter_net_time - epoch_start_time) /
                       (idx + 1)) * len(train_loader) -
                      (iter_net_time - epoch_start_time))

            #training visible
            if idx % args.freq_plot == 0:
                word_handler = 'Name: {0} | Epoch: {1} | {2}/{3} | Err: {4:.06f} | LR: {5:.06f}  | dataT: {6:.05f} | netT: {7:.05f} | ETA: {8:02d}:{9:02d}:{10:02d}'.format(
                    cfg.name, epoch, idx, len(train_loader), train_loss.avg,
                    optim.get_lr_at_epoch(cfg, lr_epoch),
                    iter_start_time - iter_data_time,
                    iter_net_time - iter_start_time, int(eta // 3600),
                    int((eta % 3600) // 60), eta % 60)
                if (not args.dist) or dist.get_rank() == 0:
                    logger.info(word_handler)
            if idx != 0 and idx % args.freq_gallery == 0:
                #gallery save
                #points [1, N]
                save_gallery(preds, samples, names, gallery_id['train'], epoch)
            iter_data_time = time.time()
        if epoch > 0 and epoch % cfg.save_fre_epoch == 0:
            logger.info("save model: epoch {}!".format(epoch))
            save_checkpoints(model, epoch, optimizer, gallery_id['save_path'],
                             args)
        #test
        if epoch >= cfg.start_val_epoch and epoch % cfg.val_epoch == 0:
            test_metric = test_epoch(model, cfg, args, test_loader, epoch,
                                     gallery_id['test'])
            if best_iou < test_metric['iou']:
                best_iou = test_metric['iou']
                save_checkpoints(model,
                                 epoch,
                                 optimizer,
                                 gallery_id['save_path'],
                                 args,
                                 best=True)
Code Example #5
def train_epoch_wo_outlier(model, optimizer, in_loader, loss_func, cur_epoch,
                           op_cfg, writer):
    global global_cfg
    model.train()
    avg_loss = 0
    avg_sup_loss = 0
    avg_con_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, in_set in enumerate(in_loader):
        #TODO: Dimension of in_set and out_set should be checked!

        # Data to GPU
        data = in_set[0]
        if cur_iter == 0:
            writer.add_image('Original', data[0], cur_epoch)

        # Transform imgs
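        # seq0 / seq1 are module-level augmentation pipelines (presumably imgaug Sequentials) producing two augmented views of each image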
        data = np.transpose((data.numpy() * 255).round().astype(np.uint8),
                            (0, 2, 3, 1))
        images_aug = seq0(images=data)
        data0 = torch.from_numpy(
            np.transpose((np.stack(images_aug, 0).astype(np.float32) / 255.),
                         (0, 3, 1, 2)))
        images_aug = seq1(images=data)
        data1 = torch.from_numpy(
            np.transpose((np.stack(images_aug, 0).astype(np.float32) / 255.),
                         (0, 3, 1, 2)))
        if cur_iter == 0:
            writer.add_image('Transform0', data0[0], cur_epoch)
            writer.add_image('Transform1', data1[0], cur_epoch)

        data = torch.cat((data0, data1), 0)
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()

        # Adjust Learning rate
        lr = optim.get_lr_at_epoch(op_cfg,
                                   cur_epoch + float(cur_iter) / in_data_size)
        optim.set_lr(optimizer, lr)

        # Forward propagation and calculate loss
        (g_logits, h_logits, f_logits) = model(data, cur_epoch)
        # logits: (g, h)
        logits = (g_logits, h_logits)
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        sup_loss = loss_dict['sup_loss']
        con_loss = loss_dict['con_loss']

        # Back propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Calculate classifier error about in-distribution sample
        num_topks_correct = metrics.topks_correct(g_logits[:len(targets)],
                                                  targets, (1, ))
        [top1_correct] = [x for x in num_topks_correct]

        # Add additional metrics!!!

        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        avg_sup_loss += sup_loss
        avg_con_loss += con_loss
        correct += top1_correct

    summary = {
        'avg_loss': avg_loss / in_data_size,
        'avg_sup_loss': avg_sup_loss / in_data_size,
        'avg_con_loss': avg_con_loss / in_data_size,
        'classifier_acc': correct / in_data_size,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
    }

    return summary
Code Example #6
File: train_opengan.py  Project: emckwon/OOD-saige
def train_epoch_wo_outlier(feature_extractor, G, D, G_optimizer, D_optimizer,
                           in_loader, cur_epoch, op_cfg, writer):
    global global_cfg
    D.train()
    G.train()
    feature_extractor.eval()
    avg_loss = 0
    correct = 0
    in_data_size = len(in_loader.dataset)
    real_feature = None
    real_target = None
    for cur_iter, in_set in enumerate(in_loader):
        #TODO: Dimension of in_set and out_set should be checked!

        # Data to GPU
        real_data = in_set[0]
        targets = in_set[1]
        if cur_iter == 0:
            writer.add_image('in_dist target {}'.format(targets[0]),
                             real_data[0], cur_epoch)
        real_data, targets = real_data.cuda(), targets.cuda()

        _, real_features = feature_extractor(real_data, -1)
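        # the frozen feature extractor supplies per-sample features that condition both G and D (the -1 argument presumably selects which output/layer to return)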
        if cur_iter == 0:
            real_feature = real_features[0].unsqueeze(0)
            real_target = targets[0]
        # Adjust Learning rate
        lr = optim.get_lr_at_epoch(op_cfg,
                                   cur_epoch + float(cur_iter) / in_data_size)
        optim.set_lr(G_optimizer, lr)
        optim.set_lr(D_optimizer, lr)

        ###
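        # ================== Train D ================== #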
        d_out_real, dr1, dr2 = D(real_data, real_features)
        if global_cfg['loss']['adv_loss'] == 'wgan-gp':
            d_loss_real = -torch.mean(d_out_real)
        elif global_cfg['loss']['adv_loss'] == 'hinge':
            d_loss_real = torch.nn.ReLU()(1.0 - d_out_real).mean()

        z = tensor2var(torch.randn(real_data.size(0), G.z_dim))
        fake_images, gf1, gf2 = G(z, real_features)
        d_out_fake, df1, df2 = D(fake_images, real_features)

        if global_cfg['loss']['adv_loss'] == 'wgan-gp':
            d_loss_fake = d_out_fake.mean()
        elif global_cfg['loss']['adv_loss'] == 'hinge':
            d_loss_fake = torch.nn.ReLU()(1.0 + d_out_fake).mean()

        # Backward + Optimize
        d_loss = d_loss_real + d_loss_fake

        if global_cfg['loss']['adv_loss'] == 'wgan-gp':
            d_loss += op_cfg['lambda_gp'] * compute_gradient_penalty(
                feature_extractor, D, real_data.data, fake_images.data)

        D_optimizer.zero_grad()
        G_optimizer.zero_grad()
        d_loss.backward()
        D_optimizer.step()

        # ================== Train G and gumbel ================== #
        # Create random noise
        z = tensor2var(torch.randn(real_data.size(0), G.z_dim))
        _, real_features = feature_extractor(real_data, -1)
        fake_images, _, _ = G(z, real_features)

        # Compute loss with fake images
        g_out_fake, _, _ = D(fake_images, real_features)  # batch x n
        _, fake_features = feature_extractor(fake_images, -1)

        if global_cfg['loss']['adv_loss'] == 'wgan-gp':
            g_loss_fake = -g_out_fake.mean()
        elif global_cfg['loss']['adv_loss'] == 'hinge':
            g_loss_fake = -g_out_fake.mean()

        g_loss_feature = F.mse_loss(fake_features, real_features)

        g_loss = g_loss_fake + g_loss_feature

        D_optimizer.zero_grad()
        G_optimizer.zero_grad()
        g_loss.backward()
        G_optimizer.step()
        ###

        # Add additional metrics!!!
        # accumulate Python scalars, not tensors, so no autograd graph is kept across iterations
        avg_loss += d_loss.item() + g_loss.item()

    ## Epoch
    # Print out log info

    summary = {
        'avg_loss': avg_loss / in_data_size,
        'lr': optim.get_lr_at_epoch(op_cfg, cur_epoch),
        'epoch': cur_epoch,
        'real_feature': real_feature,
        'real_target': real_target,
    }

    return summary