Code example #1
def valid_trainer(model, valid_loader, criterion):
    model.eval()
    loss_meter = AverageMeter()

    preds_probs = []
    gt_list = []
    with torch.no_grad():
        for step, (imgs, gt_label, gt_depth,
                   imgname) in enumerate(tqdm(valid_loader)):
            imgs = imgs.cuda()
            gt_label = gt_label.cuda()
            gt_depth = gt_depth.cuda()
            # keep raw labels (with -1 unknowns) for evaluation, but zero
            # them out before computing the loss
            gt_list.append(gt_label.cpu().numpy())
            gt_label[gt_label == -1] = 0
            # model returns attribute logits and a reconstructed depth map
            valid_logits, depth_pred = model(imgs)
            valid_loss = criterion(valid_logits, gt_label) + loss_autoencoder(
                depth_pred, gt_depth)
            valid_probs = torch.sigmoid(valid_logits)

            preds_probs.append(valid_probs.cpu().numpy())
            loss_meter.update(to_scalar(valid_loss))

    valid_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)
    return valid_loss, gt_label, preds_probs
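Every snippet in this collection references the same small utilities (AverageMeter, to_scalar, time_str) and a common set of imports without defining them. A minimal sketch of what they are assumed to look like; the real project's versions may differ:

import time

import numpy as np
import torch
import torch.nn.functional as F
from torch.cuda.amp import autocast, GradScaler
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm

def to_scalar(t):
    # assumed helper: turn a 0-dim loss tensor into a Python float
    return t.detach().cpu().item() if torch.is_tensor(t) else float(t)

def time_str():
    # assumed helper: timestamp string for log lines
    return time.strftime('%Y-%m-%d_%H:%M:%S')

class AverageMeter:
    # assumed helper: tracks the latest value and the running average
    def __init__(self):
        self.val = self.sum = self.count = self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count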
Code example #2
def valid_trainer(model, valid_loader, criterion):
    model.eval()
    loss_meter = AverageMeter()

    preds_probs = []
    gt_list = []
    with torch.no_grad():
        for step, (imgs, gt_label, imgname) in enumerate(tqdm(valid_loader)):
            imgs = imgs.cuda()
            gt_label = gt_label.cuda()
            gt_list.append(gt_label.cpu().numpy())
            gt_label[gt_label == -1] = 0
            valid_logits = model(imgs)
            valid_loss = criterion(valid_logits, gt_label)
            # for MCC evaluation the sigmoid can be dropped and the raw
            # logits thresholded at 0 instead:
            # valid_probs = valid_logits
            valid_probs = torch.sigmoid(valid_logits)
            preds_probs.append(valid_probs.cpu().numpy())
            loss_meter.update(to_scalar(valid_loss))

    valid_loss = loss_meter.avg
    print(f'valid loss: {valid_loss}')

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)
    return valid_loss, gt_label, preds_probs
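The commented-out variant above works because the sigmoid is monotonic: thresholding probabilities at 0.5 gives the same hard predictions as thresholding raw logits at 0. A quick standalone check (the tensors here are illustrative):

import torch

logits = torch.randn(4, 8)                   # fake batch of logits
via_probs = torch.sigmoid(logits) > 0.5      # threshold probabilities at 0.5
via_logits = logits > 0.0                    # threshold raw logits at 0
assert torch.equal(via_probs, via_logits)    # identical hard predictions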
Code example #3
def valid_trainer(model, valid_loader, criterion):
    model.eval()
    loss_meter = AverageMeter()

    preds_probs = []
    gt_list = []
    with torch.no_grad():
        for step, (imgs, depth, gt_label,
                   imgname) in enumerate(tqdm(valid_loader)):
            imgs = imgs.cuda()
            depth = depth.cuda()  # depth must be on the GPU before the forward pass
            gt_label = gt_label.cuda()
            gt_list.append(gt_label.cpu().numpy())
            gt_label[gt_label == -1] = 0
            valid_logits = model(imgs, depth)
            
            valid_loss = criterion(valid_logits, gt_label)
            valid_probs = torch.sigmoid(valid_logits)
            preds_probs.append(valid_probs.cpu().numpy())
            loss_meter.update(to_scalar(valid_loss))
    valid_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)
    return valid_loss, gt_label, preds_probs
Code example #4
def batch_trainer(epoch, model, train_loader, criterion, optimizer):
    model.train()
    epoch_time = time.time()
    loss_meter = AverageMeter()

    batch_num = len(train_loader)
    gt_list = []
    preds_probs = []

    lr = optimizer.param_groups[1]['lr']

    for step, (imgs, gt_label, imgname) in enumerate(train_loader):

        batch_time = time.time()
        imgs, gt_label = imgs.cuda(), gt_label.cuda()

        feat_map, output = model(imgs)
        loss_list = []
        for k in range(len(output)):
            out = output[k]
            loss_list.append(criterion(out, gt_label))
        loss = sum(loss_list)

        # maximum voting: element-wise max over the five branch outputs
        output = torch.stack(output[:5], dim=0).max(dim=0).values

        train_loss = loss

        optimizer.zero_grad()
        train_loss.backward()
        clip_grad_norm_(model.parameters(),
                        max_norm=10.0)  # make larger learning rate works
        optimizer.step()

        loss_meter.update(to_scalar(train_loss))

        gt_list.append(gt_label.cpu().numpy())
        train_probs = torch.sigmoid(output)
        preds_probs.append(train_probs.detach().cpu().numpy())

        log_interval = 20
        if (step + 1) % log_interval == 0 or (step +
                                              1) % len(train_loader) == 0:
            print(
                f'{time_str()}, Step {step}/{batch_num} in Ep {epoch}, {time.time() - batch_time:.2f}s ',
                f'train_loss:{loss_meter.val:.4f}')

    train_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    print(
        f'Epoch {epoch}, LR {lr}, Train_Time {time.time() - epoch_time:.2f}s, Loss: {loss_meter.avg:.4f}'
    )

    return train_loss, gt_label, preds_probs
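The "maximum voting" above takes an element-wise maximum over the five branch outputs; the nested torch.max calls can be collapsed into a single stack-and-reduce. A standalone check that the two forms agree (shapes are illustrative):

import torch

outs = [torch.randn(2, 35) for _ in range(5)]   # five branch logit tensors

nested = torch.max(
    torch.max(torch.max(torch.max(outs[0], outs[1]), outs[2]), outs[3]),
    outs[4])
stacked = torch.stack(outs, dim=0).max(dim=0).values

assert torch.equal(nested, stacked)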
Code example #5
def batch_trainer(epoch, model, train_loader, criterion, optimizer, loss):
    model.train()
    epoch_time = time.time()
    loss_meter = AverageMeter()

    batch_num = len(train_loader)
    gt_list = []
    preds_probs = []

    lr = optimizer.param_groups[0]['lr']

    for step, (imgs, gt_label, imgname) in enumerate(train_loader):

        batch_time = time.time()
        imgs, gt_label = imgs.cuda(), gt_label.cuda()

        train_logit_1, train_logit_2, train_logit_3, train_logit_4 = model(
            imgs)

        if loss == 'Multi_Level_Loss':
            # deep supervision: deeper levels get larger weights
            train_loss = (0.1 * criterion(train_logit_1, gt_label) +
                          0.3 * criterion(train_logit_2, gt_label) +
                          0.7 * criterion(train_logit_3, gt_label) +
                          criterion(train_logit_4, gt_label))
        else:
            raise ValueError(f'unsupported loss: {loss}')

        train_loss.backward()
        clip_grad_norm_(model.parameters(),
                        max_norm=10.0)  # make larger learning rate works
        optimizer.step()
        optimizer.zero_grad()
        loss_meter.update(to_scalar(train_loss))

        gt_list.append(gt_label.cpu().numpy())
        # report probabilities from the deepest supervision level only
        train_probs = torch.sigmoid(train_logit_4)
        preds_probs.append(train_probs.detach().cpu().numpy())
        log_interval = 20

        if (step + 1) % log_interval == 0 or (step +
                                              1) % len(train_loader) == 0:
            print(
                f'{time_str()}, Step {step}/{batch_num} in Ep {epoch}, {time.time() - batch_time:.2f}s ',
                f'train_loss:{loss_meter.val:.4f}')

    train_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    print(
        f'Epoch {epoch}, LR {lr}, Train_Time {time.time() - epoch_time:.2f}s, Loss: {loss_meter.avg:.4f}'
    )

    return train_loss, gt_label, preds_probs
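The hand-unrolled weighted sum in the Multi_Level_Loss branch generalizes to any number of supervision levels. A minimal helper sketch (the name multi_level_loss and the default weights are illustrative, not from the original code):

def multi_level_loss(criterion, logits_list, target,
                     weights=(0.1, 0.3, 0.7, 1.0)):
    # weighted sum of one loss per supervision level (deepest weighted highest)
    assert len(logits_list) == len(weights)
    return sum(w * criterion(logits, target)
               for w, logits in zip(weights, logits_list))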
Code example #6
def valid_trainer(model, valid_loader, criterion):
    model.eval()
    loss_meter = AverageMeter()

    preds_probs = []
    gt_list = []

    with torch.no_grad():
        for step, (imgs, gt_label, imgname) in enumerate(tqdm(valid_loader)):

            imgs = imgs.cuda()
            gt_label = gt_label.cuda()
            valid_logit_1, valid_logit_2, valid_logit_3, valid_logit_4 = model(
                imgs)

            gt_list.append(gt_label.cpu().numpy())
            gt_label[gt_label == -1] = 0

            # loss and probabilities use the deepest head only
            valid_loss = criterion(valid_logit_4, gt_label)
            valid_probs = torch.sigmoid(valid_logit_4)
            preds_probs.append(valid_probs.cpu().numpy())
            loss_meter.update(to_scalar(valid_loss))

    valid_loss = loss_meter.avg
    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)
    return valid_loss, gt_label, preds_probs
Code example #7
def batch_trainer(epoch, model, train_loader, criterion, optimizer, loss):
    model.train()
    epoch_time = time.time()
    loss_meter = AverageMeter()

    batch_num = len(train_loader)
    gt_list = []
    preds_probs = []

    lr = optimizer.param_groups[0]['lr']
    for step, (imgs, gt_label, gt_depth, imgname) in enumerate(train_loader):
        batch_time = time.time()
        imgs, gt_label, gt_depth = (imgs.cuda(), gt_label.cuda(),
                                    gt_depth.cuda())
        # model returns attribute logits plus a reconstructed depth map
        train_logits, depth_pred = model(imgs)
        if loss == 'BCE_LOSS':
            train_loss = criterion(train_logits, gt_label) + loss_autoencoder(
                depth_pred, gt_depth)
        else:
            raise ValueError(f'unsupported loss: {loss}')
        train_loss.backward()
        clip_grad_norm_(model.parameters(),
                        max_norm=10.0)  # make larger learning rate works
        optimizer.step()
        optimizer.zero_grad()
        loss_meter.update(to_scalar(train_loss))

        gt_list.append(gt_label.cpu().numpy())
        train_probs = torch.sigmoid(train_logits)
        preds_probs.append(train_probs.detach().cpu().numpy())

        log_interval = 20
        if (step + 1) % log_interval == 0 or (step +
                                              1) % len(train_loader) == 0:
            print(
                f'{time_str()}, Step {step}/{batch_num} in Ep {epoch}, {time.time() - batch_time:.2f}s ',
                f'train_loss:{loss_meter.val:.4f}')

    train_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    print(
        f'Epoch {epoch}, LR {lr}, Train_Time {time.time() - epoch_time:.2f}s, Loss: {loss_meter.avg:.4f}'
    )

    return train_loss, gt_label, preds_probs
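loss_autoencoder is used in examples #1 and #7 but never defined in these snippets. A plausible minimal sketch, assuming it is a simple pixel-wise reconstruction loss between the predicted and ground-truth depth maps (the actual definition may differ):

import torch.nn.functional as F

def loss_autoencoder(depth_pred, gt_depth):
    # assumed depth-reconstruction term: mean squared error on depth maps
    return F.mse_loss(depth_pred, gt_depth.float())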
Code example #8
def valid_trainer(model, valid_loader, criterion):
    model.eval()
    loss_meter = AverageMeter()

    preds_probs = []
    gt_list = []
    with torch.no_grad():
        for step, (imgs, gt_label, imgname) in enumerate(tqdm(valid_loader)):
            imgs = imgs.cuda()
            gt_label = gt_label.cuda()
            gt_list.append(gt_label.cpu().numpy())
            gt_label[gt_label == -1] = 0
            valid_logits, valid_logits_2 = model(imgs)
            # loss and probabilities use the second head only
            valid_loss = criterion(valid_logits_2, gt_label)
            valid_probs = torch.sigmoid(valid_logits_2)
            preds_probs.append(valid_probs.cpu().numpy())
            loss_meter.update(to_scalar(valid_loss))

    valid_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)
    return valid_loss, gt_label, preds_probs
Code example #9
def valid_trainer(model, valid_loader, criterion):
    model.eval()
    grad_cam = GradCam(model=model,
                       target_layer_names=["layer4"],
                       use_cuda=True)

    loss_meter = AverageMeter()

    preds_probs = []
    gt_list = []
    # no torch.no_grad() here: Grad-CAM needs gradients during the forward pass
    for step, (imgs, gt_label, imgname) in enumerate(tqdm(valid_loader)):
        imgs = imgs.cuda()
        gt_label = gt_label.cuda()
        gt_list.append(gt_label.cpu().numpy())
        gt_label[gt_label == -1] = 0
        valid_logits = model(imgs)

        # mask_cam = grad_cam(imgs, 22)  # Grad-CAM mask of shape (bs, 256, 192)
        valid_loss = criterion(valid_logits, gt_label)
        valid_probs = torch.sigmoid(valid_logits)
        preds_probs.append(valid_probs.detach().cpu().numpy())
        loss_meter.update(to_scalar(valid_loss))
    valid_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)
    return valid_loss, gt_label, preds_probs
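The GradCam helper above is constructed but not defined in these snippets. A minimal self-contained sketch of the underlying technique (hook a target layer, weight its activations by the pooled gradients of one output logit); note the sketch takes the layer module directly rather than a target_layer_names list, and assumes model(imgs) returns a plain logits tensor:

import torch
import torch.nn.functional as F

class GradCam:
    # minimal Grad-CAM sketch, not the implementation used by the snippets
    def __init__(self, model, target_layer, use_cuda=True):
        self.model = model.cuda() if use_cuda else model
        self.use_cuda = use_cuda
        self.activations = None
        self.gradients = None
        target_layer.register_forward_hook(self._save_activation)
        target_layer.register_full_backward_hook(self._save_gradient)

    def _save_activation(self, module, inp, out):
        self.activations = out.detach()

    def _save_gradient(self, module, grad_in, grad_out):
        self.gradients = grad_out[0].detach()

    def __call__(self, imgs, class_idx):
        if self.use_cuda:
            imgs = imgs.cuda()
        logits = self.model(imgs)                     # (bs, num_classes)
        self.model.zero_grad()
        logits[:, class_idx].sum().backward()
        # global-average-pool the gradients to get per-channel weights
        weights = self.gradients.mean(dim=(2, 3), keepdim=True)
        cam = F.relu((weights * self.activations).sum(dim=1))  # (bs, h, w)
        cam = F.interpolate(cam.unsqueeze(1), size=imgs.shape[-2:],
                            mode='bilinear',
                            align_corners=False).squeeze(1)
        # normalize each map to [0, 1]
        cam = cam - cam.flatten(1).min(dim=1).values.view(-1, 1, 1)
        cam = cam / (cam.flatten(1).max(dim=1).values.view(-1, 1, 1) + 1e-8)
        return cam                                    # (bs, H, W)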
Code example #10
def valid_trainer(epoch, model, valid_loader, criterion):
    model.eval()
    loss_meter = AverageMeter()

    preds_probs = []
    gt_list = []

    with torch.no_grad():
        for step, (imgs, gt_label, imgname) in enumerate(tqdm(valid_loader)):
            imgs = imgs.cuda()
            gt_label = gt_label.cuda()
            gt_list.append(gt_label.cpu().numpy())
            gt_label[gt_label == -1] = 0

            output = model(imgs)

            loss_list = []
            for k in range(len(output)):
                out = output[k]
                loss_list.append(criterion(out, gt_label))
            loss = sum(loss_list)
            # maximum voting: element-wise max over the five branch outputs
            output = torch.stack(output[:5], dim=0).max(dim=0).values

            valid_loss = loss

            valid_probs = torch.sigmoid(output)
            preds_probs.append(valid_probs.detach().cpu().numpy())
            loss_meter.update(to_scalar(valid_loss))

    valid_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    return valid_loss, gt_label, preds_probs
Code example #11
def batch_trainer(epoch, model, train_loader, criterion, optimizer, loss):
    model.train()
    epoch_time = time.time()
    loss_meter = AverageMeter()

    batch_num = len(train_loader)
    gt_list = []
    preds_probs = []

    lr = optimizer.param_groups[1]['lr']

    for step, (imgs, gt_label, imgname) in enumerate(train_loader):
        if step >= 319:  # NOTE: hard-coded cap on the number of batches per epoch
            continue
        batch_time = time.time()
        imgs, gt_label = imgs.cuda(), gt_label.cuda()
        train_logits, train_logits_2 = model(imgs, gt_label)
        if loss == 'KL_LOSS':
            # pull classifier-weight correlations toward a fixed
            # attribute-similarity prior (loaded from src/sim.npy)
            sim = np.load('src/sim.npy')
            cls_weight = model.state_dict(
            )['module.classifier.logits.0.weight']
            cls_weight_t = torch.transpose(cls_weight, 1, 0)
            cls = torch.mm(cls_weight, cls_weight_t)
            cls = torch.triu(cls, 1).view(-1)
            sim = torch.from_numpy(sim).float().cuda(non_blocking=True)
            sim = torch.triu(sim, 1).view(-1)
            kl_mean = F.kl_div(cls.softmax(dim=-1).log(),
                               sim.softmax(dim=-1),
                               reduction='sum')

            train_loss = criterion(train_logits, gt_label) + kl_mean

        if loss == 'KL2_LOSS':
            sim = np.load('src/sim.npy')
            cls_weight = l2_norm(train_logits, 0)
            cls_weight_t = torch.transpose(cls_weight, 1, 0)

            cls = torch.mm(cls_weight_t, cls_weight)
            cls = torch.triu(cls, 1).view(-1)
            sim = torch.from_numpy(sim).float().cuda(non_blocking=True)
            sim = torch.triu(sim, 1).view(-1)
            kl_mean = F.kl_div(cls.softmax(dim=-1).log(),
                               sim.softmax(dim=-1),
                               reduction='sum')

            train_loss = criterion(train_logits, gt_label) + kl_mean
        if loss == 'BCE_LOSS':
            train_loss = criterion(train_logits, gt_label)
        if loss == 'MMA_LOSS':
            # earlier variants added an MMA / weight-correlation penalty on
            # module.classifier.fc_1; only BCE on the auxiliary head is active
            train_loss = criterion(train_logits_2, gt_label)
        train_loss.backward()
        clip_grad_norm_(model.parameters(),
                        max_norm=10.0)  # make larger learning rate works
        optimizer.step()
        optimizer.zero_grad()
        loss_meter.update(to_scalar(train_loss))

        gt_list.append(gt_label.cpu().numpy())

        train_probs_2 = torch.sigmoid(train_logits_2)

        preds_probs.append(train_probs_2.detach().cpu().numpy())

        log_interval = 20
        if (step + 1) % log_interval == 0 or (step +
                                              1) % len(train_loader) == 0:
            print(
                f'{time_str()}, Step {step}/{batch_num} in Ep {epoch}, {time.time() - batch_time:.2f}s ',
                f'train_loss:{loss_meter.val:.4f}')

    train_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    print(
        f'Epoch {epoch}, LR {lr}, Train_Time {time.time() - epoch_time:.2f}s, Loss: {loss_meter.avg:.4f}'
    )

    return train_loss, gt_label, preds_probs
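Examples #11, #15, and #16 all repeat the same KL regularizer that pulls classifier-weight correlations toward a precomputed attribute-similarity matrix. A standalone sketch of just that term (src/sim.npy and the (num_attrs, feat_dim) weight shape come from the snippets; the function name is illustrative):

import numpy as np
import torch
import torch.nn.functional as F

def similarity_kl(cls_weight, sim_path='src/sim.npy'):
    # KL between upper-triangular weight correlations and the similarity prior
    sim = torch.from_numpy(np.load(sim_path)).float().to(cls_weight.device)
    corr = cls_weight @ cls_weight.t()            # (num_attrs, num_attrs)
    corr = torch.triu(corr, diagonal=1).view(-1)
    sim = torch.triu(sim, diagonal=1).view(-1)
    return F.kl_div(corr.softmax(dim=-1).log(), sim.softmax(dim=-1),
                    reduction='sum')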
Code example #12
def valid_trainer(args, model, ema_model, valid_loader, criterion,
                  loss_w=(1, )):
    model.eval()
    loss_meter = AverageMeter()
    loss_meter_ema = AverageMeter()

    cls_l_meter = AverageMeter()
    inter_l_meter = AverageMeter()
    intra_l_meter = AverageMeter()

    preds_probs = []
    preds_probs_ema = []
    gt_list = []
    imgname_list = []

    with torch.no_grad():
        for step, (imgs, gt_label, imgname) in enumerate(tqdm(valid_loader)):
            imgs = imgs.cuda()
            gt_label = gt_label.cuda()
            gt_list.append(gt_label.cpu().numpy())
            gt_label[gt_label == -1] = 0

            with autocast():
                valid_logits, feat = model(imgs, gt_label)
                valid_logits_ema, feat_ema = ema_model(imgs, gt_label)

            loss_list = criterion(valid_logits, gt_label, feat)
            loss_list_ema = criterion(valid_logits_ema, gt_label, feat_ema)

            valid_loss = loss_list[0]
            valid_probs = torch.sigmoid(valid_logits)
            preds_probs.append(valid_probs.cpu().numpy())

            valid_loss_ema = loss_list_ema[0]
            valid_probs_ema = torch.sigmoid(valid_logits_ema)
            preds_probs_ema.append(valid_probs_ema.cpu().numpy())

            # if args.distributed:
            #     if len(loss_list) > 1:
            #         cls_l_meter.update(to_scalar(reduce_tensor(loss_list[0], args.world_size)))
            #         inter_l_meter.update(to_scalar(reduce_tensor(loss_list[1], args.world_size)))
            #     loss_meter.update(to_scalar(reduce_tensor(valid_loss, args.world_size)))
            # else:
            if len(loss_list) > 1:
                cls_l_meter.update(to_scalar(loss_list[0]))
                inter_l_meter.update(to_scalar(loss_list[1]))
            loss_meter.update(to_scalar(valid_loss))
            loss_meter_ema.update(to_scalar(valid_loss_ema))

            torch.cuda.synchronize()

            imgname_list.append(imgname)

    valid_loss = loss_meter.avg

    if args.local_rank == 0:
        # note: intra_l_meter is reported but never updated in this snippet
        print(
            f'cls_loss:{cls_l_meter.val:.4f}, inter_loss:{inter_l_meter.val:.4f}, '
            f'intra_loss:{intra_l_meter.val:.4f}')

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)
    preds_probs_ema = np.concatenate(preds_probs_ema, axis=0)

    return valid_loss, gt_label, preds_probs, preds_probs_ema, imgname_list
Code example #13
def batch_trainer(cfg, args, epoch, model, model_ema, train_loader, criterion,
                  optimizer, loss_w=(1, ), scheduler=None):
    model.train()
    epoch_time = time.time()
    loss_meter = AverageMeter()

    batch_num = len(train_loader)
    gt_list = []
    preds_probs = []
    imgname_list = []

    # lr = optimizer.param_groups[1]['lr']

    # note: a fresh GradScaler per epoch resets the loss-scale state
    scaler = GradScaler()
    for step, (imgs, gt_label, imgname) in enumerate(train_loader):

        batch_time = time.time()
        imgs, gt_label = imgs.cuda(), gt_label.cuda()
        with autocast():
            train_logits, feat = model(imgs, gt_label)
        loss_list = criterion(train_logits, gt_label, feat, epoch)
        train_loss = loss_list[0]
        optimizer.zero_grad()

        scaler.scale(train_loss).backward()
        scaler.step(optimizer)
        scaler.update()

        if scheduler:
            scheduler.step()

        # train_loss.backward()
        # if cfg.TRAIN.CLIP_GRAD:
        #     clip_grad_norm_(model.parameters(), max_norm=10.0)  # make larger learning rate works
        # optimizer.step()

        if model_ema is not None:
            model_ema.update(model)

        torch.cuda.synchronize()

        # if args.distributed:
        #     reduced_loss = reduce_tensor(train_loss.data, args.world_size)
        #     loss_meter.update(to_scalar(reduced_loss))
        # else:

        loss_meter.update(to_scalar(train_loss))

        gt_list.append(gt_label.cpu().numpy())
        train_probs = train_logits.sigmoid()
        preds_probs.append(train_probs.detach().cpu().numpy())
        imgname_list.append(imgname)

        log_interval = 100

        if step % log_interval == 0 or step + 1 == len(train_loader):
            if args.local_rank == 0:
                print(
                    f'{time_str()}, Step {step}/{batch_num} in Ep {epoch}, LR {optimizer.param_groups[1]["lr"]:.1e}, {time.time() - batch_time:.2f}s ',
                    f'train_loss:{loss_meter.val:.4f}',
                    f'train_avg_loss:{loss_meter.avg:.4f}')

    train_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    if args.local_rank == 0:
        print(
            f'Epoch {epoch}, LR {optimizer.param_groups[1]["lr"]:.1e}, Train_Time {time.time() - epoch_time:.2f}s, Loss: {loss_meter.val:.4f}'
        )

    return train_loss, gt_label, preds_probs, imgname_list
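model_ema.update(model) in example #13 assumes an exponential-moving-average wrapper (in the spirit of timm's ModelEmaV2). A minimal sketch of such a wrapper, written as an assumption rather than the actual class used; the EMA copy would be called as ema.module(imgs):

import copy
import torch

class ModelEma:
    # minimal EMA sketch: keeps a decayed copy of the model's state
    def __init__(self, model, decay=0.9998):
        self.module = copy.deepcopy(model).eval()
        self.decay = decay
        for p in self.module.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model):
        ema_state = self.module.state_dict()
        for name, value in model.state_dict().items():
            if value.dtype.is_floating_point:
                ema_state[name].mul_(self.decay).add_(value,
                                                      alpha=1 - self.decay)
            else:
                ema_state[name].copy_(value)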
Code example #14
def batch_trainer(epoch, model, train_loader, criterion, optimizer, loss):
    model.train()
    epoch_time = time.time()
    loss_meter = AverageMeter()

    batch_num = len(train_loader)
    gt_list = []
    preds_probs = []

    lr = optimizer.param_groups[0]['lr']

    # the loader yields large/small and horizontally flipped variants plus a mask
    for step, (img_l, img_s, img_lf, img_sf, gt_label, img_mask,
               imgname) in enumerate(train_loader):

        batch_time = time.time()

        img_l, img_s, img_lf, img_sf, gt_label, img_mask = (
            img_l.cuda(), img_s.cuda(), img_lf.cuda(), img_sf.cuda(),
            gt_label.cuda(), img_mask.cuda())
        # the model returns five levels of logits plus an auxiliary loss term
        (train_logit_l, train_logit_2, train_logit_3, train_logit_4,
         train_logit_5, aux_loss) = model(img_l, img_mask, gt_label)

        # earlier variants also fed the flipped/rescaled views through the model
        # and added flip- and scale-consistency MSE terms on the attention maps
        if loss == 'CONSIST_LOSS':
            # deep supervision over four of the five levels
            train_loss_l = (0.3 * criterion(train_logit_l, gt_label) +
                            0.3 * criterion(train_logit_3, gt_label) +
                            0.7 * criterion(train_logit_4, gt_label) +
                            criterion(train_logit_5, gt_label))
            # further commented-out experiments in the original: an
            # orthogonality penalty so each attribute attends to distinct
            # regions, a vertical-ordering prior on attribute locations,
            # and an MMA penalty on linear/conv weights
            train_loss = train_loss_l + aux_loss
        else:
            raise ValueError(f'unsupported loss: {loss}')
        train_loss.backward()
        clip_grad_norm_(model.parameters(), max_norm=10.0)  # make larger learning rate works
        optimizer.step()
        optimizer.zero_grad()
        loss_meter.update(to_scalar(train_loss))

        gt_list.append(gt_label.cpu().numpy())
        train_probs = torch.sigmoid(train_logit_l)
        preds_probs.append(train_probs.detach().cpu().numpy())
        log_interval = 20
        
        if (step + 1) % log_interval == 0 or (step + 1) % len(train_loader) == 0:
            print(f'{time_str()}, Step {step}/{batch_num} in Ep {epoch}, {time.time() - batch_time:.2f}s ',
                  f'train_loss:{loss_meter.val:.4f}')

    train_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    print(f'Epoch {epoch}, LR {lr}, Train_Time {time.time() - epoch_time:.2f}s, Loss: {loss_meter.avg:.4f}')

    return train_loss, gt_label, preds_probs
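The commented-out portion of example #14 sketched a horizontal-flip consistency loss on spatial attention maps: flip the attention of the original view and compare it to the attention computed on the flipped view. The original used a flip grid with F.grid_sample; torch.flip is the simpler equivalent. A self-contained sketch (all names illustrative):

import torch
import torch.nn.functional as F

def flip_consistency_loss(att, att_flipped):
    # att, att_flipped: (bs, c, h, w) attention maps from the original and
    # horizontally flipped views; penalize disagreement after un-flipping
    return F.mse_loss(torch.flip(att, dims=[3]), att_flipped)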
Code example #15
def batch_trainer(epoch, model, train_loader, criterion, optimizer, loss):
    model.train()
    epoch_time = time.time()
    loss_meter = AverageMeter()

    batch_num = len(train_loader)
    gt_list = []
    preds_probs = []

    lr = optimizer.param_groups[1]['lr']

    for step, (imgs, gt_label, imgname) in enumerate(train_loader):

        batch_time = time.time()
        imgs, gt_label = imgs.cuda(), gt_label.cuda()

        train_logits = model(imgs, gt_label)
        if loss == 'KL_LOSS':
            # match classifier-weight correlations to a fixed similarity prior
            sim = np.load('src/sim.npy')
            cls_weight = model.state_dict(
            )['module.classifier.logits.0.weight']
            cls_weight_t = torch.transpose(cls_weight, 1, 0)
            cls = torch.mm(cls_weight, cls_weight_t)
            cls = torch.triu(cls, 1).view(-1)
            sim = torch.from_numpy(sim).float().cuda(non_blocking=True)
            sim = torch.triu(sim, 1).view(-1)
            kl_mean = F.kl_div(cls.softmax(dim=-1).log(),
                               sim.softmax(dim=-1),
                               reduction='sum')

            train_loss = criterion(train_logits, gt_label) + kl_mean

        if loss == 'KL2_LOSS':
            sim = np.load('src/sim.npy')
            cls_weight = l2_norm(train_logits, 0)
            cls_weight_t = torch.transpose(cls_weight, 1, 0)

            cls = torch.mm(cls_weight_t, cls_weight)
            cls = torch.triu(cls, 1).view(-1)
            sim = torch.from_numpy(sim).float().cuda(non_blocking=True)
            sim = torch.triu(sim, 1).view(-1)
            kl_mean = F.kl_div(cls.softmax(dim=-1).log(),
                               sim.softmax(dim=-1),
                               reduction='sum')

            train_loss = criterion(train_logits, gt_label) + kl_mean
        if loss == 'MAP_LOSS':
            # note: this branch assumes the model also exposes a spatial
            # attention map (spa_att) and a region mask (mask), which the
            # forward call above does not return
            c_2 = torch.sum(mask.float().cuda() * spa_att)  # overlap with mask
            c_3 = torch.sum(spa_att)                        # total attention mass

            train_loss = criterion(
                train_logits, gt_label) - torch.log(c_2) + 1.2 * torch.log(c_3)

        if loss == 'BCE_LOSS':
            # earlier variants added an MMA penalty on module.classifier.fc_1
            train_loss = criterion(train_logits, gt_label)
        train_loss.backward()
        clip_grad_norm_(model.parameters(),
                        max_norm=10.0)  # make larger learning rate works
        optimizer.step()
        optimizer.zero_grad()
        loss_meter.update(to_scalar(train_loss))

        gt_list.append(gt_label.cpu().numpy())
        train_probs = torch.sigmoid(train_logits)
        preds_probs.append(train_probs.detach().cpu().numpy())

        log_interval = 20
        if (step + 1) % log_interval == 0 or (step +
                                              1) % len(train_loader) == 0:
            print(
                f'{time_str()}, Step {step}/{batch_num} in Ep {epoch}, {time.time() - batch_time:.2f}s ',
                f'train_loss:{loss_meter.val:.4f}')

    train_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    print(
        f'Epoch {epoch}, LR {lr}, Train_Time {time.time() - epoch_time:.2f}s, Loss: {loss_meter.avg:.4f}'
    )

    return train_loss, gt_label, preds_probs
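l2_norm(train_logits, 0) in the KL2_LOSS branches is also undefined in these snippets. A minimal sketch assuming it L2-normalizes along the given dimension:

import torch

def l2_norm(x, dim=0, eps=1e-12):
    # assumed helper: L2-normalize x along dimension dim
    return x / x.norm(p=2, dim=dim, keepdim=True).clamp_min(eps)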
Code example #16
def batch_trainer(epoch, model, train_loader, criterion, optimizer, loss):
    model.train()
    epoch_time = time.time()
    loss_meter = AverageMeter()

    batch_num = len(train_loader)
    gt_list = []
    preds_probs = []

    lr = optimizer.param_groups[1]['lr']

    for step, (imgs, depth, gt_label, imgname) in enumerate(train_loader):
        batch_time = time.time()
        # all three inputs go to the GPU, including depth
        imgs, depth, gt_label = imgs.cuda(), depth.cuda(), gt_label.cuda()
        train_logits = model(imgs, depth, gt_label)
        if loss == 'KL_LOSS':
            sim = np.load('src/sim.npy')
            cls_weight = model.state_dict()['module.classifier.logits.0.weight']
            cls_weight_t = torch.transpose(cls_weight, 1, 0)
            cls = torch.mm(cls_weight, cls_weight_t)
            cls = torch.triu(cls, 1).view(-1)
            sim = torch.from_numpy(sim).float().cuda(non_blocking=True)
            sim = torch.triu(sim, 1).view(-1)
            kl_mean = F.kl_div(cls.softmax(dim=-1).log(), sim.softmax(dim=-1), reduction='sum')

            train_loss = criterion(train_logits, gt_label) + kl_mean
        
        if loss == 'KL2_LOSS':
            sim = np.load('src/sim.npy')
            cls_weight = l2_norm(train_logits, 0)
            cls_weight_t = torch.transpose(cls_weight, 1, 0)
           
            cls = torch.mm(cls_weight_t, cls_weight)
            cls = torch.triu(cls, 1).view(-1)
            sim = torch.from_numpy(sim).float().cuda(non_blocking=True)
            sim = torch.triu(sim, 1).view(-1)
            kl_mean = F.kl_div(cls.softmax(dim=-1).log(), sim.softmax(dim=-1), reduction='sum')

            train_loss = criterion(train_logits, gt_label) + kl_mean
        if loss == 'BCE_LOSS':
            train_loss = criterion(train_logits, gt_label)
            
        train_loss.backward()
        clip_grad_norm_(model.parameters(), max_norm=10.0)  # make larger learning rate works
        optimizer.step()
        optimizer.zero_grad()
        loss_meter.update(to_scalar(train_loss))

        gt_list.append(gt_label.cpu().numpy())
        train_probs = torch.sigmoid(train_logits)
        preds_probs.append(train_probs.detach().cpu().numpy())

        log_interval = 20
        if (step + 1) % log_interval == 0 or (step + 1) % len(train_loader) == 0:
            print(f'{time_str()}, Step {step}/{batch_num} in Ep {epoch}, {time.time() - batch_time:.2f}s ',
                  f'train_loss:{loss_meter.val:.4f}')

    train_loss = loss_meter.avg

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    print(f'Epoch {epoch}, LR {lr}, Train_Time {time.time() - epoch_time:.2f}s, Loss: {loss_meter.avg:.4f}')

    return train_loss, gt_label, preds_probs
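Tying the pieces together, an epoch loop that the simpler batch_trainer/valid_trainer variants (e.g. examples #15 and #2) plug into might look like the following sketch; model, loaders, criterion, and optimizer construction are assumed, and 'best.pth' is an illustrative path:

import torch

def run(model, train_loader, valid_loader, criterion, optimizer, epochs=30):
    best_loss = float('inf')
    for epoch in range(epochs):
        train_loss, train_gt, train_probs = batch_trainer(
            epoch, model, train_loader, criterion, optimizer, loss='BCE_LOSS')
        valid_loss, valid_gt, valid_probs = valid_trainer(
            model, valid_loader, criterion)
        # hard multi-label predictions: threshold probabilities at 0.5
        valid_preds = valid_probs > 0.5
        if valid_loss < best_loss:
            best_loss = valid_loss
            torch.save(model.state_dict(), 'best.pth')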