Example #1
def validate(
    model=None,
    data_loader=None,
):

    print('Validating...')

    val_loss_meter = pyutils.AverageMeter('loss')
    model.eval()

    with torch.no_grad():
        for _, data in tqdm(
                enumerate(data_loader),
                total=len(data_loader),
                ncols=100,
        ):
            inputs, labels = data['img'], data['label'].cuda()

            #outputs = model(inputs)
            outputs, x_hist, x_word, y_word = model(inputs)
            # forward pass only (no backward/optimize during validation)
            loss1 = F.multilabel_soft_margin_loss(outputs, labels)
            loss2 = F.multilabel_soft_margin_loss(x_hist, labels)
            loss3 = F.multilabel_soft_margin_loss(x_word, y_word)

            val_loss_meter.add({'loss': loss1.item()})

    model.train()

    return val_loss_meter.pop('loss')
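
As a quick orientation for the examples on this page, here is a minimal, self-contained shape check (not taken from any of the projects above): F.multilabel_soft_margin_loss expects logits and multi-hot float targets with the same (batch, num_classes) shape and returns a scalar mean loss by default.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 20)                     # e.g. 20 Pascal VOC classes
targets = torch.randint(0, 2, (4, 20)).float()  # multi-hot ground truth
loss = F.multilabel_soft_margin_loss(logits, targets)
print(loss.item())                              # scalar mean loss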
Example #2
def validate(model=None, data_loader=None, codebook=None):

    print('Validating...')

    val_loss_meter = pyutils.AverageMeter('loss')
    model.eval()

    with torch.no_grad():
        for _, data in tqdm(
                enumerate(data_loader),
                total=len(data_loader),
                ncols=100,
        ):
            inputs, labels = data['img'], data['label'].cuda()

            #outputs = model(inputs)
            outputs, x_hist, x_word, y_word, xx, _ = model(inputs,
                                                           codebook=codebook)
            # forward pass only (no backward/optimize during validation)
            loss1 = F.multilabel_soft_margin_loss(outputs, labels)
            loss2 = F.multilabel_soft_margin_loss(x_hist, labels)
            loss3 = F.multilabel_soft_margin_loss(x_word, y_word)
            #loss4 = loss_re.mean()

            #val_loss_meter.add({'loss': loss1.item()})

    model.train()
    print('val loss1: %f, loss2: %f, loss3: %f\n' % (loss1, loss2, loss3))
    return True
Example #3
def validate(model=None, data_loader=None,):

    print('Validating...')

    val_loss_meter = pyutils.AverageMeter('loss')
    model.eval()

    with torch.no_grad():
        for _, data in tqdm(enumerate(data_loader), total=len(data_loader), ncols=100,):
            inputs, labels = data['img'], data['label'].cuda()

            #outputs = model(inputs)
            outputs, x_hist, x_word, y_word, loss_decov, loss_entropy = model(inputs)
            # forward pass only (no backward/optimize during validation)
            loss1 = F.multilabel_soft_margin_loss(outputs, labels)
            loss2 = F.multilabel_soft_margin_loss(x_hist, labels)
            loss3 = F.multilabel_soft_margin_loss(x_word, y_word)
            loss4 = loss_decov.mean()
            loss5 = loss_entropy.mean()

            val_loss_meter.add({'loss1': loss1.item(), 'loss2': loss2.item(), 'loss3': loss3.item(), 'loss4': loss4.item(), 'loss5': loss5.item(),})

    model.train()
    print('val loss1: %f, loss2: %f, loss3: %f, loss_decov: %f, loss_entropy: %f...'%(val_loss_meter.pop('loss1'), val_loss_meter.pop('loss2'), val_loss_meter.pop('loss3'),val_loss_meter.pop('loss4'), val_loss_meter.pop('loss5')))

    return True
Example #4
File: loss.py Project: 6clc/IRN
    def forward(self, preds, labels):
        logits = F.sigmoid(preds)

        # effective_num = 1.0 - np.power(self.beta, self.samples_per_cls)
        # weights = (1.0 - self.beta) / np.array(effective_num)
        # weights = weights / np.sum(weights) * self.no_of_classes
        weights = torch.tensor(self.samples_per_cls, device=labels.device)

        # weights = weights.unsqueeze(0)
        # # print(weights.shape, labels.shape, self.no_of_classes)
        # weights = weights.repeat(labels.shape[0],1) * labels
        # weights = weights.sum(1)
        # weights = weights.unsqueeze(1)
        # weights = weights.repeat(1,self.no_of_classes)

        if self.loss_type == "focal":
            cb_loss = focal_loss(labels, logits, weights, self.gamma)
        elif self.loss_type == "sigmoid":
            cb_loss = F.binary_cross_entropy_with_logits(input=logits,
                                                         target=labels,
                                                         weight=weights)
        elif self.loss_type == "softmax":
            pred = logits.softmax(dim=1)
            cb_loss = F.binary_cross_entropy(input=pred,
                                             target=labels,
                                             weight=weights)
        elif self.loss_type == 'msl':
            cb_loss = F.multilabel_soft_margin_loss(input=preds,
                                                    target=labels,
                                                    weight=weights)
        return cb_loss
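
The commented-out lines in this forward correspond to the effective-number reweighting used by class-balanced losses. A minimal standalone sketch of that weight computation, assuming `samples_per_cls` is a per-class sample-count list and `beta`/`no_of_classes` have the same meaning as in the snippet, might look like:

import numpy as np
import torch

def effective_number_weights(samples_per_cls, beta, no_of_classes):
    # Per-class weight = (1 - beta) / (1 - beta^n_c), renormalized so the
    # weights sum to the number of classes (mirrors the commented-out code).
    effective_num = 1.0 - np.power(beta, samples_per_cls)
    weights = (1.0 - beta) / np.array(effective_num)
    weights = weights / np.sum(weights) * no_of_classes
    return torch.tensor(weights, dtype=torch.float32)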
Example #5
def class_reg_loss98_6(input: Tensor,
                       target: Tensor,
                       output2: Tensor,
                       output3: Tensor,
                       size_average: bool = True,
                       reduce: bool = True,
                       difficult_samples: bool = False,
                       tl: int = 5) -> Tensor:
    """
    loss in the first stage for both pascal and coco
    tl: subitizing range
    """
    gt = target.clone()

    index2 = gt != 0
    target[index2] = 1
    index2_2 = gt <= tl - 1
    index2_4 = gt >= tl
    index2 = index2 & index2_2
    num_class = int(gt.size()[1])
    loss2 = torch.nn.MSELoss()
    loss5 = torch.nn.MarginRankingLoss(margin=0.0)

    aggregation1 = F.adaptive_avg_pool2d(output2, 1).squeeze(2).squeeze(2)

    loss_all = loss2(aggregation1[index2],
                     gt[index2]) + F.multilabel_soft_margin_loss(
                         input, target, None, size_average, reduce)
    if torch.sum(index2_4) != 0:
        num_ins_5 = torch.sum(index2_4)
        loss_all = loss_all + 0.1 * loss5(
            aggregation1[index2_4],
            tl * torch.ones((num_ins_5, )).cuda(),
            torch.ones((num_ins_5, )).cuda())
    return loss_all
Example #6
def class_reg_loss96_7_2(
    input: Tensor, 
    target: Tensor,
    output2: Tensor,
    output3: Tensor,
    size_average: bool = True,
    reduce: bool = True) -> Tensor:
    """loss in second stage for pascal
    """
    gt=target.clone()
    target[gt!=0]=1
    p_vs_all=torch.sum(target,0)/target.size()[0]
    p_vs_all=p_vs_all<0.2
    index1=gt==0

    index_neg3=(index1&p_vs_all).float()*torch.rand((gt.size()[0],gt.size()[1])).cuda().float()
    index_neg3=index_neg3.float()<1.1

    index_neg3=index_neg3
    index1_sam=index_neg3&index1
    index1_sam=index1_sam
    # index1_pre=input>=0
    # index1=(index1&index1_pre)
    index2=gt!=0
    index2_2=gt<=4   # below sb
    index2_4=gt>=5   # beyond sb
    index2=index2&index2_2

    index2_3=index2.clone()
    index2_3=index1_sam|index2_3   # ???

    batch_size=int(gt.size()[0])
    num_class=int(gt.size()[1])
    loss2 = torch.nn.BCEWithLogitsLoss()
    loss3 = torch.nn.BCEWithLogitsLoss()
    loss4 = torch.nn.MSELoss()
    loss5 = torch.nn.MarginRankingLoss(margin=0.0)
    # gaussian_filter=gauss_filter(5,1)
    # output4=gaussian_filter(output3)
    output4=output3
    index3=output4!=0
    index3=(index2.view(index2.size()[0],index2.size()[1],1,1)&index3)
    #index4=output2!=0
    index4=(output2!=0)|(output2==0)
    aggregation1 = F.adaptive_avg_pool2d(output2, 1).squeeze(2).squeeze(2)
    index4=(index1_sam.view(index1_sam.size()[0],index1_sam.size()[1],1,1)&index4)
    output4.detach_()
    # print(index3.size(),index4.size())
    loss_all=F.multilabel_soft_margin_loss(input, target, None, size_average, reduce)
    if torch.sum(index4)!=0:
        loss_all=loss_all+loss3(output2[index4],output4[index4])
    if torch.sum(index3)!=0:
        loss_all=loss_all+loss2(output2[index3], output4[index3])
    if torch.sum(index2_3)!=0:
        loss_all=loss_all+loss4(aggregation1[index2_3],gt[index2_3])
        loss_all=loss_all+3*torch.sum(1.0 / (gt[index2_3] + 1) * (aggregation1[index2_3] - gt[index2_3]) ** 2)  # relMSE
    if torch.sum(index2_4)!=0:
        num_ins_5=torch.sum(index2_4)
        loss_all=loss_all+0.1*loss5(aggregation1[index2_4],5*torch.ones((num_ins_5,)).cuda(),torch.ones((num_ins_5,)).cuda())
    return loss_all
Example #7
def validate(model, data_loader):
    print('validating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss1', 'loss2')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack['img']

            label = pack['label'].cuda(non_blocking=True)

            x = model(img)
            loss1 = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss1': loss1.item()})

    model.train()

    validation_loss = (val_loss_meter.pop('loss1'))

    print('loss: %.4f' % validation_loss)

    return validation_loss
Example #8
def validate(
    model=None,
    data_loader=None,
):

    print('Validating...')

    val_loss_meter = pyutils.AverageMeter('loss')
    model.eval()

    with torch.no_grad():
        for _, data in tqdm(enumerate(data_loader),
                            total=len(data_loader),
                            ascii=' 123456789#'):
            _, inputs, labels = data

            #inputs = inputs.to()
            #labels = labels.to(inputs.device)

            outputs, _ = model(inputs)
            labels = labels.to(outputs.device)

            loss = F.multilabel_soft_margin_loss(outputs, labels)
            val_loss_meter.add({'loss': loss.item()})

    model.train()

    return val_loss_meter.pop('loss')
Example #9
def validate(model, data_loader):
    print('\nvalidating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss')
    val_loss_metersub = pyutils.AverageMeter('losssub')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack[1]
            label = pack[2].cuda(non_blocking=True)

            xf, x, xsub = model(img)
            loss = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss': loss.item()})
            # losssub = F.multilabel_soft_margin_loss(xsub, labelssubi)

            # val_loss_metersub.add({'losssub': losssub.item()})
    model.train()

    print('loss:', val_loss_meter.pop('loss'))

    return
Example #10
    def training_step(self, batch, batch_idx):
        features, labels = batch
        predictions = self.forward(features)
        loss = F.multilabel_soft_margin_loss(predictions, labels)

        logs = {"loss": loss}
        return {"loss": loss, "log": logs}
Example #11
 def train_on_batch(self, batch):
     self.train()
     img = torch.stack(batch['img']).cuda(non_blocking=True)
     label = torch.stack(batch['label']).cuda(non_blocking=True)
     x = self.model(img)
     loss = F.multilabel_soft_margin_loss(x, label)
     return loss
Example #12
    def get_loss(self, batch):
        x, label = batch
        score, seg, cam, seeds = self(x)
        cls_loss = F.multilabel_soft_margin_loss(score, label)
        if self.seg_ratio == 0:
          return cls_loss

        criterion = torch.nn.CrossEntropyLoss(ignore_index=self.num_classes)
        seeds = torch.argmax(seeds, dim=1)
        seg_padded = torch.cat((seg, seg[:,:1,::]), dim=1)
        seed_loss = criterion(seg_padded, seeds)
        
        probs = nn.Softmax(dim=1)(seg)
        resize_img = nn.Upsample(size=x.shape[2:], mode='bilinear', align_corners=True)
        probs = resize_img(probs)
        roi = resize_img(seeds.unsqueeze(1).float()).squeeze(1)
        denormalized_image = denormalizeimage(x, mean=mean, std=std)
        densecrfloss = self.densecrflosslayer(denormalized_image,probs,roi)
        self.loss_decomp['cls'] += [cls_loss.detach()]
        self.loss_decomp['seed'] += [seed_loss.detach()]
        self.loss_decomp['dCRF'] += [densecrfloss.detach()]
        seed_loss += densecrfloss.item()
        loss = cls_loss + self.seg_ratio * seed_loss

        return loss
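
The `denormalizeimage` helper and `self.densecrflosslayer` used above are assumed to exist in the surrounding project and are not shown here. A hypothetical version of the denormalization helper, assuming ImageNet-style per-channel normalization and a CRF layer that expects 0-255 RGB images, could look like:

import torch

def denormalizeimage(images, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    # Hypothetical reconstruction: undo (x - mean) / std per channel and
    # rescale to the 0-255 range for the dense-CRF loss layer.
    mean = torch.tensor(mean, device=images.device).view(1, -1, 1, 1)
    std = torch.tensor(std, device=images.device).view(1, -1, 1, 1)
    return (images * std + mean) * 255.0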
Example #13
def validate(model, data_loader):
    print('validating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss1', 'loss2')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            ############################# modified code #############################
            img = pack['img']
            label = pack['label'].cuda(non_blocking=True)
            aug_img = pack['aug_img']
            aug_label = pack['aug_label'].cuda(non_blocking=True)
            con_imgs = torch.cat([img, aug_img], 0)
            con_labels = torch.cat([label, aug_label], 0)
            x = model(con_imgs)

            loss1 = F.multilabel_soft_margin_loss(x, con_labels)
            ############################# modified code #############################

            val_loss_meter.add({'loss1': loss1.item()})

    model.train()

    print('loss: %.4f' % (val_loss_meter.pop('loss1')))

    return
Example #14
def get_loss(loss_function, output, label, use_gpu):
    '''
    get the objective loss of the model and backpropagate to compute gradients
    some loss functions are not implemented
    '''
    if not isinstance(loss_function, str):
        raise TypeError('loss_function should be str object')
    label = np.asarray(label)

    if loss_function == 'binary_cross_entropy':
        loss = F.binary_cross_entropy(output, label)
    elif loss_function == 'poisson_nll_loss':
        loss = F.poisson_nll_loss(output, label)
    elif loss_function == 'cross_entropy':
        loss = F.cross_entropy(output, label)
    elif loss_function == 'hinge_embedding_loss':
        loss = F.hinge_embedding_loss(output, label)
    elif loss_function == 'margin_ranking_loss':
        loss = F.margin_ranking_loss(output, label)
    elif loss_function == 'multilabel_soft_margin_loss':
        loss = F.multilabel_soft_margin_loss(output, label)
    elif loss_function == 'multi_margin_loss':
        loss = F.multi_margin_loss(output, label)
    elif loss_function == 'nll_loss':
        if use_gpu:
            label = Variable(torch.LongTensor(label).cuda())
        else:
            label = Variable(torch.LongTensor(label))
        loss = F.nll_loss(output, label)
    elif loss_function == 'binary_cross_entropy_with_logits':
        loss = F.binary_cross_entropy_with_logits(output, label)

    return loss
Example #15
def validate(model, val_loader):

    print('\nvalidating ... ', flush=True, end='')
    val_loss = AverageMeter()
    model.eval()

    with torch.no_grad():
        for idx, dat in tqdm(enumerate(val_loader)):
            # img_name, img, label = dat
            _, _, input1, input2, _, label1, label2 = dat
            # label1 = label1.cuda(non_blocking=True)
            label1 = label1.cuda()
            img = [input1, input2]
            # print("here: ",img.size())
            logits, co_logits = model(img)

            # if len(logits.shape) == 1:
            #     logits = logits.reshape(label.shape)
            # print(logits.size(),label.size(),img.size())
            loss_val = F.multilabel_soft_margin_loss(
                logits[:int(input1.size(0))], label1)
            val_loss.update(loss_val.data.item(),
                            input1.size()[0] + input2.size()[0])

    print('validating loss:', val_loss.avg)
Example #16
def validate(model, data_loader):
    print('\nvalidating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack[1]
            label = pack[2]
            if is_cuda_available:
                label = label.cuda(non_blocking=True)

            x = model(img)
            loss = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss': loss.item()})

    model.train()
    val_loss = val_loss_meter.pop('loss')
    print('loss:', val_loss)
    wandb.log({
        "val_loss": val_loss
    })
    return
Example #17
File: nn_ops.py Project: yuguo68/pytorch
 def forward(self):
     a = torch.randn(3, 2)
     b = torch.rand(3, 2)
     c = torch.rand(3)
     log_probs = torch.randn(50, 16, 20).log_softmax(2).detach()
     targets = torch.randint(1, 20, (16, 30), dtype=torch.long)
     input_lengths = torch.full((16, ), 50, dtype=torch.long)
     target_lengths = torch.randint(10, 30, (16, ), dtype=torch.long)
     return len((
         F.binary_cross_entropy(torch.sigmoid(a), b),
         F.binary_cross_entropy_with_logits(torch.sigmoid(a), b),
         F.poisson_nll_loss(a, b),
         F.cosine_embedding_loss(a, b, c),
         F.cross_entropy(a, b),
         F.ctc_loss(log_probs, targets, input_lengths, target_lengths),
         # F.gaussian_nll_loss(a, b, torch.ones(5, 1)), # ENTER is not supported in mobile module
         F.hinge_embedding_loss(a, b),
         F.kl_div(a, b),
         F.l1_loss(a, b),
         F.mse_loss(a, b),
         F.margin_ranking_loss(c, c, c),
         F.multilabel_margin_loss(self.x, self.y),
         F.multilabel_soft_margin_loss(self.x, self.y),
         F.multi_margin_loss(self.x, torch.tensor([3])),
         F.nll_loss(a, torch.tensor([1, 0, 1])),
         F.huber_loss(a, b),
         F.smooth_l1_loss(a, b),
         F.soft_margin_loss(a, b),
         F.triplet_margin_loss(a, b, -b),
         # F.triplet_margin_with_distance_loss(a, b, -b), # can't take variable number of arguments
     ))
Example #18
def PRMLoss(model, batch, visualize=False):
    n, c, h, w = batch["images"].shape

    model.train()
    O = model(batch["images"].cuda())
    loss = F.multilabel_soft_margin_loss(O,
                                         (batch["counts"].cuda() > 0).float())

    return loss
Example #19
    def validation_step(self, batch, batch_idx):
        features, labels = batch
        predictions = self.forward(features)
        loss = F.multilabel_soft_margin_loss(predictions, labels)

        with torch.no_grad():
            pred = torch.sigmoid(predictions).cpu().numpy()
            actual = labels.cpu().numpy()

            lrap_score = torch.tensor(LRAP(actual, pred))

        return {"val_loss": loss, "LRAP": lrap_score}
Example #20
def run(args):
    model = getattr(importlib.import_module(args.cam_network), 'Net')()
    train_dataset = voc12.dataloader.VOC12ClassificationDataset(args.train_list, voc12_root=args.voc12_root,
                                                                resize_long=(320, 640), hor_flip=True,
                                                                crop_size=512, crop_method="random")
    train_data_loader = DataLoader(train_dataset, batch_size=args.cam_batch_size,
                                   shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=True)
    max_step = (len(train_dataset) // args.cam_batch_size) * args.cam_num_epoches

    val_dataset = voc12.dataloader.VOC12ClassificationDataset(args.val_list, voc12_root=args.voc12_root,
                                                              crop_size=512)
    val_data_loader = DataLoader(val_dataset, batch_size=args.cam_batch_size,
                                 shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=True)
    print('train_cam  val_data_loader')
    param_groups = model.trainable_parameters()
    optimizer = torchutils.PolyOptimizer([
        {'params': param_groups[0], 'lr': args.cam_learning_rate, 'weight_decay': args.cam_weight_decay},
        {'params': param_groups[1], 'lr': 10*args.cam_learning_rate, 'weight_decay': args.cam_weight_decay},
    ], lr=args.cam_learning_rate, weight_decay=args.cam_weight_decay, max_step=max_step)

    model = torch.nn.DataParallel(model).cuda()
    model.train()

    avg_meter = pyutils.AverageMeter()

    timer = pyutils.Timer()

    for ep in range(args.cam_num_epoches):
        print('Epoch %d/%d' % (ep+1, args.cam_num_epoches))
        for step, pack in enumerate(train_data_loader):
            img = pack['img']
            label = pack['label'].cuda(non_blocking=True)
            x = model(img)
            loss = F.multilabel_soft_margin_loss(x, label)
            avg_meter.add({'loss1': loss.item()})
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (optimizer.global_step-1)%100 == 0:
                timer.update_progress(optimizer.global_step / max_step)
                print('step:%5d/%5d' % (optimizer.global_step - 1, max_step),
                      'loss:%.4f' % (avg_meter.pop('loss1')),
                      'imps:%.1f' % ((step + 1) * args.cam_batch_size / timer.get_stage_elapsed()),
                      'lr: %.4f' % (optimizer.param_groups[0]['lr']),
                      'etc:%s' % (timer.str_estimated_complete()), flush=True)

        else:
            validate(model, val_data_loader)
            timer.reset_stage()

    torch.save(model.module.state_dict(), args.cam_weights_name + '.pth')
    torch.cuda.empty_cache()
Example #21
def train(args, desc, defi, sememe, encoder, decoder, optimizer, lang):
    optimizer.zero_grad()
    input_length = desc.size()[1]
    target_length = sememe.size()[1]
    batch_size = desc.size()[0]  # type: int
    assert batch_size == sememe.size()[0]
    sememe_mask = (sememe>0).int()
    if use_cuda: sememe_mask = sememe_mask.cuda()
    loss = 0.

    # encoding part
    encoder_output, encoder_state = encoder(args, desc, defi)

    # decoding part
    if args.architecture == 'multi-label':
        output = decoder(encoder_state)
        out_weight = torch.ones(len(lang.word2id))
        for i in range(4):
            out_weight[i] = 0.
        if use_cuda: out_weight = out_weight.cuda()
        loss += F.multilabel_soft_margin_loss(output, multi_hot(sememe, len(lang.word2id)), weight=out_weight)
    elif args.architecture == 'seq2seq':
        out_weight = Variable(torch.ones(len(lang.word2id)))
        out_weight[0] = 0.
        out_weight = torch.unsqueeze(out_weight, 0)
        if use_cuda: out_weight = out_weight.cuda()
        decoder_input = Variable(torch.LongTensor([lang.SOS_token]*batch_size))
        decoder_hidden = torch.squeeze(encoder_state,0) if encoder_state.dim()==3 else encoder_state
        last_output = Variable(torch.zeros((batch_size, len(lang.word2id))))
        if use_cuda:
            decoder_input = decoder_input.cuda()
            last_output = last_output.cuda()
        history_outputs = torch.zeros((batch_size, len(lang.word2id)))
        all_sememe = F.softmax(multi_hot(sememe, len(lang.word2id)), 1)
        if use_cuda: history_outputs = history_outputs.cuda()
        for time in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(args,
                decoder_input, decoder_hidden, encoder_output, Variable(history_outputs/float(time+0.00001)), last_output)
            history_outputs = history_outputs+decoder_output.data
            last_output = F.softmax(decoder_output, 1)
            if args.soft_loss:
                loss += cross_entropy(decoder_output, (multi_hot(torch.unsqueeze(sememe[:,time],1),len(lang.word2id))+all_sememe)/2., weight=out_weight)
            else:
                loss += F.cross_entropy(decoder_output, sememe[:,time], ignore_index=0)
            decoder_input = sememe[:,time]
        loss = loss/torch.sum(sememe_mask.float())
    loss.backward()
    optimizer.step()

    return loss.data
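
The `multi_hot` helper used above is not shown. Judging from its call sites, it maps a (batch, seq_len) tensor of sememe indices to a (batch, vocab_size) target suitable for multilabel_soft_margin_loss; a hypothetical implementation could be:

import torch

def multi_hot(indices, num_classes):
    # Hypothetical reconstruction: scatter 1s at the given index positions.
    # Padding/special indices (0-3) also get set here, but the example above
    # zeroes their columns via `out_weight`, so they do not affect the loss.
    out = torch.zeros(indices.size(0), num_classes, device=indices.device)
    out.scatter_(1, indices.long(), 1.0)
    return out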
Example #22
def optimize_step(rnn, input_tensors, category_tensor, optimizer):
    rnn.zero_grad()
    rnn.train()
    output = rnn(input_tensors)
    # print(output)

    loss = F.multilabel_soft_margin_loss(output, category_tensor.float())
    # loss = F.binary_cross_entropy(output, category_tensor.float())
    # loss = customized_loss2(output, category_tensor.float())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    return loss.item()
Example #23
 def forward(self, predict, gold, local, mention_entity, m, n):
     # predict is the predicted result; gold is the ground-truth answer
     # P_e is the neural network's entity->entity parameter; SR is the statistical one
     loss1 = F.multilabel_soft_margin_loss(predict, gold, size_average=True)
     dist = F.pairwise_distance(local, predict)
     m_e = []
     for i in range(m):
         temp = []
         for j in range(n):
             temp.append(int(mention_entity[i][j]))
         m_e.append(temp)
     m_e1 = Variable(torch.Tensor(m_e)).cuda()
     normal = m_e1 * dist
     loss2 = normal.sum() / (len(normal))
     loss = torch.add(loss1, loss2)
     return loss
Example #24
def train(current_epoch):
    train_loss = AverageMeter()
    cls_acc_matrix = Cls_Accuracy()

    model.train()

    global_counter = args.global_counter
    """ learning rate decay """
    res = reduce_lr(args, optimizer, current_epoch)

    for idx, dat in enumerate(train_loader):

        img, label, _ = dat
        label = label.to('cuda', non_blocking=True)
        img = img.to('cuda', non_blocking=True)

        logit = model(img)
        """ classification loss """
        loss = F.multilabel_soft_margin_loss(logit, label)
        """ backprop """
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        cls_acc_matrix.update(logit, label)
        train_loss.update(loss.data.item(), img.size()[0])

        global_counter += 1
        """ tensorboard log """
        if global_counter % args.show_interval == 0:
            train_cls_acc = cls_acc_matrix.compute_avg_acc()

            writer.add_scalar('train loss', train_loss.avg, global_counter)
            writer.add_scalar('train acc', train_cls_acc, global_counter)

            print('Epoch: [{}][{}/{}]\t'
                  'LR: {:.5f}\t'
                  'ACC: {:.5f}\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      current_epoch,
                      idx + 1,
                      len(train_loader),
                      optimizer.param_groups[0]['lr'],
                      train_cls_acc,
                      loss=train_loss))

    args.global_counter = global_counter
Example #25
File: train.py Project: weaklySSS/I2CRC
def validate(model, val_loader):

    print('\nvalidating ... ', flush=True, end='')
    val_loss = AverageMeter()
    model.eval()

    with torch.no_grad():
        for idx, dat in tqdm(enumerate(val_loader)):
            img_name, img, label = dat
            label = label.cuda(non_blocking=True)
            logits = model(img)
            if len(logits.shape) == 1:
                logits = logits.reshape(label.shape)
            loss_val = F.multilabel_soft_margin_loss(logits, label)
            val_loss.update(loss_val.data.item(), img.size()[0])

    print('validating loss:', val_loss.avg)
Example #26
File: train.py Project: zhangyuygss/WSL
def validation(val_loader, model):
    batch_time = AverageMeter()
    accu = AverageMeter()
    losses = AverageMeter()
    # switch to evaluation mode
    model.eval()

    end = time.time()
    for i, data in enumerate(val_loader):
        input = data['image'].float()
        target = data['class'].float()
        if args.cuda:
            input_var = torch.autograd.Variable(input,
                                                volatile=True).cuda(gpuID)
            target_var = torch.autograd.Variable(target,
                                                 volatile=True).cuda(gpuID)
            target = target.cuda(gpuID)
        else:
            input_var = torch.autograd.Variable(input, volatile=True)
            target_var = torch.autograd.Variable(target, volatile=True)

        # compute output
        output = model(input_var)
        loss = F.multilabel_soft_margin_loss(output, target_var)
        if args.cuda:
            loss = loss.cuda(gpuID)

        # measure accuracy and record loss
        acc = accuracy(output, target)
        accu.update(acc)
        losses.update(loss.data[0], input.size(0))
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            info = 'Test: [{0}/{1}] '.format(i, len(val_loader)) + \
                   'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '.format(batch_time=batch_time) + \
                   'Loss {loss.val:.4f} ({loss.avg:.4f}) '.format(loss=losses) + \
                   'Accuracy {accu.val:.4f} (avg:{accu.avg:.4f}) '.format(accu=accu)
            print(info)
            if not args.no_log:
                with open(log_file, 'a+') as f:
                    f.write(info + '\n')

    return accu.avg, losses.avg
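
This example uses the pre-0.4 `Variable(..., volatile=True)` and `loss.data[0]` APIs. On current PyTorch the same validation loop is usually written with `torch.no_grad()` and `loss.item()`; a rough self-contained sketch of the equivalent (the accuracy bookkeeping is omitted) is:

import torch
import torch.nn.functional as F

def validation_modern(val_loader, model, device='cuda'):
    # torch.no_grad() replaces volatile=True; loss.item() replaces loss.data[0].
    model.eval()
    total_loss, total_n = 0.0, 0
    with torch.no_grad():
        for data in val_loader:
            input = data['image'].float().to(device)
            target = data['class'].float().to(device)
            output = model(input)
            loss = F.multilabel_soft_margin_loss(output, target)
            total_loss += loss.item() * input.size(0)
            total_n += input.size(0)
    return total_loss / max(total_n, 1)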
Example #27
def class_reg_loss98_6(
    input: Tensor, 
    target: Tensor,
    output2: Tensor,
    output3: Tensor,    
    size_average: bool = True,
    reduce: bool = True,
    difficult_samples: bool = False,
    tl: int = 5) -> Tensor:
    """
    loss in the first stage for both pascal and coco
    tl: substizing range
    """
    gt=target.clone()  # ground truth

    index2=gt!=0
    target[index2]=1
    index2_2=gt<=tl-1    # in the subitizing (sb) range
    index2_4=gt>=tl      # beyond this range
    index2=index2&index2_2   # mask of whether in the sb range
    num_class=int(gt.size()[1])
    loss2 = torch.nn.MSELoss()
    loss5 = torch.nn.MarginRankingLoss(margin=0.0)

    aggregation1 = F.adaptive_avg_pool2d(output2, 1).squeeze(2).squeeze(2)  # average of 14*14 density map for each image and each class

    loss_all=loss2(aggregation1[index2], gt[index2])+F.multilabel_soft_margin_loss(input, target, None, size_average, reduce)
    #            first term: MLE of counts             second term: classification loss  
    # we need to add another term here for the "relMLE of counts"
    
    loss_all = loss_all + 3 * torch.sum(1.0 / (gt[index2] + 1) * (aggregation1[index2] - gt[index2]) ** 2)
    # ref: https://discuss.pytorch.org/t/how-to-implement-weighted-mean-square-error/2547

    #print("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh: start")
    #print(1.0 / (gt[index2] + 1))
    #print((1.0 / (gt[index2] + 1)).shape)
    #print("--")
    #print(gt[index2])
    #print(gt[index2].shape)
    #print("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh: end")

    if torch.sum(index2_4)!=0:  # ranking loss
        num_ins_5=torch.sum(index2_4)
        loss_all=loss_all+0.1*loss5(aggregation1[index2_4],tl*torch.ones((num_ins_5,)).cuda(),torch.ones((num_ins_5,)).cuda())
    return loss_all
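
The weighted term tagged relMSE above (also present in example #6) down-weights the squared count error for large ground-truth counts. Pulled out as a standalone helper with the same 1/(gt+1) weighting, it is simply:

import torch

def rel_mse(pred_counts, gt_counts):
    # Relative MSE: squared error scaled by 1 / (ground-truth count + 1),
    # summed over the selected entries (matches the inline term above).
    return torch.sum(1.0 / (gt_counts + 1) * (pred_counts - gt_counts) ** 2)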
Example #28
def validate(model, data_loader):
    print('\nvalidating ... ', flush=True, end='')
    model.eval()
    val_loss = 0
    data_loader = tqdm(data_loader, desc='Validate')
    with torch.no_grad():

        for iter, pack in enumerate(data_loader):
            img = pack[1].cuda()
            target = pack[2].cuda()
            inp = pack[3].cuda()
            x = model(img, inp)
            loss = F.multilabel_soft_margin_loss(x, target)
            val_loss = loss + val_loss

    model.train()
    print('validate loss:', val_loss)
    return
Example #29
def multilabel_soft_margin_loss2(input: Tensor,
                                 target: Tensor,
                                 output2: Tensor,
                                 output3: Tensor,
                                 weight: Optional[Tensor] = None,
                                 size_average: bool = True,
                                 reduce: bool = True,
                                 difficult_samples: bool = False) -> Tensor:
    """Multilabel soft margin loss.
    """
    gt = target.clone()
    gt = gt.squeeze()
    index2 = gt != 0
    target[index2] = 1
    # gt_label = target

    return F.multilabel_soft_margin_loss(input, target, weight, size_average,
                                         reduce)
def calculate_loss(answer, pred, method):
    """
    answer = [batch, 3129]
    pred = [batch, 3129]
    """
    if method == 'binary_cross_entropy_with_logits':
        loss = F.binary_cross_entropy_with_logits(pred, answer) * config.max_answers
    elif method == 'soft_cross_entropy':
        nll = -F.log_softmax(pred, dim=1)
        loss = (nll * answer).sum(dim=1).mean()   # this is worse than binary_cross_entropy_with_logits
    elif method == 'KL_divergence':
        pred = F.softmax(pred, dim=1)
        kl = ((answer / (pred + 1e-12)) + 1e-12).log()
        loss = (kl * answer).sum(1).mean()
    elif method == 'multi_label_soft_margin':
        loss = F.multilabel_soft_margin_loss(pred, answer)
    else:
        print('Error, pls define loss function')
    return loss
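
A small smoke test for `calculate_loss`, assuming the 3129-way answer space given in its docstring (the `config.max_answers` constant is only needed by the first branch, so it is not required here):

import torch

pred = torch.randn(8, 3129)      # raw answer logits
answer = torch.rand(8, 3129)     # soft ground-truth answer scores
loss = calculate_loss(answer, pred, method='multi_label_soft_margin')
print(loss.item())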