Example #1
def train(model, alphabetStr, train_loader, eval_loader, args):
    optimizer = Adam(model.parameters(), lr=args.lr)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        w = args.rampup_coefficient * ramps.sigmoid_rampup(
            epoch, args.rampup_length)
        for batch_idx, (x, g_x, _, idx) in enumerate(train_loader):
            _, feat = model(x.to(device))
            _, feat_g = model(g_x.to(device))
            prob = feat2prob(feat, model.center)
            prob_g = feat2prob(feat_g, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            mse_loss = F.mse_loss(prob, prob_g)
            loss = loss + w * mse_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_record.update(loss.item(), x.size(0))
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(
            epoch, loss_record.avg))
        _, _, _, probs = test(model, eval_loader, args)
        args.p_targets = target_distribution(probs)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #2
    def train_multi_graph(self):
        losses = AverageMeter()

        self.model.train()
        with tqdm(total=len(self.train_dataloader)) as pbar:
            for i, (X, Y) in enumerate(self.train_dataloader):
                # graph batch train

                # enable mask
                X[-1].ndata[
                    'feature'] = X[-1].ndata['feature'] * X[-1].ndata['mask']
                # get output from model
                output = self.model(X, Y)

                # select data and calculate loss
                predict = output[0, X[-1].ndata['mask'] == 0].view(
                    -1, self.model._num_feats)
                truth = Y[0].ndata['feature'][X[-1].ndata['mask'] == 0].view(
                    -1, self.model._num_feats)
                loss = self.criterion(predict, truth)

                self.optimizer.zero_grad()
                loss.backward()
                # TODO: gradient clipping etc. (done)
                # clip_grad_norm(model.parameters(), max_norm=10)  # max_norm is a hyperparameter still to be decided
                self.optimizer.step()
                losses.update(loss.item())
                pbar.set_description('Loss: {:.2f}'.format(loss.item()))
                pbar.update()

        self.epoch += 1
        return losses.avg
Example #3
    def eval_multi_graph(self):
        losses = AverageMeter()
        self.model.eval()

        with tqdm(total=len(self.val_dataloader),
                  desc='Validation round') as pbar:
            for i, (X, Y) in enumerate(self.val_dataloader):
                # graph batch evaluation

                # enable mask
                X[-1].ndata[
                    'feature'] = X[-1].ndata['feature'] * X[-1].ndata['mask']
                # get output from model
                output = self.model(X, Y)

                # select data and calculate loss
                predict = output[0, X[-1].ndata['mask'] == 0].view(
                    -1, self.model._num_feats)
                truth = Y[0].ndata['feature'][X[-1].ndata['mask'] == 0].view(
                    -1, self.model._num_feats)
                loss = self.criterion(predict, truth)

                losses.update(loss.item())
                pbar.update()

        return losses.avg
Example #4
def PI_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)
    w = 0
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        exp_lr_scheduler.step()
        w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length) 
        for batch_idx, ((x, x_bar), label, idx) in enumerate(tqdm(train_loader)):
            x, x_bar = x.to(device), x_bar.to(device)
            _, feat = model(x)
            _, feat_bar = model(x_bar)
            prob = feat2prob(feat, model.center)
            prob_bar = feat2prob(feat_bar, model.center)
            sharp_loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            consistency_loss = F.mse_loss(prob, prob_bar)
            loss = sharp_loss + w * consistency_loss 
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args, epoch)

        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(probs) 
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #5
def test(testdataloader, model):

    losses = AverageMeter()

    result_mat = None
    # switch to eval mode
    model.eval()

    end = time.time()

    with torch.no_grad():
        for i, d in enumerate(testdataloader):
            data, target = d
            out = model(data.cuda())
            out = F.log_softmax(out, dim=1)
            loss = F.nll_loss(out, target.cuda())
            # record loss
            losses.update(loss.item(), out.size(0))
            resmat, acc = compute_acc(out, target)
            if result_mat is None:
                result_mat = resmat
            else:
                result_mat += resmat

            print(f"loss: {loss.item()}, acc: {acc}, time: {time.time() - end}")
            # measure elapsed time
            end = time.time()

        return result_mat, losses.avg
Example #6
def Baseline_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(),
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer,
                                                milestones=args.milestones,
                                                gamma=args.gamma)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        exp_lr_scheduler.step()
        for batch_idx, (x, label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(
            epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args)
        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(probs)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #7
    def __init__(self, criterion, g_optimizer, writer, device,
                 accuracy_predictor, flops_table, CONFIG):
        self.top1 = AverageMeter()
        self.top5 = AverageMeter()
        self.losses = AverageMeter()
        self.hc_losses = AverageMeter()

        self.writer = writer
        self.device = device

        self.criterion = criterion
        self.g_optimizer = g_optimizer

        self.CONFIG = CONFIG

        self.epochs = self.CONFIG.epochs
        self.warmup_epochs = self.CONFIG.warmup_epochs
        self.search_epochs = self.CONFIG.search_epochs

        self.hardware_pool = [
            i for i in range(self.CONFIG.low_macs, self.CONFIG.high_macs, 5)
        ]
        self.hardware_index = 0
        random.shuffle(self.hardware_pool)

        self.noise_weight = self.CONFIG.noise_weight

        # ================== OFA ====================
        self.accuracy_predictor = accuracy_predictor
        self.flops_table = flops_table

        self.backbone = self.calculate_one_hot(torch.randn(8 * 21)).cuda()
Example #8
def train(train_loader, model, criterion, optimizer, epoch, args):
    losses = AverageMeter('Loss', ':.4e')
    ap = APMeter()

    # switch to train mode
    model.train()
    train_t = tqdm(train_loader)
    for i, (images, target) in enumerate(train_t):
        images = images.cuda(args.gpu, non_blocking=True)
        target = target.cuda(args.gpu, non_blocking=True)

        output = model(images)
        loss = criterion(output, target)

        # measure accuracy and record loss
        ap.add(output.data, target.data)
        losses.update(loss.item(), images.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # print(ap.value())
        description = "[T:{0:3d}/{1:3d}] Loss: {2:.3f}, AP: {3:.3f}".\
            format(epoch, args.epochs, losses.avg, torch.mean(ap.value()))
        train_t.set_description(desc=description)

    return ap.value(), losses.avg
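Examples #8, #9, #23 and #27 track multi-label average precision with an APMeter. Assuming this is torchnet's meter (its add/value interface matches the calls here), a small usage sketch:

import torch
from torchnet.meter import APMeter

ap = APMeter()
scores = torch.rand(4, 20)                      # model outputs: 4 images, 20 classes
labels = torch.randint(0, 2, (4, 20)).float()   # multi-hot ground truth
ap.add(torch.sigmoid(scores), labels)
print(ap.value())         # per-class average precision, shape (20,)
print(ap.value().mean())  # mean AP, as shown in the progress bars above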
Example #9
def validate(val_loader, model, criterion, epoch, args):
    losses = AverageMeter('Loss', ':.4e')
    ap = APMeter()

    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        val_t = tqdm(val_loader)
        for i, (images, target, image_id) in enumerate(val_t):
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            ap.add(torch.sigmoid(output), target)
            losses.update(loss.item(), images.size(0))

            description = "[V:{0:3d}/{1:3d}] Loss: {2:3f}, AP: {3:.3f}".\
                format(epoch, args.epochs, losses.avg, torch.mean(ap.value()))
            val_t.set_description(description)

    return ap.value(), losses.avg
Example #10
    def __init__(self, criterion, w_optimizer, w_scheduler, logger, writer, device):
        self.top1 = AverageMeter()
        self.top3 = AverageMeter()
        self.losses = AverageMeter()
        self.losses_lat = AverageMeter()
        self.losses_ce = AverageMeter()

        self.logger = logger
        self.writer = writer
        self.device = device

        self.criterion = criterion
        self.w_optimizer = w_optimizer
        self.w_scheduler = w_scheduler

        self.layers_structure = []
        self.dataset = CONFIG["dataloading"]["dataset"]

        self.cnt_epochs = CONFIG["train_settings"]["cnt_epochs"]
        self.meta_epochs = CONFIG["train_settings"]["meta_epochs"]
        self.warmup_epochs = CONFIG["train_settings"]["warmup_epochs"]
        self.print_freq = CONFIG["train_settings"]["print_freq"]
        self.path_to_save_model = CONFIG["train_settings"]["path_to_save_model"]
        self.path_to_save_structure = CONFIG["train_settings"]["path_to_save_structure"]
        self.path_to_save_acc = CONFIG["train_settings"]["path_to_save_acc"]
        self.path_to_candidate_table = CONFIG["train_settings"]["path_to_candidate_table"]
        self.ngpu = CONFIG["ngpu"]
        self.max_epochs = 0

        self.acc_record = {}
        self.candidate_table = []
        self.layer = 0

        with open(self.path_to_save_acc, "w") as f:
            json.dump(self.acc_record, f)
Example #11
def TEP_train(model, alphabetStr, train_loader, eval_loader, args):
    optimizer = Adam(model.parameters(), lr=args.lr)
    w = 0
    alpha = 0.6
    ntrain = len(train_loader.dataset)
    Z = torch.zeros(ntrain, args.n_clusters).float().to(device)        # intermediate values
    z_ema = torch.zeros(ntrain, args.n_clusters).float().to(device)        # temporal outputs
    z_epoch = torch.zeros(ntrain, args.n_clusters).float().to(device)  # current outputs

    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        for batch_idx, (x, g_x, _, idx) in enumerate(train_loader):
            _, feat = model(x.to(device))
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_record.update(loss.item(), x.size(0))

        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eval_loader, args)
        z_epoch = probs.float().to(device)
        Z = alpha * Z + (1. - alpha) * z_epoch
        z_bars = Z * (1. / (1. - alpha ** (epoch + 1)))

        if epoch % args.update_interval == 0:
            args.p_targets = target_distribution(z_bars).float().to(device)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #12
def TEP_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)
    w = 0
    alpha = 0.6
    ntrain = len(train_loader.dataset)
    Z = torch.zeros(ntrain, args.n_clusters).float().to(device)        # intermediate values
    z_bars = torch.zeros(ntrain, args.n_clusters).float().to(device)        # temporal outputs
    z_epoch = torch.zeros(ntrain, args.n_clusters).float().to(device)  # current outputs
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        exp_lr_scheduler.step()
        for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device) 
            _, feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args, epoch)
        z_epoch = probs.float().to(device)
        Z = alpha * Z + (1. - alpha) * z_epoch
        z_bars = Z * (1. / (1. - alpha ** (epoch + 1)))

        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(z_bars).float().to(device)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #13
def train(model, train_loader, unlabeled_eval_loader, args):
    optimizer = Adam(model.parameters(), lr=args.lr)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                           step_size=args.step_size,
                                           gamma=args.gamma)
    criterion1 = nn.CrossEntropyLoss()
    criterion2 = BCE()
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        exp_lr_scheduler.step()
        w = args.rampup_coefficient * ramps.sigmoid_rampup(
            epoch, args.rampup_length)
        for batch_idx, ((x, x_bar), label, idx) in enumerate(tqdm(train_loader)):
            x, x_bar, label = x.to(device), x_bar.to(device), label.to(device)
            output1, output2, feat = model(x)
            output1_bar, output2_bar, _ = model(x_bar)
            prob1 = F.softmax(output1, dim=1)
            prob1_bar = F.softmax(output1_bar, dim=1)
            prob2 = F.softmax(output2, dim=1)
            prob2_bar = F.softmax(output2_bar, dim=1)

            mask_lb = idx < train_loader.labeled_length

            rank_feat = (feat[~mask_lb]).detach()
            rank_idx = torch.argsort(rank_feat, dim=1, descending=True)
            rank_idx1, rank_idx2 = PairEnum(rank_idx)

            rank_idx1, rank_idx2 = rank_idx1[:, :args.topk], rank_idx2[:, :args.topk]
            rank_idx1, _ = torch.sort(rank_idx1, dim=1)
            rank_idx2, _ = torch.sort(rank_idx2, dim=1)

            rank_diff = rank_idx1 - rank_idx2
            rank_diff = torch.sum(torch.abs(rank_diff), dim=1)
            target_ulb = torch.ones_like(rank_diff).float().to(device)
            target_ulb[rank_diff > 0] = -1

            prob1_ulb, _ = PairEnum(prob2[~mask_lb])
            _, prob2_ulb = PairEnum(prob2_bar[~mask_lb])

            loss_ce = criterion1(output1[mask_lb], label[mask_lb])
            loss_bce = criterion2(prob1_ulb, prob2_ulb, target_ulb)

            consistency_loss = F.mse_loss(prob1, prob1_bar) + F.mse_loss(
                prob2, prob2_bar)

            loss = loss_ce + loss_bce + w * consistency_loss

            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(
            epoch, loss_record.avg))
        print('test on unlabeled classes')
        args.head = 'head2'
        test(model, unlabeled_eval_loader, args)
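Example #13 builds pairwise pseudo-labels from ranking statistics with PairEnum and scores them with the BCE criterion; neither is defined on this page. A sketch, assuming the usual definitions from the pairwise-similarity and novel-category-discovery codebases (KCL/MCL, AutoNovel): PairEnum enumerates all ordered pairs of rows, and BCE is a binary cross-entropy over pairwise match probabilities with targets +1 (similar) and -1 (dissimilar):

import torch
import torch.nn as nn

def PairEnum(x):
    # Enumerate all ordered pairs of rows of x: two (n*n, d) tensors
    assert x.ndimension() == 2, 'Input dimension must be 2'
    x1 = x.repeat(x.size(0), 1)
    x2 = x.repeat(1, x.size(0)).view(-1, x.size(1))
    return x1, x2

class BCE(nn.Module):
    eps = 1e-7

    def forward(self, prob1, prob2, simi):
        # P is the probability that the two samples of a pair share a class
        P = (prob1 * prob2).sum(1)
        # maps to P for simi == +1 and to 1 - P for simi == -1
        P = P * simi + simi.eq(-1).type_as(P)
        return -(P + BCE.eps).log().mean()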
Example #14
def test(model, test_loader, args):
    model.eval()
    acc_record = AverageMeter()
    for batch_idx, (x, _, label, _) in enumerate(test_loader):
        x = x.to(device)
        _, feat = model(x)
        loss, acc = prototypical_loss(feat, label, n_support=5)
        acc_record.update(acc.item(), x.size(0))
    print('Test: Avg Acc: {:.4f}'.format(acc_record.avg))
Example #15
def test(model, test_loader, args):
    model.eval()
    acc_record = AverageMeter()
    for batch_idx, (x, label, _) in enumerate(test_loader):
        x, target = x.to(device), label.to(device)
        output = model(x)
        acc = accuracy(output, target)
        acc_record.update(acc[0].item(), x.size(0))
    print('Test: Avg Acc: {:.4f}'.format(acc_record.avg))
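Several loops above and below (Examples #15 through #19, #21, #22) call an accuracy helper and index its result with [0]. A minimal sketch, assuming the standard top-k accuracy function from the PyTorch ImageNet example, which returns one percentage per requested k:

import torch

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k, as percentages."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res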
Example #16
def test(model, device, dataloader, args):
    acc_record = AverageMeter()
    model.eval()
    for batch_idx, (data, label) in enumerate(tqdm(dataloader())):
        data, label = data.to(device), label.to(device)
        output = model(data)

        # measure accuracy and record loss
        acc = accuracy(output, label)
        acc_record.update(acc[0].item(), data.size(0))

    print('Test Acc: {:.4f}'.format(acc_record.avg))
    return acc_record
Example #17
    def val(self, epoch):
        self.model_s.eval()
        losses = AverageMeter()
        for (data, _, _) in tqdm(self.val_loader):
            data = data.to(self.device)
            with torch.set_grad_enabled(False):
                features_t = self.model_t(data)
                features_s = self.model_s(data)
                loss = cal_loss(features_s, features_t)
                losses.update(loss.item(), data.size(0))
        print('Val Epoch: {} loss: {:.6f}'.format(epoch, losses.avg))

        return losses.avg
Example #18
def train(epoch, model, device, dataloader, optimizer, exp_lr_scheduler,
          criterion, args):
    loss_record = AverageMeter()
    acc_record = AverageMeter()
    exp_lr_scheduler.step()
    model.train()
    for batch_idx, (data, label) in enumerate(tqdm(dataloader(epoch))):
        data, label = data.to(device), label.to(device)
        output = model(data)
        loss = criterion(output, label)

        # measure accuracy and record loss
        acc = accuracy(output, label)
        acc_record.update(acc[0].item(), data.size(0))
        loss_record.update(loss.item(), data.size(0))

        # compute gradient and do optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print('Train Epoch: {} Avg Loss: {:.4f} \t Avg Acc: {:.4f}'.format(
        epoch, loss_record.avg, acc_record.avg))

    return loss_record
Example #19
def train_epoch(model, data_loader, criterion, optimizer, device, opt):

    model.train()

    losses = AverageMeter('Loss', ':.2f')
    accuracies = AverageMeter('Acc', ':.2f')
    progress = ProgressMeter(
        len(data_loader),
        [losses, accuracies],
        prefix='Train: ')
    # Training
    for batch_idx, (data, targets) in enumerate(data_loader):
        # compute outputs
        data, targets = data.to(device), targets.to(device)

        outputs = model(data)
        loss = criterion(outputs, targets)

        acc = accuracy(outputs, targets)
        losses.update(loss.item(), data.size(0))
        accuracies.update(acc[0].item(), data.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # show information
        if batch_idx % opt.log_interval == 0:
            progress.display(batch_idx)

    # show information
    print(f' * Train Loss {losses.avg:.3f}, Train Acc {accuracies.avg:.3f}')
    return losses.avg, accuracies.avg
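Example #19's ProgressMeter is likewise undefined here; a sketch, assuming the variant from the PyTorch ImageNet example, which prints a prefixed batch counter followed by each meter's __str__ (this is why the AverageMeter sketch above carries name and fmt):

class ProgressMeter(object):
    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        # e.g. "Train: [ 40/391]\tLoss 0.52 (0.61)\tAcc 81.25 (78.90)"
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'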
Example #20
    def __init__(self, criterion, optimizer, g_optimizer, scheduler, writer,
                 device, lookup_table, prior_pool, CONFIG):
        self.top1 = AverageMeter()
        self.top5 = AverageMeter()
        self.losses = AverageMeter()
        self.hc_losses = AverageMeter()

        self.writer = writer
        self.device = device

        self.criterion = criterion
        self.optimizer = optimizer
        self.g_optimizer = g_optimizer
        self.scheduler = scheduler

        self.CONFIG = CONFIG

        self.epochs = self.CONFIG.epochs
        self.warmup_epochs = self.CONFIG.warmup_epochs
        self.search_epochs = self.CONFIG.search_epochs

        self.prior_pool = prior_pool
        # ==============
        self.hardware_pool = [
            i for i in range(self.CONFIG.low_flops, self.CONFIG.high_flops, 5)
        ]
        self.hardware_index = 0
        random.shuffle(self.hardware_pool)
        # ==============

        self.lookup_table = lookup_table
Example #21
    def test(self, val_loader, net, criterion):
        top1 = AverageMeter()
        top5 = AverageMeter()
        print_freq = 100
        # switch to evaluate mode
        net.eval()
        with torch.no_grad():
            for i, (input, label) in enumerate(val_loader):
                target = label.cuda()
                input = input.cuda()
                # forward
                prob1, cam_top1, M_p = net(input)
                crop_img = attention_crop_test(M_p, input, config.mask_test_th)
                crop_img = crop_img.cuda()
                prob2, cam_top1_2, _ = net(crop_img)

                # measure accuracy and record loss
                out = (F.softmax(prob1, dim=-1) + F.softmax(prob2, dim=-1)) / 2
                prec1, prec5 = accuracy(out, target, topk=(1, 5))
                top1.update(prec1[0], input.size(0))
                top5.update(prec5[0], input.size(0))

                if i % print_freq == 0:
                    print('Test: [{0}/{1}]\t'
                          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              i, len(val_loader), top1=top1, top5=top5))

            print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(
                top1=top1, top5=top5))
        return top1.avg, top5.avg
Example #22
def train(model, train_loader, eva_loader, args):
    optimizer = Adam(model.parameters(), lr=args.lr)
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer,
                                                milestones=args.milestones,
                                                gamma=args.gamma)
    criterion = nn.CrossEntropyLoss().cuda(device)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        acc_record = AverageMeter()
        model.train()
        exp_lr_scheduler.step()
        for batch_idx, (x, label, _) in enumerate(train_loader):
            x, target = x.to(device), label.to(device)
            optimizer.zero_grad()
            output = model(x)
            loss = criterion(output, target)
            acc = accuracy(output, target)
            loss.backward()
            optimizer.step()
            acc_record.update(acc[0].item(), x.size(0))
            loss_record.update(loss.item(), x.size(0))
        print('Train Epoch: {} Avg Loss: {:.4f} \t Avg Acc: {:.4f}'.format(
            epoch, loss_record.avg, acc_record.avg))
        test(model, eva_loader, args)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #23
def evaluate_cam(val_loader, model, criterion, args):
    losses = AverageMeter('Loss', ':.4e')
    ap = APMeter()

    # switch to evaluate mode
    model.eval()

    # Image de-standardization
    image_mean_value = [0.485, .456, .406]
    image_std_value = [.229, .224, .225]
    image_mean_value = torch.reshape(torch.tensor(image_mean_value), (1, 3, 1, 1))
    image_std_value = torch.reshape(torch.tensor(image_std_value), (1, 3, 1, 1))

    with torch.no_grad():
        for i, (images, target, image_id) in enumerate(tqdm(val_loader, desc='Evaluate')):
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            ap.add(output.detach(), target)
            losses.update(loss.item(), images.size(0))

            # image de-normalizing
            images = images.clone().detach().cpu() * image_std_value + image_mean_value
            images = images.numpy().transpose(0, 2, 3, 1) * 255.
            images = images[:, :, :, ::-1]

            # extract CAM
            cam = get_cam_all_class(model, target)
            cam = cam.cpu().numpy().transpose(0, 2, 3, 1)

            # for all class
            for j in range(cam.shape[0]):
                blend_tensor = torch.empty((cam.shape[3], 3, 321, 321))
                for k in range(cam.shape[3]):
                    cam_ = resize_cam(cam[j, :, :, k])
                    blend, heatmap = blend_cam(images[j], cam_)
                    if target[j, k]:
                        blend = mark_target(blend, text=CAT_LIST[k])
                    blend = blend[:, :, ::-1] / 255.
                    blend = blend.transpose(2, 0, 1)
                    blend_tensor[k] = torch.tensor(blend)
                save_images('result', i, j, blend_tensor, args)

    return ap.value(), losses.avg
Example #24
def warmup_train(model, alphabetStr, train_loader, eval_loader, args):
    optimizer = Adam(model.parameters(), lr=args.warmup_lr)
    for epoch in range(args.warmup_epochs):
        loss_record = AverageMeter()
        model.train()
        for batch_idx, (x, g_x, _, idx) in enumerate(train_loader):
        _, feat = model(x.to(device))
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_record.update(loss.item(), x.size(0))
        print('Warmup Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        test(model, eval_loader, args)
Example #25
    def __init__(self, criterion, optimizer, scheduler, device, CONFIG, *args,
                 **kwargs):
        self.losses = AverageMeter()
        self.seg_losses = AverageMeter()

        self.device = device

        self.criterion = criterion
        self.optimizer = optimizer
        self.scheduler = scheduler

        self.CONFIG = CONFIG

        self.epochs = self.CONFIG.epochs
        self.threshold = np.array(self.CONFIG.threshold)
Example #26
def train(model, model_ema, train_loader, labeled_eval_loader, unlabeled_eval_loader, args):

    optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    criterion1 = nn.CrossEntropyLoss() 
    criterion2 = BCE() 
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        model_ema.train()
        exp_lr_scheduler.step()
        w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length) 
        for batch_idx, ((x, x_bar), label, idx) in enumerate(tqdm(train_loader)):
            x, x_bar, label = x.to(device), x_bar.to(device), label.to(device)

            output1, output2, feat = model(x)
            output1_bar, output2_bar, _ = model(x_bar)

            with torch.no_grad():
                output1_ema, output2_ema, feat_ema = model_ema(x)
                output1_bar_ema, output2_bar_ema, _ = model_ema(x_bar)
            prob1 = F.softmax(output1, dim=1)
            prob1_bar = F.softmax(output1_bar, dim=1)
            prob2 = F.softmax(output2, dim=1)
            prob2_bar = F.softmax(output2_bar, dim=1)
            prob1_ema = F.softmax(output1_ema, dim=1)
            prob1_bar_ema = F.softmax(output1_bar_ema, dim=1)
            prob2_ema = F.softmax(output2_ema, dim=1)
            prob2_bar_ema = F.softmax(output2_bar_ema, dim=1)

            mask_lb = label < args.num_labeled_classes

            loss_ce = criterion1(output1[mask_lb], label[mask_lb])
            loss_bce = rank_bce(criterion2, feat, mask_lb, prob2, prob2_bar)

            consistency_loss = F.mse_loss(prob1, prob1_bar) + F.mse_loss(prob2, prob2_bar)
            consistency_loss_ema = F.mse_loss(prob1, prob1_bar_ema) + F.mse_loss(prob2, prob2_bar_ema)

            loss = loss_ce + loss_bce + w * consistency_loss + w * consistency_loss_ema #+ smooth_loss(feat,mask_lb) #+ MCR(feat, idx)

            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            _update_ema_variables(model, model_ema, 0.99, epoch * len(train_loader) + batch_idx)

        print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        print('test on labeled classes')
        args.head = 'head1'
        test(model, labeled_eval_loader, args)
        print('test on unlabeled classes')
        args.head = 'head2'
        test(model, unlabeled_eval_loader, args)
        test(model_ema, unlabeled_eval_loader, args)
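The mean-teacher update _update_ema_variables called in Example #26 is not shown either; a minimal sketch, assuming the Mean Teacher formulation, where the decay is clamped so the teacher tracks the student closely during the first optimization steps:

def _update_ema_variables(model, ema_model, alpha, global_step):
    # Ramp the effective decay from 0 towards alpha as training progresses
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)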
Example #27
def evaluate_cam3(val_loader, model, criterion, args):
    losses = AverageMeter('Loss', ':.4e')
    ap = APMeter()

    size_dict = load_sizes(args.size_list)

    # switch to evaluate mode
    model.eval()

    # Image de-standardization
    image_mean_value = [0.485, .456, .406]
    image_std_value = [.229, .224, .225]
    image_mean_value = torch.reshape(torch.tensor(image_mean_value), (1, 3, 1, 1))
    image_std_value = torch.reshape(torch.tensor(image_std_value), (1, 3, 1, 1))

    with torch.no_grad():
        for i, (images, target, image_id) in enumerate(tqdm(val_loader, desc='Evaluate')):
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            ap.add(output.detach(), target)
            losses.update(loss.item(), images.size(0))

            # image de-normalizing
            images = images.clone().detach().cpu() * image_std_value + image_mean_value
            images = images.numpy().transpose(0, 2, 3, 1) * 255.
            images = images[:, :, :, ::-1]

            # extract CAM
            cam = get_cam_target_class(model)
            cam = cam.cpu().numpy().transpose(0, 2, 3, 1)

            for j in range(cam.shape[0]):
                cam_ = resize_threshold_cam(cam[j],
                                            size=(size_dict[image_id[j]][0], size_dict[image_id[j]][1]),
                                            thresh=0.3)
                cam_max = np.argmax(cam_, axis=2)

                save_result_image('final_map', cam_max, image_id[j], args)


    return ap.value(), losses.avg
Example #28
def val(model, loader, device):
    top1 = AverageMeter()

    model.eval()
    start_time = time.time()

    with torch.no_grad():
        for step, (X, y) in enumerate(loader):
            X, y = X.to(device), y.to(device)
            N = X.shape[0]

            outs = model(X)

            prec1 = accuracy(outs, y, topk=(1,))[0]
            top1.update(prec1.item(), N)
        top1_avg = top1.get_avg()
        logging.info("Test: Final Prec@1 {:.2%} Time {:.2f}".format(top1_avg, time.time()-start_time))
Example #29
def warmup_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(), lr=args.warmup_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    for epoch in range(args.warmup_epochs):
        loss_record = AverageMeter()
        model.train()
        for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            _, feat = model(x)
            prob = feat2prob(feat, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Warmup_train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args, epoch)
    args.p_targets = target_distribution(probs) 
Example #30
def TE_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(),
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)
    w = 0
    alpha = 0.6
    ntrain = len(train_loader.dataset)
    Z = torch.zeros(ntrain, args.n_clusters).float().to(device)        # intermediate values
    z_ema = torch.zeros(ntrain, args.n_clusters).float().to(device)    # temporal outputs
    z_epoch = torch.zeros(ntrain, args.n_clusters).float().to(device)  # current outputs
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        model.train()
        w = args.rampup_coefficient * ramps.sigmoid_rampup(
            epoch, args.rampup_length)
        for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            feat = model(x)
            prob = feat2prob(feat, model.center)
            z_epoch[idx, :] = prob
            prob_bar = z_ema[idx, :].detach()  # temporal-ensemble target, no gradient
            sharp_loss = F.kl_div(prob.log(),
                                  args.p_targets[idx].float().to(device))
            consistency_loss = F.mse_loss(prob, prob_bar)
            loss = sharp_loss + w * consistency_loss
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        Z = alpha * Z + (1. - alpha) * z_epoch
        z_ema = Z * (1. / (1. - alpha**(epoch + 1)))
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(
            epoch, loss_record.avg))
        acc, _, _, probs = test(model, eva_loader, args)

        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(probs)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))