Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--loss', default='softmax_loss',
                        choices=['softmax_loss', 'center_loss', 'sphere_face_loss', 'cos_face_loss', 'arc_face_loss'])
    parser.add_argument('--viz', default='vizs')
    parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--lr', type=float, default=0.001)
    args = parser.parse_args()

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])

    train_loader = DataLoader(
        datasets.MNIST('../data', train=True, download=True, transform=transform),
        batch_size=args.batch_size, shuffle=True, **kwargs)

    test_loader = DataLoader(
        datasets.MNIST('../data', train=False, transform=transform),
        batch_size=512, shuffle=True, **kwargs)

    model = Net().to(device)

    if args.loss == 'center_loss':
        criterion = CenterLoss().to(device)
        center_optimizer = optim.SGD([criterion.centers], lr=args.lr, momentum=0.9)
    elif args.loss == 'sphere_face_loss':
        criterion = SphereFaceLoss().to(device)
    elif args.loss == 'cos_face_loss':
        criterion = CosFaceLoss(s=7, m=0.2).to(device)
    elif args.loss == 'softmax_loss':
        criterion = SoftmaxLoss().to(device)
    elif args.loss == 'arc_face_loss':
        criterion = ArcFaceLoss().to(device)

    optimizer = optim.SGD([{'params': model.parameters()}, {'params': criterion.fc.parameters()}],
                          lr=args.lr, momentum=0.9)

    for epoch in range(1, args.epochs + 1):
        model.train()
        embeddings = []
        labels = []
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)

            optimizer.zero_grad()
            if args.loss == 'center_loss':
                center_optimizer.zero_grad()
            embedding = model(data)

            loss = criterion(embedding, target)
            loss.backward()
            optimizer.step()
            if args.loss == 'center_loss':
                center_optimizer.step()

            # detach so the stored embeddings do not keep autograd references alive
            embeddings.append(embedding.detach())
            labels.append(target)

            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                           100. * batch_idx / len(train_loader), loss.item()))

        embeddings = torch.cat(embeddings, 0).cpu().numpy()
        labels = torch.cat(labels, 0).cpu().numpy()
        acc = val(model, criterion, device, test_loader)
        visualize(args.viz, args.loss, embeddings, labels, epoch, acc)

    print('Creating gif...')
    create_gif('./%s/gifs/%s.gif' % (args.viz, args.loss),
               './%s/%s' % (args.viz, args.loss), 0.2)
    print('Done')
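
Example #1 assumes a `CenterLoss` whose `centers` tensor is optimized by its own SGD instance and which also owns the `fc` head referenced in the joint optimizer. A minimal sketch consistent with that interface (the 2-D/10-class dimensions and the `lambda_c` weight are assumptions, not the original module):

import torch
import torch.nn as nn
import torch.nn.functional as F

class CenterLoss(nn.Module):
    """Cross-entropy on an fc head plus a pull toward per-class centers."""
    def __init__(self, num_classes=10, feat_dim=2, lambda_c=1.0):
        super().__init__()
        self.fc = nn.Linear(feat_dim, num_classes)   # classification head
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))
        self.lambda_c = lambda_c

    def forward(self, embedding, target):
        ce = F.cross_entropy(self.fc(embedding), target)
        pull = (embedding - self.centers[target]).pow(2).sum(dim=1).mean()
        return ce + 0.5 * self.lambda_c * pull
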
Example #2
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'cent'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_cent = CenterLoss(num_classes=dataset.num_train_pids,
                                feat_dim=model.feat_dim,
                                use_gpu=use_gpu)

    optimizer_model = torch.optim.Adam(model.parameters(),
                                       lr=args.lr,
                                       weight_decay=args.weight_decay)
    optimizer_cent = torch.optim.SGD(criterion_cent.parameters(),
                                     lr=args.lr_cent)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    best_rank1 = -np.inf

    for epoch in range(start_epoch, args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))

        train(model, criterion_xent, criterion_cent, optimizer_model,
              optimizer_cent, trainloader, use_gpu)

        if args.stepsize > 0: scheduler.step()

        if (args.eval_step > 0 and (epoch + 1) % args.eval_step == 0) or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best: best_rank1 = rank1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #3
def train_epoch(train_loader,
                model,
                loss_fn,
                optimizer,
                k,
                n_K,
                n_classes,
                rm_zero,
                global_loss=False,
                train_embeddeds=None,
                train_targets=None,
                gamma=0.01,
                center_sigma=-1.0,
                method='kTriplet',
                use_cross_entropy=False):
    model.train()
    losses_triplet = []
    losses_inner_class = []
    losses_center = []
    losses_cross_entropy = []
    losses = []
    center_loss = CenterLoss(n_classes, mean_center=True).cuda()
    cross_entropy = nn.CrossEntropyLoss().cuda()

    if global_loss:
        in_mat, out_mat = get_global_distance_map(train_embeddeds,
                                                  train_targets, k, n_K)
        data_id = 0

    for batch_idx, (data, target) in enumerate(
            tqdm(train_loader, ncols=70, desc="Train")):
        data = data.cuda()
        target = target.cuda()

        # !note, for Triplet, here just the embeddings, for Clas, it's predict
        outputs = model(data)
        embeddeds = outputs

        if method == 'kTriplet':
            anchor, positive, negative = generate_k_triplet(embeddeds,
                                                            target,
                                                            K=k,
                                                            B=n_K)
        elif method == 'batchHardTriplet':
            anchor, positive, negative = generate_batch_hard_triplet(
                embeddeds, target)
        elif method == 'batchAllTriplet' or method == 'batchSemiHardTriplet':
            anchor, positive, negative = generate_all_triplet(
                embeddeds, target)
        else:
            raise ValueError

        triplet_loss = loss_fn(anchor, positive, negative)

        if method == 'batchSemiHardTriplet':
            # keep only the semi-hard triplets
            semi = torch.nonzero((triplet_loss <= loss_fn.margin)
                                 & (triplet_loss > 0))
            triplet_loss = triplet_loss.index_select(dim=0, index=semi.squeeze())

        if rm_zero:
            non_zero = torch.nonzero(triplet_loss.cpu().data).size(0)
            if non_zero == 0:
                loss_triplet = triplet_loss.mean()
            else:
                loss_triplet = (triplet_loss / non_zero).sum()
        else:
            loss_triplet = triplet_loss.mean()

        if gamma > 0:
            loss_inner_class = torch.log1p(
                (anchor - positive).pow(2).sum(1)).mean()
            loss = loss_triplet + gamma * loss_inner_class
        elif gamma == 0:
            # track the inner-class term for logging without adding it to the loss
            with torch.no_grad():
                loss_inner_class = torch.log1p(
                    (anchor - positive).pow(2).sum(1)).mean()
            loss = loss_triplet
        else:
            loss_inner_class = torch.tensor(0)
            loss = loss_triplet

        if center_sigma > 0:
            closs = center_sigma * center_loss(embeddeds, target)
            loss += closs
            losses_center.append(closs.item())

        if global_loss:
            g_pos, g_neg = get_global_data(
                train_loader.dataset, in_mat, out_mat,
                np.array(range(data_id, data_id + train_loader.batch_size)))
            data_id += train_loader.batch_size
            g_pos = g_pos.cuda()
            g_neg = g_neg.cuda()
            out_p = model(g_pos)
            out_n = model(g_neg)

            anchor_g, positive_g, negative_g = makeup_global_triplet(
                embeddeds, out_p, out_n, k, n_K)
            # use a separate name so the `global_loss` flag is not shadowed
            loss_global = loss_fn(anchor_g, positive_g, negative_g)

            loss += loss_global

        # TODO: add a cross-entropy term on classifier logits
        if use_cross_entropy:
            raise NotImplementedError('cross-entropy term is not implemented yet')
        else:
            ce_loss = torch.tensor(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses_triplet.append(loss_triplet.item())
        losses_inner_class.append(loss_inner_class.item())
        losses_cross_entropy.append(ce_loss.item())

        losses.append(loss.item())

    average_triplet_loss = sum(losses_triplet) / len(train_loader)
    average_inner_class = sum(losses_inner_class) / len(train_loader)
    average_center_loss = sum(losses_center) / len(train_loader)
    average_cross_entropy = sum(losses_cross_entropy) / len(train_loader)
    total_loss = sum(losses) / len(train_loader)

    return total_loss, average_triplet_loss, average_inner_class, average_center_loss, average_cross_entropy
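
`generate_batch_hard_triplet` is used above but not defined here. A self-contained sketch of batch-hard mining with that call signature (the repository's version may differ in details):

import torch

def generate_batch_hard_triplet(embeddings, targets):
    """For each anchor, pick its hardest positive (farthest same-class
    sample) and hardest negative (closest other-class sample) in the batch."""
    dist = torch.cdist(embeddings, embeddings)           # (B, B) pairwise L2
    same = targets.unsqueeze(0) == targets.unsqueeze(1)  # (B, B) label mask
    pos_dist = dist.clone()
    pos_dist[~same] = -1.0                               # exclude other classes
    hardest_pos = pos_dist.argmax(dim=1)
    neg_dist = dist.clone()
    neg_dist[same] = float('inf')                        # exclude same class
    hardest_neg = neg_dist.argmin(dim=1)
    return embeddings, embeddings[hardest_pos], embeddings[hardest_neg]
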
Example #4
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(p=0.5),
        T.Pad(10),
        T.RandomCrop([args.height, args.width]),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        torchvision.transforms.RandomErasing(p=0.5,
                                             scale=(0.02, 0.4),
                                             ratio=(0.3, 3.33),
                                             value=(0.4914, 0.4822, 0.4465))
        # T.RandomErasing(probability=0.5, sh=0.4, mean=(0.4914, 0.4822, 0.4465)),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler2(dataset.train,
                                       batch_size=args.train_batch,
                                       num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri', 'cent'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)
    criterion_cent = CenterLoss(num_classes=dataset.num_train_pids,
                                feat_dim=model.feat_dim,
                                use_gpu=use_gpu)

    optimizer_model = init_optim(args.optim, model.parameters(), args.lr,
                                 args.weight_decay)
    optimizer_cent = torch.optim.SGD(criterion_cent.parameters(),
                                     lr=args.lr_cent)
    # Only optimizer_model follows a learning-rate schedule.
    # ------ Modify the lr schedule here ------
    current_schedule = init_lr_schedule(schedule=args.schedule,
                                        warm_up_epoch=args.warm_up_epoch,
                                        half_cos_period=args.half_cos_period,
                                        lr_milestone=args.lr_milestone,
                                        gamma=args.gamma,
                                        stepsize=args.stepsize)

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer_model,
                                                  lr_lambda=current_schedule)
    # ------ See the args.* definitions for hyperparameter details ------
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, criterion_cent,
              optimizer_model, optimizer_cent, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.schedule: scheduler.step()

        if ((epoch + 1) > args.start_eval and args.eval_step > 0 and
                (epoch + 1) % args.eval_step == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
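
`init_lr_schedule` has to return a multiplier function for `LambdaLR`. One plausible shape, covering only a warmup-then-step and a half-cosine policy (the `schedule` names and exact argument semantics here are assumptions):

import math

def init_lr_schedule(schedule, warm_up_epoch, half_cos_period,
                     lr_milestone, gamma, stepsize):
    # lr_milestone-based policies are omitted from this sketch
    if schedule == 'warmup_step':
        def fn(epoch):
            if epoch < warm_up_epoch:
                return (epoch + 1) / warm_up_epoch          # linear warmup
            return gamma ** ((epoch - warm_up_epoch) // stepsize)
    elif schedule == 'half_cos':
        def fn(epoch):
            return 0.5 * (1 + math.cos(math.pi * epoch / half_cos_period))
    else:
        def fn(epoch):
            return 1.0                                      # constant lr
    return fn
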
Example #5
    fc = torch.nn.Linear(2, 2).cuda()
    layer = Membership_norm(2, 4,
                            init_c=-5 * torch.ones((2, 4), dtype=torch.float),
                            init_lamda=4 * torch.ones((2, 4), dtype=torch.float)).cuda()

    # loss_focal = torch.nn.MSELoss()
    loss_focal = FocalLoss()
    loss_center = CenterLoss()
    para = [
        {"params": fc.parameters(), "lr": 1e-3},
        {"params": layer.c, "lr": 1e-3},
        {"params": layer.lamda, "lr": 1e-3},
    ]

    # optimizer = torch.optim.SGD(para)
    optimizer = torch.optim.Adam(para)

    for i in range(0, 100000):
        h = fc(x_in_tensor).unsqueeze(2)
        y = layer(h)
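        # --- hypothetical continuation; the original snippet is cut off here ---
        # assumes `label_tensor` holds the targets and both losses take
        # (prediction, target); the 0.1 center-loss weight is a placeholder
        loss = loss_focal(y, label_tensor) + 0.1 * loss_center(y, label_tensor)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % 1000 == 0:
            print('iter %d: loss=%.4f' % (i, loss.item()))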
Example #6
    def __init__(self, config):
        super(Trainer, self).__init__(config)
        self.datamanager = DataManger(config["data"])

        # model
        self.model = Baseline(
            num_classes=self.datamanager.datasource.get_num_classes("train")
        )

        # summary model
        summary(
            self.model,
            input_size=(3, 256, 128),
            batch_size=config["data"]["batch_size"],
            device="cpu",
        )

        # losses
        cfg_losses = config["losses"]
        self.criterion = Softmax_Triplet_loss(
            num_class=self.datamanager.datasource.get_num_classes("train"),
            margin=cfg_losses["margin"],
            epsilon=cfg_losses["epsilon"],
            use_gpu=self.use_gpu,
        )

        self.center_loss = CenterLoss(
            num_classes=self.datamanager.datasource.get_num_classes("train"),
            feature_dim=2048,
            use_gpu=self.use_gpu,
        )

        # optimizer
        cfg_optimizer = config["optimizer"]
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=cfg_optimizer["lr"],
            weight_decay=cfg_optimizer["weight_decay"],
        )

        self.optimizer_centerloss = torch.optim.SGD(
            self.center_loss.parameters(), lr=0.5
        )

        # learning rate scheduler
        cfg_lr_scheduler = config["lr_scheduler"]
        self.lr_scheduler = WarmupMultiStepLR(
            self.optimizer,
            milestones=cfg_lr_scheduler["steps"],
            gamma=cfg_lr_scheduler["gamma"],
            warmup_factor=cfg_lr_scheduler["factor"],
            warmup_iters=cfg_lr_scheduler["iters"],
            warmup_method=cfg_lr_scheduler["method"],
        )

        # track metric
        self.train_metrics = MetricTracker("loss", "accuracy")
        self.valid_metrics = MetricTracker("loss", "accuracy")

        # save best accuracy for function _save_checkpoint
        self.best_accuracy = None

        # send model to device
        self.model.to(self.device)

        self.scaler = GradScaler()

        # resume model from last checkpoint
        if config["resume"] != "":
            self._resume_checkpoint(config["resume"])
Example #7
        aux_loss = PerpetualOrthogonalProjectionLoss(
            num_classes=100,
            feat_dim=embedding_dim[args.net],
            no_norm=False,
            use_attention=False)
        params = list(net.parameters()) + list(aux_loss.parameters())
    else:
        aux_loss = OrthogonalProjectionLoss(no_norm=False, use_attention=False)

    if args.hnc:
        hnc_loss = cam_loss_kd_topk()
    else:
        hnc_loss = None

    if args.cl:
        center_loss = CenterLoss(num_classes=100, feat_dim=2048, use_gpu=True)
        params = list(net.parameters()) + list(center_loss.parameters())

    # the optimizer must exist whether or not center loss is enabled
    optimizer = optim.SGD(params=params,
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=5e-4)

    train_scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=settings.MILESTONES,
        gamma=0.2)  # learning rate decay
    iter_per_epoch = len(training_loader)
    warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * args.warm)

    if args.resume:
        if args.pth is not None:
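
`WarmUpLR` receives the total number of warmup iterations (`iter_per_epoch * args.warm`). A common implementation of that interface, shown as a sketch rather than the repository's exact class:

from torch.optim.lr_scheduler import _LRScheduler

class WarmUpLR(_LRScheduler):
    """Scale the learning rate linearly from ~0 up to its base value
    over the first `total_iters` training iterations."""
    def __init__(self, optimizer, total_iters, last_epoch=-1):
        self.total_iters = total_iters
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        return [base_lr * self.last_epoch / (self.total_iters + 1e-8)
                for base_lr in self.base_lrs]
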