Example #1
    def save(self, save_optimizer=True, better=False, save_path=None):
        save_dict = dict()

        save_dict['model'] = self.water_net.state_dict()
        save_dict['config'] = opt._state_dict()
        # save_dict['vis_info'] = self.vis.state_dict()

        if save_optimizer:
            save_dict['optimizer'] = self.optimizer.state_dict()

        if better:
            save_path = 'cur_best_params'
        else:
            # save_path = opt.save_path
            if opt.customize:
                save_name = 'model' + '_self_' + opt.arch + '_' + opt.optim + opt.kind + 'params.tar'
            else:
                save_name = 'model' + '_default_' + opt.arch + '_' + opt.optim + opt.kind + 'params.tar'
            save_path = os.path.join(opt.save_path, save_name)
            save_dir = os.path.dirname(save_path)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)

        print(save_path)
        t.save(save_dict, save_path)
        # self.vis.save([self.vis.env])
        return save_path
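Note: a checkpoint written by this method can be restored symmetrically. A minimal loading sketch, assuming `t` is the `torch` alias used above and a hypothetical `trainer` object with the same attributes:

def load(trainer, load_path, load_optimizer=True):
    # restore a checkpoint saved by save() above
    state = t.load(load_path, map_location='cpu')
    trainer.water_net.load_state_dict(state['model'])
    if load_optimizer and 'optimizer' in state:
        trainer.optimizer.load_state_dict(state['optimizer'])
    return state.get('config')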
Example #2
    def save(self, save_optimizer=False, save_path=None, **kwargs):
        """serialize models include optimizer and other info
        return path where the model-file is stored.

        Args:
            save_optimizer (bool): whether save optimizer.state_dict().
            save_path (string): where to save model, if it's None, save_path
                is generate using time str and info from kwargs.

        Returns:
            save_path(str): the path to save models.
        """
        save_dict = dict()

        save_dict['model'] = self.faster_rcnn.state_dict()
        save_dict['config'] = opt._state_dict()
        save_dict['other_info'] = kwargs
        save_dict['vis_info'] = self.vis.state_dict()

        if save_optimizer:
            save_dict['optimizer'] = self.optimizer.state_dict()

        if save_path is None:
            timestr = time.strftime('%m%d%H%M')
            save_path = 'checkpoints/fasterrcnn_%s' % timestr
            for k_, v_ in kwargs.items():
                save_path += '_%s' % v_

        save_dir = os.path.dirname(save_path)
        # guard: os.path.dirname('') would make os.makedirs raise
        if save_dir and not os.path.exists(save_dir):
            os.makedirs(save_dir)

        t.save(save_dict, save_path)
        self.vis.save([self.vis.env])
        return save_path
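Note: kwargs both ride along inside the checkpoint ('other_info') and are appended to the generated file name. A hypothetical usage sketch, assuming a `trainer` instance of this class:

# e.g. saves to 'checkpoints/fasterrcnn_<MMDDHHMM>_0.712'
path = trainer.save(save_optimizer=True, best_map=0.712)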
Example #3
def test(**kwargs):
    opt._parse(kwargs)

    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_test.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, use_all=opt.use_all)

    # load data
    pin_memory = True if use_gpu else False
    dataloader = load_data(dataset, pin_memory)
    # debug: inspect the first query sample
    print(dataloader['query'].dataset.dataset[0][0])

    print('initializing model ...')
    if opt.loss == 'softmax' or opt.loss == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, opt.last_stride, True)
    elif opt.loss == 'triplet':
        model = ResNetBuilder(None, opt.last_stride, True)
    else:
        raise ValueError('unsupported loss: {}'.format(opt.loss))

    if opt.pretrained_model:
        if use_gpu:
            state_dict = torch.load(opt.pretrained_model)['state_dict']
        else:
            state_dict = torch.load(opt.pretrained_model,
                                    map_location='cpu')['state_dict']
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    reid_evaluator.test(dataloader['query'],
                        dataloader['gallery'],
                        savefig=opt.savefig,
                        i=opt.findid)
    return
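Note: `Logger` here is a project utility that redirects stdout into a log file while still printing to the console. A minimal tee-style sketch of the assumed behavior:

import sys

class Logger:
    # sketch: duplicate everything written to stdout into a log file
    def __init__(self, path):
        self.console = sys.stdout
        self.file = open(path, 'a')

    def write(self, msg):
        self.console.write(msg)
        self.file.write(msg)

    def flush(self):
        self.console.flush()
        self.file.flush()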
Example #4
def test_cycle_gan(**kwargs):
    opt._parse(kwargs)
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)
    np.random.seed(opt.seed)
    random.seed(opt.seed)
    # Write standard output into file
    sys.stdout = Logger(os.path.join(opt.save_dir, 'log_test.txt'))

    print('========user config========')
    pprint(opt._state_dict())
    print('===========end=============')
    if opt.use_gpu:
        print('currently using GPU')
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    pin_memory = True if opt.use_gpu else False
    print('initializing dataset {}'.format(opt.dataset_mode))
    dataset = UnalignedDataset(opt)
    testloader = DataLoader(dataset,
                            opt.batchSize,
                            shuffle=True,
                            num_workers=opt.workers,
                            pin_memory=pin_memory)

    summaryWriter = SummaryWriter(os.path.join(opt.save_dir,
                                               'tensorboard_log'))

    print('initializing model ... ')
    netG_A, netG_B, netD_A, netD_B = load_checkpoint(opt)
    start_epoch = opt.start_epoch
    if opt.use_gpu:
        netG_A = torch.nn.DataParallel(netG_A).cuda()
        netG_B = torch.nn.DataParallel(netG_B).cuda()
        netD_A = torch.nn.DataParallel(netD_A).cuda()
        netD_B = torch.nn.DataParallel(netD_B).cuda()

    # get tester
    cycleganTester = Tester(opt, netG_A, netG_B, netD_A, netD_B, summaryWriter)

    for epoch in range(start_epoch, opt.max_epoch):
        # test over whole dataset
        cycleganTester.test(epoch, testloader)
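Note: the four seeding calls at the top of this example are a common idiom; they could be folded into one helper, sketched here under the same imports:

import random
import numpy as np
import torch

def set_seed(seed):
    # seed every RNG the loop touches: torch CPU/CUDA, numpy, random
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)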
Example #5
    def save(self, save_optimizer=False, save_path=None, **kwargs):
        save_dict = dict()
        save_dict['model'] = self.faster_rcnn.state_dict()
        save_dict['config'] = opt._state_dict()
        save_dict['other_info'] = kwargs
        # save_dict['vis_info'] = self.vis.state_dict()

        if save_optimizer:
            save_dict['optimizer'] = self.optimizer.state_dict()

        if save_path is None:
            timestr = time.strftime('%m%d%H%M')
            save_path = 'checkpoints/fasterrcnn_%s' % timestr
            for k_, v_ in kwargs.items():
                save_path += '_%s' % v_

        save_dir = os.path.dirname(save_path)
        # guard: os.path.dirname('') would make os.makedirs raise
        if save_dir and not os.path.exists(save_dir):
            os.makedirs(save_dir)

        t.save(save_dict, save_path)
        return save_path
Example #6
def multi_test(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    # torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    # sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing tx_challenge dataset')

    pin_memory = True if use_gpu else False
    query_dataset = Tx_dataset(set='query_a',
                               file_list='query_a_list.txt').dataset
    gallery_dataset = Tx_dataset(set='gallery_a',
                                 file_list='gallery_a_list.txt').dataset

    queryloader = DataLoader(ImageDataset(query_dataset,
                                          transform=build_transforms(
                                              opt, is_train=False)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageDataset(gallery_dataset,
                                            transform=build_transforms(
                                                opt, is_train=False)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    queryFliploader = DataLoader(ImageDataset(query_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  flip=True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageDataset(gallery_dataset,
                                                transform=build_transforms(
                                                    opt,
                                                    is_train=False,
                                                    flip=True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    print('initializing model ...')

    model = build_model(opt)

    if opt.pretrained_choice == 'self':
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    # opt.model_name = "pcb"

    pcb_model = build_model(opt)

    if opt.pretrained_choice == 'self':
        state_dict = torch.load(
            '/data/zhoumi/train_project/REID/tx_challenge/pytorch-ckpt/r50_ibn_a_bigsize_era/model_best.pth.tar'
        )['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        pcb_model.load_state_dict(state_dict, strict=False)
        print('load pretrained pcb model from the hard-coded checkpoint path above')
    print('pcb model size: {:.5f}M'.format(
        sum(p.numel() for p in pcb_model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
        pcb_model = nn.DataParallel(pcb_model).cuda()

    reid_evaluator = Evaluator(model,
                               pcb_model=pcb_model,
                               norm=opt.norm,
                               eval_flip=opt.eval_flip,
                               re_ranking=opt.re_ranking,
                               concate=True)

    results = reid_evaluator.evaluate(queryloader,
                                      galleryloader,
                                      queryFliploader,
                                      galleryFliploader,
                                      k1=6,
                                      k2=2,
                                      lambda_value=0.3)

    # reid_evaluator.validation(queryloader, galleryloader)

    os.makedirs('./result', exist_ok=True)
    with open('./result/submission_example_A.json', "w",
              encoding='utf-8') as fd:
        json.dump(results, fd)
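Note: `build_transforms` is project-specific; the `flip=True` loaders above supply deterministically mirrored views for flip-augmented evaluation. A plausible minimal sketch using torchvision, assuming the config exposes `height` and `width` (an assumption, not the project's actual code):

import torchvision.transforms as T
import torchvision.transforms.functional as F

def build_transforms(opt, is_train=False, flip=False):
    # sketch: eval-time transforms; flip=True mirrors every image
    ops = [T.Resize((opt.height, opt.width))]
    if flip:
        ops.append(T.Lambda(F.hflip))  # deterministic horizontal flip
    ops += [T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225])]
    return T.Compose(ops)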
Example #7
File: train.py  Project: ziedbouf/waternn
def validate(val_loader, model, criterion, outfile='predict', seeout=False):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    outpath = outfile + '.txt'
    outf = open(outpath, 'w')

    with torch.no_grad():
        end = time.time()
        for i, (target, datas) in enumerate(val_loader):
            # if args.gpu is not None:
            #     input = input.cuda(args.gpu, non_blocking=True)
            # target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            target = target.cuda()
            datas = datas.cuda().float()
            output = model(datas)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc, pred5, max5out = accuracy(output, target, topk=(1, 5))
            if seeout:
                writepred = pred5.tolist()
                max5out = max5out.tolist()
                for j, item in enumerate(writepred):
                    outf.writelines(
                        str(item).strip('[').strip(']') + ',' +
                        str(max5out[j]).strip('[').strip(']') + ',' +
                        str(target.tolist()[j]) + '\r\n')

            acc1 = acc[0]
            acc5 = acc[1]
            losses.update(loss.item(), datas.size(0))
            top1.update(acc1[0], datas.size(0))
            top5.update(acc5[0], datas.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % opt.plot_every == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1,
                          top5=top5))

        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                    top5=top5))
        logging.info(
            ' validate-----* Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f} Loss {loss.val:.4f}'
            .format(top1=top1, top5=top5, loss=losses))
    if seeout:
        outf.writelines(
            '* Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f} Loss {loss.val:.4f}\r\n'
            .format(top1=top1, top5=top5, loss=losses))
        outf.writelines('======user config========')
        outf.writelines(pformat(opt._state_dict()))
    outf.close()
    return top1.avg, top5.avg
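Note: `AverageMeter` tracks the latest value and a running average; a standard minimal sketch of what the loop above assumes:

class AverageMeter:
    # sketch: keep the latest value plus a running sum/average
    def __init__(self):
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count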
Example #8
def train(**kwargs):
    opt._parse(kwargs)
    #opt.lr=0.00002
    opt.model_name = 'AlignedReid'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(opt.datatype)),
                             sampler=RandomIdentitySampler(
                                 dataset.train, opt.num_instances),
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.datatype)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.datatype)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    queryFliploader = queryloader
    galleryFliploader = galleryloader
    # queryFliploader = DataLoader(
    #     ImageData(dataset.query, TestTransform(opt.datatype, True)),
    #     batch_size=opt.test_batch, num_workers=opt.workers,
    #     pin_memory=pin_memory
    # )
    #
    # galleryFliploader = DataLoader(
    #     ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
    #     batch_size=opt.test_batch, num_workers=opt.workers,
    #     pin_memory=pin_memory
    # )

    print('initializing model ...')
    model = AlignedResNet50(num_classes=dataset.num_train_pids,
                            loss={'softmax', 'metric'},
                            aligned=True,
                            use_gpu=use_gpu)

    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = AlignedEvaluator(model)

    if opt.evaluate:
        #rank1 = test(model, queryloader, galleryloader, use_gpu)
        reid_evaluator.evaluate(queryloader,
                                galleryloader,
                                queryFliploader,
                                galleryFliploader,
                                re_ranking=opt.re_ranking,
                                savefig=opt.savefig,
                                test_distance='global')
        return

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids,
                                             use_gpu=use_gpu)

    embedding_criterion = TripletLossAlignedReID(margin=opt.margin)

    # def criterion(triplet_y, softmax_y, labels):
    #     losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
    #              [xent_criterion(output, labels) for output in softmax_y]
    #     loss = sum(losses)
    #     return loss

    def criterion(outputs, features, local_features, labels):
        if opt.htri_only:
            if isinstance(features, tuple):
                global_loss, local_loss = DeepSupervision(
                    embedding_criterion, features, labels)
            else:
                global_loss, local_loss = embedding_criterion(
                    features, labels, local_features)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(xent_criterion, outputs, labels)
            else:
                xent_loss = xent_criterion(outputs, labels)

            if isinstance(features, tuple):
                global_loss, local_loss = DeepSupervision(
                    embedding_criterion, features, labels)
            else:
                global_loss, local_loss = embedding_criterion(
                    features, labels, local_features)
        loss = xent_loss + global_loss + local_loss
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = AlignedTrainer(opt, model, optimizer, criterion,
                                  summary_writer)

    def adjust_lr(optimizer, ep):
        # warmup: step the lr up every 5 epochs for the first 50 epochs,
        # hold at 10x until epoch 200, return to the base lr until 300,
        # then decay to 0.1x
        if ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    print('start train......')
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.adjust_lr:
            adjust_lr(optimizer, epoch + 1)

        reid_trainer.train(epoch, trainloader)

        # evaluate (and save) every eval_step epochs and at the final epoch
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0) or (
                epoch + 1) == opt.max_epoch:

            # grab the state_dict first: running eval can exhaust GPU memory,
            # which would prevent saving the model afterwards
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1
            },
                            is_best=0,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

            if opt.mode == 'class':
                rank1 = test(model, queryloader)
            else:
                rank1 = reid_evaluator.evaluate(queryloader, galleryloader,
                                                queryFliploader,
                                                galleryFliploader)
                #rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            if is_best:
                save_checkpoint({
                    'state_dict': state_dict,
                    'epoch': epoch + 1
                },
                                is_best=is_best,
                                save_dir=opt.save_dir,
                                filename='checkpoint_ep' + str(epoch + 1) +
                                '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
Example #9
def validate(val_loader, model, criterion, outfile='predict', seeout=False):
    batch_time = AverageMeter()
    losses = AverageMeter()
    multi_label_acc = AverageMeter()
    top1 = AverageMeter()
    top2 = AverageMeter()
    top3 = AverageMeter()
    top4 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()

    outpath = outfile + '.txt'
    outf = open(outpath, 'w')

    with torch.no_grad():
        end = time.time()
        for i, (target, datas) in enumerate(val_loader):
            # if args.gpu is not None:
            #     input = input.cuda(args.gpu, non_blocking=True)
            # target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            # measure accuracy and record loss
            if opt.multi_label > 1:
                target = target.cuda().float()
                datas = datas.cuda().float()
                output = model(datas)
                # loss = criterion(output, target)
                loss1 = nn.BCELoss()
                loss = loss1(output, target)
                acc, acc_list, output_list, batch_pred, batch_target = accuracy_multilabel(
                    output, target)
                if seeout:
                    # writepred = pred5.tolist()
                    # max5out = max5out.tolist()
                    for j in range(len(output_list)):
                        outf.writelines(
                            "output:" +
                            str(output_list[j]).strip('[').strip(']') + ',' +
                            "pred:" +
                            str(batch_pred[j]).strip('[').strip(']') + ',' +
                            "target_encode:" +
                            str(batch_target[j]).strip('[').strip(']') + ',' +
                            "hamming acc:" + str(acc_list[j]) + '\r\n')
                multi_label_acc.update(acc, 1)
                losses.update(loss.item(), datas.size(0))
                # if lossesnum > losses.val:
                #    lossesnum = losses.val
                #    print('====iter *{}==== * * *   losses.val :{} Update   ========\n'.format(ii, lossesnum))
                # best_path = trainer.save(better=True)
                # print("====epoch[{}]--- iter[{}] ** save params *******===".format(epoch, ii))

                # # if best_acc1 < top1.val:
                # #     best_acc1 = top1.val
                # #     print('===== * * *   best_acc1 :{} Update   ========\n'.format(best_acc1))
                # #     best_path = trainer.save(better=True)

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if (i + 1) % opt.plot_every == 0:
                    print(
                        'Test: [{0}/{1}]\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Acc@hamming {multi_label_acc.val:.3f} ({multi_label_acc.avg:.3f})\t'
                        .format(i,
                                len(val_loader),
                                batch_time=batch_time,
                                loss=losses,
                                multi_label_acc=multi_label_acc))
            else:
                target = target.cuda()
                datas = datas.cuda().float()
                output = model(datas)
                loss = criterion(output, target)
                acc, pred5, max5out = accuracy(output,
                                               target,
                                               topk=(1, 2, 3, 4, 5))
                if seeout:
                    writepred = pred5.tolist()
                    max5out = max5out.tolist()
                    for j, item in enumerate(writepred):
                        outf.writelines(
                            str(item).strip('[').strip(']') + ',' +
                            str(max5out[j]).strip('[').strip(']') + ',' +
                            str(target.tolist()[j]) + '\r\n')
                acc1 = acc[0]
                acc2 = acc[1]
                acc3 = acc[2]
                acc4 = acc[3]
                acc5 = acc[4]
                losses.update(loss.item(), datas.size(0))
                top1.update(acc1[0], datas.size(0))
                top2.update(acc2[0], datas.size(0))
                top3.update(acc3[0], datas.size(0))
                top4.update(acc4[0], datas.size(0))
                top5.update(acc5[0], datas.size(0))

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if i % opt.plot_every == 0:
                    print('Test: [{0}/{1}]\t'
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Acc@2 {top2.val:.3f} ({top2.avg:.3f})\t'
                          'Acc@3 {top3.val:.3f} ({top3.avg:.3f})\t'
                          'Acc@4 {top4.val:.3f} ({top4.avg:.3f})\t'
                          'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              i,
                              len(val_loader),
                              batch_time=batch_time,
                              loss=losses,
                              top1=top1,
                              top2=top2,
                              top3=top3,
                              top4=top4,
                              top5=top5))
    if opt.multi_label > 1:
        print(' * Acc@hamming {multi_label_acc.avg:.3f}'.format(
            multi_label_acc=multi_label_acc))
    else:
        print(
            ' * Acc@1 {top1.avg:.3f} Acc@2 {top2.avg:.3f} Acc@3 {top3.avg:.3f} Acc@4 {top4.avg:.3f} Acc@5 {top5.avg:.3f}'
            .format(top1=top1, top2=top2, top3=top3, top4=top4, top5=top5))
        logging.info(
            ' validate-----* Acc@1 {top1.avg:.3f} Acc@2 {top2.avg:.3f} Acc@3 {top3.avg:.3f} Acc@4 {top4.avg:.3f} Acc@5 {top5.avg:.3f} Loss {loss.val:.4f}\r\n'
            .format(top1=top1,
                    top2=top2,
                    top3=top3,
                    top4=top4,
                    top5=top5,
                    loss=losses))
    if seeout:
        if opt.multi_label > 1:
            outf.writelines(
                '* Acc@hamming {multi_label_acc.avg:.3f} Loss {loss.val:.4f}\r\n'
                .format(multi_label_acc=multi_label_acc, loss=losses))
        else:
            outf.writelines(
                '* Acc@1 {top1.avg:.3f} Acc@2 {top2.avg:.3f} Acc@3 {top3.avg:.3f} Acc@4 {top4.avg:.3f} Acc@5 {top5.avg:.3f} Loss {loss.val:.4f}\r\n'
                .format(top1=top1,
                        top2=top2,
                        top3=top3,
                        top4=top4,
                        top5=top5,
                        loss=losses))
        outf.writelines('======user config========')
        outf.writelines(pformat(opt._state_dict()))
    outf.close()
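Note: `accuracy` is assumed to return per-k precision along with the raw top-k predictions and scores that get written to the output file. A plausible sketch following the common top-k recipe (an assumption, not this project's exact code):

import torch

def accuracy(output, target, topk=(1,)):
    # sketch: top-k precision plus top-k predictions/scores per sample
    maxk = max(topk)
    batch_size = target.size(0)
    maxout, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    correct = pred.t().eq(target.view(1, -1).expand_as(pred.t()))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res, pred, maxout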
Example #10
def train(**kwargs):
    opt._parse(kwargs)  # parse and set all program options

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(opt.datatype)),
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.datatype)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.datatype)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)
    queryFliploader = DataLoader(ImageData(dataset.query,
                                           TestTransform(opt.datatype, True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageData(dataset.gallery,
                                             TestTransform(opt.datatype,
                                                           True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, 1, True)
    elif opt.model_name == 'triplet':
        model = ResNetBuilder(None, 1, True)
    elif opt.model_name == 'bfe':
        if opt.datatype == "person":
            model = BFE(dataset.num_train_pids, 1.0, 0.33)
        else:
            model = BFE(dataset.num_train_pids, 0.5, 0.5)
    elif opt.model_name == 'ide':
        model = IDE(dataset.num_train_pids)
    elif opt.model_name == 'resnet':
        model = Resnet(dataset.num_train_pids)
    elif opt.model_name == 'strongBaseline':
        model = StrongBaseline(dataset.num_train_pids)

    optim_policy = model.get_optim_policy()

    # update model: swap in an ImageNet-pretrained resnet18 with a 10-class head
    model = resnet18(True)
    model.fc = nn.Linear(model.fc.in_features, 10)
    # re-collect parameters so the optimizer below trains the swapped-in model
    optim_policy = model.parameters()

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    #xent_criterion = nn.CrossEntropyLoss()
    criterion = CrossEntropyLabelSmooth(10)
    epochs = 100
    best = 0.0
    b_e = 0
    for e in range(epochs):
        model.train()
        for i, inputs in enumerate(trainloader):
            imgs, pid, _ = inputs
            imgs, pid = imgs.cuda(), pid.cuda()
            outputs = model(imgs)
            loss = criterion(outputs, pid)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print('epoch=%s \t batch loss=%s' % (e, loss.item()))
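Note: `CrossEntropyLabelSmooth` replaces one-hot targets with targets smoothed toward the uniform distribution. A minimal sketch of the usual formulation (assumed; the project's implementation may differ):

import torch
import torch.nn as nn

class CrossEntropyLabelSmooth(nn.Module):
    # sketch: cross entropy with epsilon-smoothed targets
    def __init__(self, num_classes, epsilon=0.1):
        super().__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        onehot = torch.zeros_like(log_probs).scatter_(
            1, targets.unsqueeze(1), 1)
        soft = (1 - self.epsilon) * onehot + self.epsilon / self.num_classes
        return (-soft * log_probs).mean(0).sum()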
Example #11
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    # os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    print(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )
    queryFliploader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    galleryFliploader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, 1, True)
    elif opt.model_name == 'triplet':
        model = ResNetBuilder(None, 1, True)
    elif opt.model_name == 'CBDB':
        if opt.datatype == "person":
            model = CBDB(dataset.num_train_pids, 1.0, 0.33)
        else:
            model = CBDB(dataset.num_train_pids, 0.5, 0.5)
    elif opt.model_name == 'ide':
        model = IDE(dataset.num_train_pids)
    elif opt.model_name == 'resnet':
        model = Resnet(dataset.num_train_pids)
 
    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        #state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(queryloader, galleryloader, 
            queryFliploader, galleryFliploader, re_ranking=opt.re_ranking, savefig=opt.savefig)
        return
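Note: `RandomIdentitySampler` used above draws each identity num_instances times per pass, which triplet-style losses require. A minimal sketch, assuming `data_source` is a sequence of (img, pid, camid) records:

from collections import defaultdict

import numpy as np
from torch.utils.data import Sampler

class RandomIdentitySampler(Sampler):
    # sketch: yield indices so each identity contributes num_instances samples
    def __init__(self, data_source, num_instances=4):
        self.num_instances = num_instances
        self.index_dic = defaultdict(list)
        for index, (_, pid, _) in enumerate(data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())

    def __iter__(self):
        ret = []
        for i in np.random.permutation(len(self.pids)):
            idxs = self.index_dic[self.pids[i]]
            replace = len(idxs) < self.num_instances
            ret.extend(np.random.choice(idxs, size=self.num_instances,
                                        replace=replace))
        return iter(ret)

    def __len__(self):
        return len(self.pids) * self.num_instances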
Example #12
def validate(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    # os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    # sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing tx_challenge dataset')

    pin_memory = True if use_gpu else False
    query_dataset = Tx_dataset(set='train_set',
                               file_list='val_query_list.txt').dataset
    gallery_dataset = Tx_dataset(set='train_set',
                                 file_list='val_gallery_list.txt').dataset

    queryloader = DataLoader(ImageDataset(query_dataset,
                                          transform=build_transforms(
                                              opt, is_train=False)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageDataset(gallery_dataset,
                                            transform=build_transforms(
                                                opt, is_train=False)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    queryFliploader = DataLoader(ImageDataset(query_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  flip=True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageDataset(gallery_dataset,
                                                transform=build_transforms(
                                                    opt,
                                                    is_train=False,
                                                    flip=True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    queryCenterloader = DataLoader(ImageDataset(query_dataset,
                                                transform=build_transforms(
                                                    opt,
                                                    is_train=False,
                                                    crop='center')),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    galleryCenterloader = DataLoader(ImageDataset(gallery_dataset,
                                                  transform=build_transforms(
                                                      opt,
                                                      is_train=False,
                                                      crop='center')),
                                     batch_size=opt.test_batch,
                                     num_workers=opt.workers,
                                     pin_memory=pin_memory)

    queryLtloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='lt')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryLtloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='lt')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryRtloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='rt')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryRtloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='rt')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryRbloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='rb')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryRbloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='rb')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryLbloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='lb')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryLbloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='lb')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    print('initializing model ...')

    model = build_model(opt)

    if opt.pretrained_choice == 'self':
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = Evaluator(model,
                               norm=opt.norm,
                               eval_flip=opt.eval_flip,
                               crop_validation=opt.crop_validation)

    print("without reranking testing......")
    reid_evaluator.validation(queryloader, galleryloader, queryFliploader,
                              galleryFliploader, queryCenterloader,
                              galleryCenterloader, queryLtloader,
                              galleryLtloader, queryRtloader, galleryRtloader,
                              queryLbloader, galleryLbloader, queryRbloader,
                              galleryRbloader)

    max_score = 0
    k = 0
    for k1 in range(1, 21):
        # for la in [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]:
        print("**********k1:{}***********".format(k1))
        score = reid_evaluator.validation(queryloader,
                                          galleryloader,
                                          queryFliploader,
                                          galleryFliploader,
                                          queryCenterloader,
                                          galleryCenterloader,
                                          queryLtloader,
                                          galleryLtloader,
                                          queryRtloader,
                                          galleryRtloader,
                                          queryLbloader,
                                          galleryLbloader,
                                          queryRbloader,
                                          galleryRbloader,
                                          re_ranking=True,
                                          k1=k1,
                                          k2=2,
                                          lambda_value=0.3)
        if score > max_score:
            max_score = score
            k = k1

    print("max_score: {} at k: {}".format(max_score, k))
Example #13
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark

    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset)

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    if 'triplet' in opt.model_name:
        trainloader = DataLoader(
            ImageData(dataset.train, TrainTransform(opt.height, opt.width)),
            sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
            batch_size=opt.train_batch,
            num_workers=opt.workers,
            last_batch='discard')
    else:
        trainloader = DataLoader(
            ImageData(dataset.train, TrainTransform(opt.height, opt.width)),
            batch_size=opt.train_batch,
            shuffle=True,
            num_workers=opt.workers,
        )

    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch,
        num_workers=opt.workers,
    )

    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.height, opt.width)),
        batch_size=opt.test_batch,
        num_workers=opt.workers,
    )

    print('initializing model ...')
    model = get_baseline_model(dataset.num_train_pids, mx.gpu(0),
                               opt.pretrained_model)

    print('model size: {:.5f}M'.format(
        sum(p.data().size for p in model.collect_params().values()) / 1e6))

    xent_criterion = gluon.loss.SoftmaxCrossEntropyLoss()
    tri_criterion = TripletLoss(opt.margin)

    def cls_criterion(cls_scores, feat, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        return cls_loss

    def triplet_criterion(cls_scores, feat, targets):
        triplet_loss, dist_ap, dist_an = tri_criterion(feat, targets)
        return triplet_loss

    def cls_tri_criterion(cls_scores, feat, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        triplet_loss, dist_ap, dist_an = tri_criterion(feat, targets)
        loss = cls_loss + triplet_loss
        return loss

    # get optimizer
    optimizer = gluon.Trainer(model.collect_params(), opt.optim, {
        'learning_rate': opt.lr,
        'wd': opt.weight_decay
    })

    def adjust_lr(optimizer, ep):
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            lr = 1e-3 * opt.num_gpu
        elif ep < 180:
            lr = 1e-4 * opt.num_gpu
        elif ep < 300:
            lr = 1e-5 * opt.num_gpu
        elif ep < 320:
            lr = 1e-5 * 0.1**((ep - 320) / 80) * opt.num_gpu
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            lr = 1e-4 * opt.num_gpu
        else:
            lr = 1e-5 * opt.num_gpu

        optimizer.set_learning_rate(lr)

    start_epoch = opt.start_epoch

    # get trainer and evaluator
    use_criterion = None
    if opt.model_name == 'softmax':
        use_criterion = cls_criterion
    elif opt.model_name == 'softmax_triplet':
        use_criterion = cls_tri_criterion
    elif opt.model_name == 'triplet':
        use_criterion = triplet_criterion

    reid_trainer = reidTrainer(opt, model, optimizer, use_criterion,
                               summary_writer, mx.gpu(0))
    reid_evaluator = reidEvaluator(model, mx.gpu(0))

    # start training
    best_rank1 = -np.inf
    best_epoch = 0

    for epoch in range(start_epoch, opt.max_epoch):
        if opt.step_size > 0:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # evaluate (and save) every eval_step epochs and at the final epoch
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0) or (
                epoch + 1) == opt.max_epoch:
            rank1 = reid_evaluator.evaluate(queryloader, galleryloader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            state_dict = {'model': model, 'epoch': epoch}
            save_checkpoint(state_dict,
                            is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.params')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
Example #14
def train_cycle_gan(**kwargs):
    opt._parse(kwargs)
    torch.manual_seed(opt.seed)

    # Write standard output into file
    sys.stdout = Logger(os.path.join(opt.save_dir, 'log_train.txt'))

    print('========user config========')
    pprint(opt._state_dict())
    print('===========end=============')
    if opt.use_gpu:
        print('currently using GPU')
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    pin_memory = True if opt.use_gpu else False

    print('initializing dataset {}'.format(opt.dataset_mode))
    dataset = UnalignedDataset(opt)
    trainloader = DataLoader(dataset,
                             opt.batchSize,
                             shuffle=True,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    summaryWriter = SummaryWriter(os.path.join(opt.save_dir,
                                               'tensorboard_log'))

    print('initializing model ... ')
    use_dropout = not opt.no_dropout
    netG_A = define_G(opt.input_nc, opt.output_nc, opt.ndf,
                      opt.which_model_netG, opt.norm, use_dropout)
    netG_B = define_G(opt.output_nc, opt.input_nc, opt.ndf,
                      opt.which_model_netG, opt.norm, use_dropout)
    use_sigmoid = opt.no_lsgan
    netD_A = define_D(opt.output_nc, opt.ndf, opt.which_model_netD,
                      opt.n_layers_D, opt.norm, use_sigmoid)
    netD_B = define_D(opt.input_nc, opt.ndf, opt.which_model_netD,
                      opt.n_layers_D, opt.norm, use_sigmoid)
    # print(netD_A)
    optimizer_G = torch.optim.Adam(itertools.chain(netG_A.parameters(),
                                                   netG_B.parameters()),
                                   lr=opt.lr,
                                   betas=(opt.beta1, 0.999))
    optimizer_D = torch.optim.Adam(itertools.chain(netD_A.parameters(),
                                                   netD_B.parameters()),
                                   lr=opt.lr,
                                   betas=(opt.beta1, 0.999))

    def get_scheduler(optimizer, opt):
        if opt.lr_policy == 'lambda':

            def lambda_rule(epoch):
                lr_l = 1.0 - max(0, epoch + 1 + opt.start_epoch -
                                 opt.niter) / float(opt.lr_decay_iters + 1)
                return lr_l

            scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
        elif opt.lr_policy == 'step':
            scheduler = lr_scheduler.StepLR(optimizer,
                                            step_size=opt.lr_decay_iters,
                                            gamma=0.1)
        elif opt.lr_policy == 'plateau':
            scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       mode='min',
                                                       factor=0.2,
                                                       threshold=0.01,
                                                       patience=5)
        else:
            raise NotImplementedError(
                'learning rate policy [{}] is not implemented'.format(
                    opt.lr_policy))
        return scheduler

    scheduler_G = get_scheduler(optimizer_G, opt)
    scheduler_D = get_scheduler(optimizer_D, opt)

    start_epoch = opt.start_epoch
    if opt.use_gpu:
        netG_A = torch.nn.DataParallel(netG_A).cuda()
        netG_B = torch.nn.DataParallel(netG_B).cuda()
        netD_A = torch.nn.DataParallel(netD_A).cuda()
        netD_B = torch.nn.DataParallel(netD_B).cuda()

    # get trainer
    cycleganTrainer = Trainer(opt, netG_A, netG_B, netD_A, netD_B, optimizer_G,
                              optimizer_D, summaryWriter)
    # start training
    for epoch in range(start_epoch, opt.max_epoch):
        scheduler_G.step()
        scheduler_D.step()
        # train over whole dataset
        cycleganTrainer.train(epoch, trainloader)
        if (epoch + 1) % opt.save_freq == 0 or (epoch + 1) == opt.max_epoch:
            if opt.use_gpu:
                state_dict_netG_A = netG_A.module.state_dict()
                state_dict_netG_B = netG_B.module.state_dict()
                state_dict_netD_A = netD_A.module.state_dict()
                state_dict_netD_B = netD_B.module.state_dict()
            else:
                state_dict_netG_A = netG_A.state_dict()
                state_dict_netG_B = netG_B.state_dict()
                state_dict_netD_A = netD_A.state_dict()
                state_dict_netD_B = netD_B.state_dict()
            save_checkpoint(
                {
                    'netG_A': state_dict_netG_A,
                    'netG_B': state_dict_netG_B,
                    'netD_A': state_dict_netD_A,
                    'netD_B': state_dict_netD_B,
                    'epoch': epoch + 1,
                },
                False,
                save_dir=opt.save_dir,
                filename='checkpoint_ep' + str(epoch + 1))
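Note: this example calls scheduler_G.step() / scheduler_D.step() before training each epoch, which matches the pre-1.1 PyTorch API; in PyTorch >= 1.1 the scheduler should step after the epoch's optimizer updates, otherwise the first learning-rate value is skipped. A minimal reordering sketch of the loop above:

for epoch in range(start_epoch, opt.max_epoch):
    cycleganTrainer.train(epoch, trainloader)  # optimizer.step() happens inside
    scheduler_G.step()
    scheduler_D.step()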
Example #15
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing tx_challenge dataset')
    dataset = Tx_dataset(file_list='train_list_new.txt').dataset
    query_dataset = Tx_dataset(set='train_set',
                               file_list='val_query_list.txt').dataset
    gallery_dataset = Tx_dataset(set='train_set',
                                 file_list='val_gallery_list.txt').dataset

    train_set = ImageDataset(dataset,
                             transform=build_transforms(opt, is_train=True))

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))
    if opt.sampler_new:
        trainloader = DataLoader(
            train_set,
            sampler=RandomIdentitySampler_new(train_set, opt.train_batch,
                                              opt.num_instances),
            # sampler=RandomIdentitySampler(train_set, opt.num_instances),
            batch_size=opt.train_batch,
            num_workers=opt.workers,
            pin_memory=pin_memory,
            drop_last=True)
    else:
        trainloader = DataLoader(
            train_set,
            # sampler=RandomIdentitySampler_new(train_set, opt.train_batch, opt.num_instances),
            sampler=RandomIdentitySampler(train_set, opt.num_instances),
            batch_size=opt.train_batch,
            num_workers=opt.workers,
            pin_memory=pin_memory,
            drop_last=True)
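
    # both samplers build P x K batches: num_instances images per identity,
    # so a batch of train_batch images covers train_batch // num_instances
    # identities, the layout that triplet-style losses expect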

    queryloader = DataLoader(ImageDataset(query_dataset,
                                          transform=build_transforms(
                                              opt, is_train=False)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageDataset(gallery_dataset,
                                            transform=build_transforms(
                                                opt, is_train=False)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    queryFliploader = DataLoader(ImageDataset(query_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  flip=True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageDataset(gallery_dataset,
                                                transform=build_transforms(
                                                    opt,
                                                    is_train=False,
                                                    flip=True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    queryCenterloader = DataLoader(ImageDataset(query_dataset,
                                                transform=build_transforms(
                                                    opt,
                                                    is_train=False,
                                                    crop='center')),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    galleryCenterloader = DataLoader(ImageDataset(gallery_dataset,
                                                  transform=build_transforms(
                                                      opt,
                                                      is_train=False,
                                                      crop='center')),
                                     batch_size=opt.test_batch,
                                     num_workers=opt.workers,
                                     pin_memory=pin_memory)

    queryLtloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='lt')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryLtloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='lt')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryRtloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='rt')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryRtloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='rt')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryRbloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='rb')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryRbloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='rb')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryLbloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='lb')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryLbloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='lb')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)
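
    # The twelve loaders above implement test-time augmentation: original,
    # horizontal flip, and five crops (center, lt, rt, lb, rb) of query and
    # gallery images. The helper below is an equivalent, more compact
    # construction (a sketch only; unused by the original flow):
    def make_test_loader(ds, **tf_kwargs):
        return DataLoader(ImageDataset(ds,
                                       transform=build_transforms(
                                           opt, is_train=False, **tf_kwargs)),
                          batch_size=opt.test_batch,
                          num_workers=opt.workers,
                          pin_memory=pin_memory)
    # e.g. queryRbloader = make_test_loader(query_dataset, crop='rb')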

    print('initializing model ...')

    model = build_model(opt)

    optim_policy = model.get_optim_policy()

    if opt.pretrained_choice == 'self':
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = Evaluator(model,
                               norm=opt.norm,
                               eval_flip=opt.eval_flip,
                               re_ranking=opt.re_ranking)

    if opt.use_center:
        criterion = make_loss_with_center(opt)
    else:
        criterion = make_loss(opt)

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = cls_tripletTrainer(opt, model, optimizer, criterion,
                                      summary_writer)

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.adjust_lr:
            adjust_lr(optimizer, opt.lr, opt.model_name, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # evaluate and save every eval_step epochs, and at the final epoch
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0) or (
                epoch + 1) == opt.max_epoch:
            rank1 = reid_evaluator.validation(
                queryloader, galleryloader, queryFliploader, galleryFliploader,
                queryCenterloader, galleryCenterloader, queryLtloader,
                galleryLtloader, queryRtloader, galleryRtloader, queryLbloader,
                galleryLbloader, queryRbloader, galleryRbloader)
            print('start re_ranking......')
            _ = reid_evaluator.validation(queryloader,
                                          galleryloader,
                                          queryFliploader,
                                          galleryFliploader,
                                          queryCenterloader,
                                          galleryCenterloader,
                                          queryLtloader,
                                          galleryLtloader,
                                          queryRtloader,
                                          galleryRtloader,
                                          queryLbloader,
                                          galleryLbloader,
                                          queryRbloader,
                                          galleryRbloader,
                                          re_ranking=True)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1
            },
                            is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
Example #16
def train(**kwargs):
    opt._parse(kwargs)
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')
    pin_memory = True if use_gpu else False
    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))
    # -------------- model and parameter loading ------------------
    print('initializing model ...')
    if opt.model_name == 'bfe':
        if opt.datatype == "person":
            model = BFE(751, 1.0, 0.33)
        else:
            model = BFE(751, 0.5, 0.5)
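    # note: 751 is presumably the Market-1501 training identity count,
    # hard-coded here; only the 'bfe' model is handled above, so `model`
    # would be undefined for any other model_name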

    optim_policy = model.parameters()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))
    if use_gpu:
        model = nn.DataParallel(model).cuda()
        # model.cuda()
    reid_evaluator = ResNetEvaluator(model)

    # -------------------------- load end -------------------------
    def handleDataset():
        print('initializing dataset {}'.format(opt.dataset))
        # this part did not need to be changed
        dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

        # for query images
        queryloader = DataLoader(
            ImageData(dataset.query, TestTransform(opt.datatype)),
            batch_size=opt.test_batch,
            num_workers=opt.workers,  # test_batch = 1
            pin_memory=pin_memory)

        # for target image
        galleryloader = DataLoader(ImageData(dataset.target,
                                             TestTransform(opt.datatype)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

        queryFliploader = DataLoader(ImageData(
            dataset.query, TestTransform(opt.datatype, True)),
                                     batch_size=opt.test_batch,
                                     num_workers=opt.workers,
                                     pin_memory=pin_memory)

        galleryFliploader = DataLoader(ImageData(
            dataset.target, TestTransform(opt.datatype, True)),
                                       batch_size=opt.test_batch,
                                       num_workers=opt.workers,
                                       pin_memory=pin_memory)

        return queryloader, galleryloader, queryFliploader, galleryFliploader

    # def deleteDirImage():

    def recv_and_send_data(clnt_sock):
        # receive and send data in a loop
        strSend = 'Please send messages to me... \n'
        strSend = strSend.encode()
        clnt_sock.send(strSend)
        print("send successfully")
        while True:
            recv_data = clnt_sock.recv(1024)

            (queryloader, galleryloader, queryFliploader,
             galleryFliploader) = handleDataset()
            cmc = reid_evaluator.evaluate(queryloader,
                                          galleryloader,
                                          queryFliploader,
                                          galleryFliploader,
                                          re_ranking=opt.re_ranking,
                                          savefig=opt.savefig)
            # the reply should carry the corresponding result data
            if recv_data:
                reply = 'cmc : ' + str(cmc)  # cmc is a numpy float
                clnt_sock.sendall(reply.encode())
                # delete the image folder afterwards
                # deleteDirImage
            else:
                break
        clnt_sock.close()

    # ------------------------ start TCP ----------------------------
    serv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("socket creating...")
    # bind
    try:
        serv_sock.bind(('127.0.0.1', 8801))
    except socket.error:
        print("Bind failed ")
        sys.exit()
    print("socket bind successfully")
    # listen
    serv_sock.listen(10)
    print("socket start listening")
    # accept
    while True:
        clnt_sock, clnt_addr = serv_sock.accept()
        print("Connected to IP:port —— ", clnt_addr[0], ' : ',
              str(clnt_addr[1]))
        # core part
        start_new_thread(recv_and_send_data, (clnt_sock, ))  # args as a tuple

    # close (unreachable: the accept loop above never breaks)
    serv_sock.close()
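
A matching client for the loop above would connect, read the greeting, send any payload, and read back the CMC string (a sketch of the protocol implemented by recv_and_send_data; names are illustrative):

import socket

def query_reid_server(payload=b'run', host='127.0.0.1', port=8801):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    print(sock.recv(1024).decode())   # greeting sent right after accept
    sock.send(payload)                # any non-empty message triggers one evaluation
    reply = sock.recv(1024).decode()  # 'cmc : ...'
    sock.close()
    return reply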
Example #17
def train(**kwargs):
    opt._parse(kwargs)

    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, use_all=opt.use_all)

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))
    # load data
    pin_memory = True if use_gpu else False
    dataloader = load_data(dataset, pin_memory)

    print('initializing model ...')
    if opt.loss == 'softmax' or opt.loss == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, opt.last_stride, True)
    elif opt.loss == 'triplet':
        model = ResNetBuilder(None, opt.last_stride, True)

    if opt.pretrained_model:
        if use_gpu:
            state_dict = torch.load(opt.pretrained_model)['state_dict']
        else:
            state_dict = torch.load(opt.pretrained_model,
                                    map_location='cpu')['state_dict']

        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    optim_policy = model.get_optim_policy()
    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(dataloader['query'],
                                dataloader['gallery'],
                                dataloader['queryFlip'],
                                dataloader['galleryFlip'],
                                savefig=opt.savefig)
        return

    criterion = get_loss()

    # optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=5e-4)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=5e-4)

    scheduler = WarmupMultiStepLR(optimizer, [40, 70], 0.1, 0.01, 10, 'linear')
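    # assuming the usual WarmupMultiStepLR semantics: lr starts at
    # 0.01 * base_lr, warms up linearly over the first 10 epochs, then is
    # multiplied by 0.1 at epochs 40 and 70 (stepping before train() below
    # follows the pre-1.1 PyTorch convention)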

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = Trainer(opt, model, optimizer, criterion, summary_writer)

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        scheduler.step()

        reid_trainer.train(epoch, dataloader['train'])

        # evaluate and save every eval_step epochs, and at the final epoch
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0) or (
                epoch + 1) == opt.max_epoch:
            rank1 = reid_evaluator.evaluate(dataloader['query'],
                                            dataloader['gallery'],
                                            dataloader['queryFlip'],
                                            dataloader['galleryFlip'])

            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1
            },
                            is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
Example #18
def multi_validate(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    # os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    # sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing tx_challenge dataset')

    pin_memory = True if use_gpu else False
    query_dataset = Tx_dataset(set='train_set',
                               file_list='val_query_list.txt').dataset
    gallery_dataset = Tx_dataset(set='train_set',
                                 file_list='val_gallery_list.txt').dataset

    queryloader = DataLoader(ImageDataset(query_dataset,
                                          transform=build_transforms(
                                              opt, is_train=False)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageDataset(gallery_dataset,
                                            transform=build_transforms(
                                                opt, is_train=False)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    queryFliploader = DataLoader(ImageDataset(query_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  flip=True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageDataset(gallery_dataset,
                                                transform=build_transforms(
                                                    opt,
                                                    is_train=False,
                                                    flip=True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    models = []
    print('initializing model ...')

    model = build_model(opt)

    if opt.pretrained_choice == 'self':
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    models.append(model)

    base_path = '/data/zhoumi/REID/tx_challenge/pytorch-ckpt/'
    model_paths = [
        'mgn_ibn_bnneck_eraParam/model_best.pth.tar',
        'mgn_ibn_bnneck_eraParam_feat512/checkpoint_ep180.pth.tar',
        'mgn_ibn_bnneck_era/model_best.pth.tar',
        'mgn_ibn_bnneck_eraParam_feat1024/model_best.pth.tar',
        'mgn_ibn_bnneck_eraParam_feat512/model_best.pth.tar',
        'StackPCBv2_ibn_bnneck/model_best.pth.tar'
    ]

    for model_path in model_paths:
        if opt.pretrained_choice == 'self':
            if '1024' in model_path:
                opt.feat = 1024
            elif '512' in model_path:
                opt.feat = 512
            else:
                opt.feat = 256

            if 'StackPCBv2' in model_path:
                opt.feat = 256
                opt.model_name = 'StackPCBv2'
            else:
                opt.model_name = 'MGN'

            model = build_model(opt)

            state_dict = torch.load(base_path + model_path)['state_dict']
            # state_dict = {k: v for k, v in state_dict.items() \
            #        if not ('reduction' in k or 'softmax' in k)}
            model.load_state_dict(state_dict, strict=False)
            print('load pretrained model ' + model_path)
            models.append(model)

        print('pcb model size: {:.5f}M'.format(
            sum(p.numel() for p in model.parameters()) / 1e6))

    # `pcb_model` was never assigned in the original; presumably the first
    # entry of `models` is the main model and the last one loaded
    # (StackPCBv2) is the PCB branch
    model, pcb_model = models[0], models[-1]
    if use_gpu:
        model = nn.DataParallel(model).cuda()
        pcb_model = nn.DataParallel(pcb_model).cuda()
    reid_evaluator = Evaluator(model,
                               pcb_model=pcb_model,
                               norm=opt.norm,
                               eval_flip=opt.eval_flip,
                               concate=True)

    print("without reranking testing......")
    reid_evaluator.validation(queryloader, galleryloader, queryFliploader,
                              galleryFliploader)

    max_score = 0
    k = 0
    for k1 in range(1, 21):
        # for la in [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]:
        print("**********k1:{}***********".format(k1))
        score = reid_evaluator.validation(queryloader,
                                          galleryloader,
                                          queryFliploader,
                                          galleryFliploader,
                                          re_ranking=True,
                                          k1=k1,
                                          k2=2,
                                          lambda_value=0.3)
        if score > max_score:
            max_score = score
            k = k1

    print("max_score: {} at k: {}".format(max_score, k))
Example #19
def train(**kwargs):
    opt._parse(kwargs)
    # torch.backends.cudnn.deterministic = True  # I think this line may slow down the training process
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    random.seed(opt.seed)
    np.random.seed(opt.seed)

    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(
        os.path.join('./pytorch-ckpt/current', opt.save_dir, 'log_train.txt'))

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
    else:
        print('currently using cpu')
    print(opt._state_dict())
    print('initializing dataset {}'.format(opt.trainset_name))
    if opt.trainset_name == 'combine':
        # dataset names are passed in via 'datasets'
        train_dataset = data_manager.init_combine_dataset(
            name=opt.trainset_name,
            options=opt,
            datasets=opt.datasets,
            num_bn_sample=opt.batch_num_bn_estimatation * opt.test_batch,
            share_cam=opt.share_cam,
            num_pids=opt.num_pids)
    elif opt.trainset_name == 'unreal':
        # input dataset dir in 'datasets'
        train_dataset = data_manager.init_unreal_dataset(
            name=opt.trainset_name,
            datasets=opt.datasets,
            num_pids=opt.num_pids,
            num_cams=opt.num_cams,
            img_per_person=opt.img_per_person)

    else:
        train_dataset = data_manager.init_dataset(
            name=opt.trainset_name,
            num_bn_sample=opt.batch_num_bn_estimatation * opt.test_batch,
            num_pids=opt.num_pids)
    pin_memory = True if use_gpu else False
    summary_writer = SummaryWriter(
        os.path.join('./pytorch-ckpt/current', opt.save_dir,
                     'tensorboard_log'))

    if opt.cam_bal:
        IDSampler = IdentityCameraSampler
    else:
        IDSampler = IdentitySampler
    if opt.trainset_name == 'combine':
        samp = IDSampler(train_dataset.train, opt.train_batch,
                         opt.num_instances, train_dataset.cams_of_dataset,
                         train_dataset.len_of_real_dataset)
    else:
        samp = IDSampler(train_dataset.train, opt.train_batch,
                         opt.num_instances)

    trainloader = DataLoader(data_manager.init_datafolder(
        opt.trainset_name, train_dataset.train,
        TrainTransform(opt.height, opt.width)),
                             sampler=samp,
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True,
                             collate_fn=NormalCollateFn())
    print('initializing model ...')
    num_pid = train_dataset.num_train_pids if opt.loss == 'softmax' else None
    model = ResNetBuilder(num_pid)
    if opt.model_path is not None and 'moco' in opt.model_path:
        model = load_moco_model(model, opt.model_path)
    elif opt.model_path is not None:
        model = load_previous_model(model,
                                    opt.model_path,
                                    load_fc_layers=False)
    optim_policy = model.get_optim_policy()
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = CamDataParallel(model).cuda()

    xent = nn.CrossEntropyLoss()
    triplet = TripletLoss()

    def standard_cls_criterion(feat, predictions, targets, global_step,
                               summary_writer):
        identity_loss = xent(predictions, targets)
        identity_accuracy = torch.mean(
            (torch.argmax(predictions, dim=1) == targets).float())
        summary_writer.add_scalar('cls_loss', identity_loss.item(),
                                  global_step)
        summary_writer.add_scalar('cls_accuracy', identity_accuracy.item(),
                                  global_step)
        return identity_loss

    def triplet_criterion(feat, predictions, targets, global_step,
                          summary_writer):
        triplet_loss, acc = triplet(feat, targets)
        summary_writer.add_scalar('loss', triplet_loss.item(), global_step)
        print(acc.item())
        summary_writer.add_scalar('accuracy', acc.item(), global_step)
        return triplet_loss

    # get trainer and evaluator
    optimizer, adjust_lr = get_our_optimizer_strategy(opt, optim_policy)
    if opt.loss == 'softmax':
        crit = standard_cls_criterion
    elif opt.loss == 'triplet':
        crit = triplet_criterion
    reid_trainer = CameraClsTrainer(opt, model, optimizer, crit,
                                    summary_writer)

    print('Start training')
    for epoch in range(opt.max_epoch):
        adjust_lr(optimizer, epoch)
        reid_trainer.train(epoch, trainloader)
        if (epoch + 1) % opt.save_step == 0:
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1,
            },
                            save_dir=os.path.join('./pytorch-ckpt/current',
                                                  opt.save_dir),
                            ep=epoch + 1)

    # if (epoch+1)%15==0:
    #     save_checkpoint({
    #         'state_dict': state_dict,
    #         'epoch': epoch + 1,
    #         }, save_dir=os.path.join('./pytorch-ckpt/current', opt.save_dir))

    if use_gpu:
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()

    save_checkpoint({
        'state_dict': state_dict,
        'epoch': epoch + 1,
    },
                    save_dir=os.path.join('./pytorch-ckpt/current',
                                          opt.save_dir))
Example #20
def train(**kwargs):
    opt._parse(kwargs)
    # opt.lr = 0.00002
    opt.model_name = 'PCB'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)
    tgt_dataset = data_manager.init_dataset(name=opt.tgt_dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    tgt_trainloader = DataLoader(
        ImageData(tgt_dataset.train, TrainTransform(opt.datatype)),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    tgt_queryloader = DataLoader(
        ImageData(tgt_dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    tgt_galleryloader = DataLoader(
        ImageData(tgt_dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )
    tgt_queryFliploader = DataLoader(
        ImageData(tgt_dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    tgt_galleryFliploader = DataLoader(
        ImageData(tgt_dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    print('initializing model ...')
    model = PCB(dataset.num_train_pids)


    optim_policy = model.get_optim_policy()

    start_epoch = opt.start_epoch

    if opt.pretrained_model:
        checkpoint = torch.load(opt.pretrained_model)
        state_dict = checkpoint['state_dict']

        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        try:
            model.load_state_dict(state_dict, strict=False)
            print('load pretrained model ' + opt.pretrained_model)
        except RuntimeError:
            raise RuntimeError(
                'the pretrained checkpoint must match the source-dataset model size')
    else:
        raise RuntimeError('please load a pre-trained model...')

    print('model size: {:.5f}M'.format(sum(p.numel() for p in model.parameters()) / 1e6))


    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        print('transfer directly....... ')
        reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader,
                                tgt_queryFliploader, tgt_galleryFliploader, re_ranking=opt.re_ranking, savefig=opt.savefig)
        return


    #xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)


    embedding_criterion = SelfTraining_TripletLoss(margin=0.5, num_instances=4)

    # def criterion(triplet_y, softmax_y, labels):
    #     losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
    #              [xent_criterion(output, labels) for output in softmax_y]
    #     loss = sum(losses)
    #     return loss


    def criterion(triplet_y, softmax_y, labels):
        #losses = [torch.sum(torch.stack([xent_criterion(logits, labels) for logits in softmax_y]))]
        losses = [torch.sum(torch.stack([embedding_criterion(output, labels) for output in triplet_y]))]
        loss = sum(losses)
        return loss


    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9, weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy, lr=opt.lr, weight_decay=opt.weight_decay)


    # get trainer and evaluator
    reid_trainer = PCBTrainer(opt, model, optimizer, criterion, summary_writer)

    def adjust_lr(optimizer, ep):
        if ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr*10
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr*0.1
        for p in optimizer.param_groups:
            p['lr'] = lr
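    # schedule summary: warm up from lr to 10*lr in steps of lr every 5
    # epochs (epochs 0-49), hold 10*lr until epoch 200, drop back to lr
    # until 300, then 0.1*lr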

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0


    print('transfer directly.....')
    reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader,
                            tgt_queryFliploader, tgt_galleryFliploader, re_ranking=opt.re_ranking, savefig=opt.savefig)


    for iter_n in range(start_epoch, opt.max_epoch):
        if opt.lambda_value == 0:
            source_features = 0
        else:
            # get source datas' feature
            print('Iteration {}: Extracting Source Dataset Features...'.format(iter_n + 1))
            source_features, _ = extract_pcb_features(model, trainloader)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n + 1))
        target_features, _ = extract_pcb_features(model, tgt_trainloader)
        # synchronization feature order with dataset.train

        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(
            source_features, target_features, lambda_value=opt.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # upper triangle of the 2-D distance matrix
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(opt.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()  # DBSCAN clustering radius
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=8)
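            # eps is the mean of the smallest rho-fraction of pairwise
            # re-ranked distances; points DBSCAN cannot place in any cluster
            # get label -1 (noise) and are dropped when building new_dataset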

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        del rerank_dist
        del source_features
        del target_features
        try:
            gc.collect()
        except:
            print('cannot collect')

        num_ids = len(set(labels)) - 1
        print('Iteration {} have {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.train, labels):
            if label == -1:
                continue
            # a dummy camera id 0 keeps trainer.py's _parsing_input and the
            # sampler code working unchanged
            new_dataset.append((fname, label, 0))
        print('Iteration {} have {} training images'.format(iter_n + 1, len(new_dataset)))

        selftrain_loader = DataLoader(
            ImageData(new_dataset, TrainTransform(opt.datatype)),
            sampler=RandomIdentitySampler(new_dataset, opt.num_instances),
            batch_size=opt.train_batch, num_workers=opt.workers,
            pin_memory=pin_memory, drop_last=True
        )

        # train model with new generated dataset
        trainer = PCBTrainer(opt, model, optimizer, criterion, summary_writer)
        reid_evaluator = ResNetEvaluator(model)
        # Start training
        for epoch in range(opt.selftrain_iterations):
            trainer.train(epoch, selftrain_loader)


        # save every eval_step iterations, and at the last iteration
        if (opt.eval_step > 0 and (iter_n + 1) % opt.eval_step == 0) or (iter_n + 1) == opt.max_epoch:
            # save before evaluating, so an OOM during eval cannot lose the model
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': iter_n + 1},
                            is_best=0, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(iter_n + 1) + '.pth.tar')


            if (iter_n + 1) % (opt.eval_step*4) == 0:
                if opt.mode == 'class':
                    rank1 = test(model, tgt_queryloader)
                else:
                    rank1 = reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader, tgt_queryFliploader,
                                                    tgt_galleryFliploader)
                is_best = rank1 > best_rank1
                if is_best:
                    best_rank1 = rank1
                    best_epoch = iter_n + 1

                if use_gpu:
                    state_dict = model.module.state_dict()
                else:
                    state_dict = model.state_dict()

                if is_best:
                    save_checkpoint({'state_dict': state_dict, 'epoch': iter_n + 1},
                                    is_best=is_best, save_dir=opt.save_dir,
                                    filename='checkpoint_ep' + str(iter_n + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(best_rank1, best_epoch))
Example #21
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)

    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    if 'triplet' in opt.model_name:
        trainloader = DataLoader(
            ImageData(dataset.train, TrainTransform(opt.height, opt.width)),
            sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
            batch_size=opt.train_batch,
            num_workers=opt.workers,
            pin_memory=pin_memory,
            drop_last=True)
    else:
        trainloader = DataLoader(ImageData(
            dataset.train, TrainTransform(opt.height, opt.width)),
                                 batch_size=opt.train_batch,
                                 shuffle=True,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.height, opt.width)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.height, opt.width)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model, optim_policy = get_baseline_model(dataset.num_train_pids)
    elif opt.model_name == 'triplet':
        model, optim_policy = get_baseline_model(num_classes=None)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)
    tri_criterion = TripletLoss(opt.margin)
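    # label smoothing replaces the one-hot target q with
    # (1 - eps) * q + eps / num_classes (commonly eps = 0.1), which
    # regularizes the classification branch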

    def cls_criterion(cls_scores, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        return cls_loss

    def triplet_criterion(feat, targets):
        triplet_loss, _, _ = tri_criterion(feat, targets)
        return triplet_loss

    def cls_tri_criterion(cls_scores, feat, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        triplet_loss, _, _ = tri_criterion(feat, targets)
        loss = cls_loss + triplet_loss
        return loss

    # get optimizer
    optimizer = torch.optim.Adam(optim_policy,
                                 lr=opt.lr,
                                 weight_decay=opt.weight_decay)

    def adjust_lr(optimizer, ep):
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            lr = 1e-3 * opt.num_gpu
        elif ep < 180:
            lr = 1e-4 * opt.num_gpu
        elif ep < 300:
            lr = 1e-5 * opt.num_gpu
        elif ep < 320:
            lr = 1e-5 * 0.1**((ep - 320) / 80) * opt.num_gpu
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            lr = 1e-4 * opt.num_gpu
        else:
            lr = 1e-5 * opt.num_gpu
        for p in optimizer.param_groups:
            p['lr'] = lr

    start_epoch = opt.start_epoch
    if use_gpu:
        model = nn.DataParallel(model).cuda()

    # get trainer and evaluator
    if opt.model_name == 'softmax':
        reid_trainer = clsTrainer(opt, model, optimizer, cls_criterion,
                                  summary_writer)
    elif opt.model_name == 'softmax_triplet':
        reid_trainer = cls_tripletTrainer(opt, model, optimizer,
                                          cls_tri_criterion, summary_writer)
    elif opt.model_name == 'triplet':
        reid_trainer = tripletTrainer(opt, model, optimizer, triplet_criterion,
                                      summary_writer)
    reid_evaluator = ResNetEvaluator(model)

    # start training
    best_rank1 = -np.inf
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.step_size > 0:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # evaluate and save every eval_step epochs, and at the final epoch
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0) or (
                epoch + 1) == opt.max_epoch:
            rank1 = reid_evaluator.evaluate(queryloader, galleryloader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1,
            },
                            is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
Example #22
def extract_features(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    # torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    # sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing tx_challenge dataset')

    pin_memory = True if use_gpu else False
    query_dataset = Tx_dataset(set='query_a',
                               file_list='query_a_list.txt').dataset
    gallery_dataset = Tx_dataset(set='gallery_a',
                                 file_list='gallery_a_list.txt').dataset

    queryloader = DataLoader(ImageDataset(query_dataset,
                                          transform=build_transforms(
                                              opt, is_train=False)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageDataset(gallery_dataset,
                                            transform=build_transforms(
                                                opt, is_train=False)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    queryFliploader = DataLoader(ImageDataset(query_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  flip=True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageDataset(gallery_dataset,
                                                transform=build_transforms(
                                                    opt,
                                                    is_train=False,
                                                    flip=True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    queryCenterloader = DataLoader(ImageDataset(query_dataset,
                                                transform=build_transforms(
                                                    opt,
                                                    is_train=False,
                                                    crop='center')),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    galleryCenterloader = DataLoader(ImageDataset(gallery_dataset,
                                                  transform=build_transforms(
                                                      opt,
                                                      is_train=False,
                                                      crop='center')),
                                     batch_size=opt.test_batch,
                                     num_workers=opt.workers,
                                     pin_memory=pin_memory)

    queryLtloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='lt')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryLtloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='lt')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryRtloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='rt')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryRtloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='rt')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryRbloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='rb')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryRbloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='rb')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryLbloader = DataLoader(ImageDataset(
        query_dataset,
        transform=build_transforms(opt, is_train=False, crop='lb')),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    galleryLbloader = DataLoader(ImageDataset(gallery_dataset,
                                              transform=build_transforms(
                                                  opt,
                                                  is_train=False,
                                                  crop='lb')),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    print('initializing model ...')

    model = build_model(opt)

    if opt.pretrained_choice == 'self':
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = Evaluator(model,
                               norm=opt.norm,
                               eval_flip=opt.eval_flip,
                               re_ranking=opt.re_ranking,
                               crop_validation=opt.crop_validation)

    results = reid_evaluator.extract_features(queryloader,
                                              galleryloader,
                                              queryFliploader,
                                              galleryFliploader,
                                              queryCenterloader,
                                              galleryCenterloader,
                                              queryLtloader,
                                              galleryLtloader,
                                              queryRtloader,
                                              galleryRtloader,
                                              queryLbloader,
                                              galleryLbloader,
                                              queryRbloader,
                                              galleryRbloader,
                                              k1=6,
                                              k2=2,
                                              lambda_value=0.3)

    # reid_evaluator.validation(queryloader, galleryloader)

    # name the output file after the checkpoint's parent directory
    torch.save(
        results,
        os.path.join('./result',
                     opt.pretrained_model.split('/')[-2] + '.pth'))
Example #23
def train(**kwargs):
    opt._parse(kwargs)
    opt.model_name = 'bfe_test'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(opt.datatype)),
                             sampler=RandomIdentitySampler(
                                 dataset.train, opt.num_instances),
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.datatype)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.datatype)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)
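    # Horizontally flipped copies of the query/gallery sets, so test-time
    # features can be averaged over both views.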
    queryFliploader = DataLoader(ImageData(dataset.query,
                                           TestTransform(opt.datatype, True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageData(dataset.gallery,
                                             TestTransform(opt.datatype,
                                                           True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    print('initializing model ...')

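    # Batch Feature Erasing model; the two ratios (1.0, 0.33) presumably
    # control the size of the erased feature region (an assumption based on
    # the BFE paper's convention).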
    model = BFE(dataset.num_train_pids, 1.0, 0.33)

    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, strict=False)
        print('loaded pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(queryloader,
                                galleryloader,
                                queryFliploader,
                                galleryFliploader,
                                re_ranking=opt.re_ranking,
                                savefig=opt.savefig)
        return

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)

    if opt.loss == 'triplet':
        embedding_criterion = TripletLoss(opt.margin)
    elif opt.loss == 'lifted':
        embedding_criterion = LiftedStructureLoss(hard_mining=True)
    elif opt.loss == 'weight':
        embedding_criterion = Margin()
    else:
        raise ValueError('unsupported loss: {}'.format(opt.loss))

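    # The model exposes one or more embedding heads (triplet_y) and one or
    # more classifier heads (softmax_y); the total loss sums the embedding
    # criterion over the former and label-smoothed cross-entropy over the
    # latter.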
    def criterion(triplet_y, softmax_y, labels):
        losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
                 [xent_criterion(output, labels) for output in softmax_y]
        loss = sum(losses)
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = cls_tripletTrainer(opt, model, optimizer, criterion,
                                      summary_writer)

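    # LR schedule: linear warm-up toward 0.1x over the first 10 epochs, a
    # staircase ramp up to 10x of the base rate by epoch 50, a 10x plateau
    # until epoch 200, the base rate until epoch 300, then a 0.1x decay.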
    def adjust_lr(optimizer, ep):
        if ep < 10:
            lr = opt.lr * 0.1 * (ep / 10.0)  # warm_up
        elif ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10.0
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.adjust_lr:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # evaluate and checkpoint every eval_step epochs and at the final epoch
        if (opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0) or (
                epoch + 1) == opt.max_epoch:
            if opt.mode == 'class':
                rank1 = test(model, queryloader)
            else:
                rank1 = reid_evaluator.evaluate(queryloader, galleryloader,
                                                queryFliploader,
                                                galleryFliploader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1
            },
                            is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
Example #24
def train(**kwargs):
    #### Part 1 : Initialization
    opt._parse(kwargs)

    torch.backends.cudnn.deterministic = True
    # set random seed and cudnn benchmark
    #torch.manual_seed(opt.seed)
    #random.seed(opt.seed)
    #np.random.seed(opt.seed)

    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    #### Part 2 : Preparing Data
    print('initializing train dataset {}'.format(opt.trainset))
    train_dataset = data_manager.init_dataset(name=opt.trainset)

    print('initializing test dataset {}'.format(opt.testset))
    test_dataset = data_manager.init_dataset(name=opt.testset)

    pin_memory = True if use_gpu else False
    pin_memory = False  # explicitly disabled here, overriding the GPU default
    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    collateFn = NormalCollateFn()

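    # Two batch-composition strategies: RandomIdentitySampler draws
    # num_instances images per identity, while RandomIdentityCameraSampler
    # presumably balances camera views within each batch as well.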
    if opt.sampler == "randomidentity":
        trainloader = DataLoader(
            data_manager.init_datafolder(
                opt.trainset,
                train_dataset.train,
                TrainTransform(opt.height,
                               opt.width,
                               random_erase=opt.with_randomerase),
                if_train=True),
            sampler=RandomIdentitySampler(train_dataset.train,
                                          opt.num_instances),
            batch_size=opt.train_batch,
            num_workers=opt.workers,
            pin_memory=pin_memory,
            drop_last=True,
            collate_fn=collateFn,
        )
    elif opt.sampler == "randomidentitycamera":
        trainloader = DataLoader(
            data_manager.init_datafolder(
                opt.trainset,
                train_dataset.train,
                TrainTransform(opt.height,
                               opt.width,
                               random_erase=opt.with_randomerase),
                if_train=True),
            batch_sampler=RandomIdentityCameraSampler(train_dataset.train,
                                                      opt.num_instances,
                                                      opt.train_batch),
            num_workers=opt.workers,
            pin_memory=pin_memory,
            collate_fn=collateFn,
        )

    queryloader = DataLoader(data_manager.init_datafolder(opt.testset,
                                                          test_dataset.query,
                                                          TestTransform(
                                                              opt.height,
                                                              opt.width),
                                                          if_train=False),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(data_manager.init_datafolder(
        opt.testset,
        test_dataset.gallery,
        TestTransform(opt.height, opt.width),
        if_train=False),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    #### Part 3 : Preparing Backbone Network
    print('initializing model ...')
    if opt.model_name in ['triplet', 'distance']:
        model, optim_policy = get_baseline_model(num_classes=None,
                                                 model='triplet')
    elif opt.model_name in ["softmax"]:
        model, optim_policy = get_baseline_model(train_dataset.num_train_pids,
                                                 model='softmax',
                                                 drop_prob=opt.drop)
    else:
        assert False, "unknown model name"
    if opt.model_path != 'zero' and 'tar' in opt.model_path:
        print('loading pretrained reid model: ' + opt.model_path)
        ckpt = torch.load(opt.model_path)
        # drop classifier weights; parameters from layers listed in
        # opt.keep_layer are skipped as well
        tmp = dict()
        for k, v in ckpt['state_dict'].items():
            if opt.keep_layer and any('layer' + str(i) in k
                                      for i in opt.keep_layer):
                continue
            if opt.keepfc or ('fc' not in k and 'classifier' not in k):
                tmp[k] = v
        ckpt['state_dict'] = tmp
        model.load_state_dict(ckpt['state_dict'], strict=False)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    #### Part 4: Preparing Loss Functions
    if opt.margin1 is not None:
        distance_loss = DistanceLoss(margin=(opt.margin1, opt.margin2))
    else:
        distance_loss = DistanceLoss()
    tri_loss = TripletLoss(margin=opt.margin)
    xent_loss = nn.CrossEntropyLoss()

    vis = dict()
    vis['tri_acc1'] = AverageMeter()
    vis['tri_acc2'] = AverageMeter()
    vis['cls_accuracy'] = AverageMeter()
    vis['cls_loss'] = AverageMeter()

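    # The criterion wrappers below also update the running meters in `vis`,
    # so per-epoch accuracy/loss statistics can be printed after each epoch.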
    def dist_criterion(feat,
                       targets,
                       cameras,
                       model=None,
                       paths=None,
                       epoch=0):
        dis_loss, tri_acc1, tri_acc2 = distance_loss(feat,
                                                     targets,
                                                     cameras,
                                                     model,
                                                     paths,
                                                     epoch=epoch)
        vis['tri_acc1'].update(float(tri_acc1))
        vis['tri_acc2'].update(float(tri_acc2))
        return dis_loss

    def triplet_criterion(feat, targets):
        triplet_loss, tri_accuracy, _, _ = tri_loss(feat, targets)
        vis['tri_acc1'].update(float(tri_accuracy))
        return triplet_loss

    def cls_criterion(cls_scores, targets):
        cls_loss = xent_loss(cls_scores, targets)
        _, preds = torch.max(cls_scores.data, 1)
        corrects = float(torch.sum(preds == targets.data))
        vis['cls_accuracy'].update(float(corrects / opt.train_batch))
        vis['cls_loss'].update(float(cls_loss))
        return cls_loss

    #### Part 5: Preparing Optimizer and Trainer
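    # get_optimizer_strategy returns both the optimizer and an epoch-wise
    # learning-rate adjustment function for the chosen model type.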
    optimizer, adjust_lr = get_optimizer_strategy(opt.model_name, optim_policy,
                                                  opt)
    start_epoch = opt.start_epoch

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    #model=model.cuda()
    # get trainer and evaluator
    if opt.model_name == "distance":
        reid_trainer = tripletTrainer(opt,
                                      model,
                                      optimizer,
                                      dist_criterion,
                                      summary_writer,
                                      need_cam=True)
    elif opt.model_name == 'triplet' or opt.model_name == 'triplet_fc':
        reid_trainer = tripletTrainer(opt, model, optimizer, triplet_criterion,
                                      summary_writer)
    elif opt.model_name == 'softmax':
        reid_trainer = clsTrainer(opt, model, optimizer, cls_criterion,
                                  summary_writer)

    else:
        raise ValueError('unknown model name {}'.format(opt.model_name))
    reid_evaluator = evaluator_manager.init_evaluator(opt.testset,
                                                      model,
                                                      flip=True)

    #### Part 6 : Training
    best_rank1 = -np.inf
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.step_size > 0:
            current_lr = adjust_lr(optimizer, epoch)

        reid_trainer.train(epoch, trainloader)
        for k, v in vis.items():
            print("{}:{}".format(k, v.mean))
            v.reset()

        if (epoch + 1) == opt.max_epoch:
            if use_gpu and opt.num_gpu > 1:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1,
            },
                            is_best=False,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

        # evaluate and checkpoint every eval_step epochs and at the final epoch
        if (opt.eval_step > 0 and
                (epoch + 1) % opt.eval_step == 0
                or (epoch + 1) == opt.max_epoch):
            #print('Test on '+opt.testset)
            #rank1 = reid_evaluator.evaluate(queryloader, galleryloader,normalize=opt.with_normalize)
            print('Test on ' + opt.testset)
            if use_gpu and opt.num_gpu > 1:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1,
            },
                            is_best=False,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

            rank1, mAP = reid_evaluator.evaluate(queryloader,
                                                 galleryloader,
                                                 normalize=opt.with_normalize)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1
                save_checkpoint(
                    {
                        'state_dict': state_dict,
                        'epoch': epoch + 1,
                    },
                    is_best=True,
                    save_dir=opt.save_dir,
                    filename='checkpoint_ep' + str(epoch + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
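# A minimal sketch of the save_checkpoint helper used above (an assumption;
# the actual helper is defined elsewhere in the repo). It follows the common
# convention of copying the latest checkpoint to model_best.pth.tar when
# is_best is set.
import os
import shutil

import torch

def save_checkpoint(state, is_best, save_dir, filename='checkpoint.pth.tar'):
    os.makedirs(save_dir, exist_ok=True)
    path = os.path.join(save_dir, filename)
    torch.save(state, path)
    if is_best:
        shutil.copy(path, os.path.join(save_dir, 'model_best.pth.tar'))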