Example #1
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)
    query_img = args.query
    gallery_path = args.gallery

    transform = Compose([
        RectScale(args.height, args.width),
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # ImageNet mean/std
    ])

    query_loader = DataLoader(RawDatasetPath(glob.glob(query_img + '/*.jpg'),
                                             transform=transform),
                              batch_size=1,
                              shuffle=False,
                              num_workers=args.workers,
                              pin_memory=use_gpu,
                              drop_last=False)

    gallery_loader = DataLoader(RawDatasetPath(glob.glob(gallery_path +
                                                         '/*.jpg'),
                                               transform=transform),
                                batch_size=16,
                                shuffle=False,
                                num_workers=args.workers,
                                pin_memory=use_gpu,
                                drop_last=False)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=751,
                          cut_at_pooling=False,
                          FCN=True)

    # Load from checkpoint
    print('Loading model ...')
    if args.load_weights and check_isfile(args.load_weights):
        load_checkpoint(model, args.load_weights)

    model = nn.DataParallel(model).cuda() if use_gpu else model

    distmat = inference(model, query_loader, gallery_loader, use_gpu)

    if args.visualize_ranks:
        # Rank visualization would go here (not implemented in this snippet)
        pass
Example #2
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    #device_ids = [0, 1, 2, 3]
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset,  args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 )

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model_dict = model.state_dict()
        checkpoint_load = {
            k: v
            for k, v in (checkpoint['state_dict']).items() if k in model_dict
        }
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
        #        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    #model = nn.DataParallel(model)
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint('checkpoint.pth.tar')
    model.module.load_state_dict(checkpoint['state_dict'])
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery)
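
The key-filtering pattern above (keep only checkpoint entries whose names exist in the model, then update and reload) is the manual route; recent PyTorch can do the same partial load in one call. A minimal sketch, assuming `model` and `checkpoint` as in the resume block above:

# strict=False tolerates keys present in only one of the two dicts, mirroring
# the manual filter (shape mismatches on shared keys still raise).
model.load_state_dict(checkpoint['state_dict'], strict=False)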
Example #3
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    #if args.height is None or args.width is None:
    #    args.height, args.width = (224, 224)
    #dataset, num_classes, train_loader, test_loader = \
    #    get_data(args.dataset, args.split, args.data_dir, args.height,
    #             args.width, args.batch_size, args.num_instances, args.workers,
    #             args.combine_trainval)

    query_loader, gallery_loader = get_real_test_data(args.query_dir,
                                                      args.gallery_dir,
                                                      args.height, args.width,
                                                      args.batch_size,
                                                      args.workers)
    #query_loader, _ = get_real_test_data(args.query_dir, args.gallery_dir, args.height, args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_classes=args.num_classes,
                          num_features=args.features,
                          test_attribute=True)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.weights:
        checkpoint = load_checkpoint(args.weights)
        model.load_state_dict(checkpoint['state_dict'])
    if args.resume and not args.weights:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        return

    # Test
    evaluator.evaluate(query_loader, gallery_loader)
Example #4
def main_worker(args):
    cudnn.benchmark = True

    log_dir = osp.dirname(args.resume)
    sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))

    # Create data loaders
    dataset_target, test_loader_target = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch, num_features=args.features, num_classes=0)
    model.cuda()
    model = nn.DataParallel(model)

    # Load from checkpoint
    checkpoint = load_checkpoint(args.resume)
    copy_state_dict(checkpoint, model)

    # Evaluator
    evaluator = Evaluator(model)
    print("Testing...")
    evaluator.evaluate(test_loader_target,
                       dataset_target.query,
                       dataset_target.gallery,
                       cmc_flag=True,
                       args=args,
                       rerank=args.rerank)
    return
Example #5
def main(args):
    print('running...')
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    print(args)
    # Create data loaders
    if args.target_height is None or args.target_width is None:
        args.target_height, args.target_width = (288, 384)

    # Create model
    model = Combine_Net(arch=args.arch,
                        num_classes=args.num_classes,
                        num_features=args.num_features)

    # Load from checkpoint
    checkpoint = load_checkpoint(args.resume)
    model.net.load_state_dict(checkpoint['state_dict'])
    #model = nn.DataParallel(model, [0,1]).cuda()
    model = nn.DataParallel(model, [0, 1, 2, 3]).cuda()
    query_loader, gallery_loader = get_real_test_data(
        args.query_dir, args.gallery_dir, args.target_height,
        args.target_width, args.batch_size, args.workers)
    #query_features = extract_features(model, query_loader, is_flip=True)
    gallery_features = extract_features(model, gallery_loader, is_flip=True)
    #q_file = open('query_se_res152.pkl', 'wb')
    g_file = open('your_gallery_feature_name.pkl', 'wb')
    #pickle.dump(query_features, q_file)
    pickle.dump(gallery_features, g_file)
    #q_file.close()
    g_file.close()
    print('Done')
Example #6
def main(args):
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.re, args.workers)
    model = models.create('resnet50', num_features=1024,
                          dropout=0.5, num_classes=13164)
    model = model.cuda()
    checkpoint = load_checkpoint('./checkpointres50.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])

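    # NOTE: the first loop below only seeds the feature tensor with batch 0 and
    # then breaks; the second loop iterates from batch 0 again, so that batch
    # appears twice in the saved tensor. The model is also left in training
    # mode; model.eval() plus torch.no_grad() would be the usual practice here.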
    for batch_idx, (imgs, _, pids) in enumerate(train_loader):
        imgs = imgs.cuda()
        featurelist, _ = model(imgs)
        all_features = featurelist[6].data.cpu()
        all_features = F.normalize(all_features)
        break
    for batch_idx, (imgs, _, pids) in enumerate(train_loader):
        imgs = imgs.cuda()
        featurelist, _ = model(imgs)
        features = featurelist[6].data.cpu()
        features = F.normalize(features)
        all_features = torch.cat((all_features, features), 0)
        print(batch_idx)
        if batch_idx == 100:
            torch.save(all_features, './renet50vidlayer6.pkl')
            break
    torch.save(all_features, './renet50vidlayer6.pkl')
    print('done!')
Example #7
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True  # Not sure about this one
    metric = DistanceMetric(algorithm='euclidean')

    for test_set in args.test_sets:
        dataset, _, loader = load_dataset(args.architecture, dataset=test_set)
        cmcs = {}
        for batch_id in args.batch_ids:
            benchmark_dir = os.path.join(working_dir, 'benchmarks', batch_id,
                                         args.loss, args.architecture)
            _, num_classes, _ = load_dataset(args.architecture,
                                             dataset='synthetic',
                                             batch_id=batch_id)

            model = setup_model(args.loss, args.architecture, num_classes)
            model = nn.DataParallel(model).cuda()
            checkpoint = load_checkpoint(
                os.path.join(benchmark_dir, 'model_best.pth.tar'))
            model.module.load_state_dict(checkpoint['state_dict'])
            evaluator = Evaluator(model)

            cmcs[batch_id] = evaluator.test(loader, dataset.query,
                                            dataset.gallery, metric)

        plot(args, os.path.join(working_dir, 'plots'), cmcs, test_set)
Example #8
def checkpoint_loader(model, path):
    checkpoint = load_checkpoint(path)
    pretrained_dict = checkpoint['state_dict']
    if isinstance(model, nn.DataParallel):
        parallel = True
        model = model.module.cpu()
    else:
        parallel = False

    model_dict = model.state_dict()
    # 1. filter out unnecessary keys
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    # if eval_only:
    #     keys_to_del = []
    #     for key in pretrained_dict.keys():
    #         if 'classifier' in key:
    #             keys_to_del.append(key)
    #     for key in keys_to_del:
    #         del pretrained_dict[key]
    #     pass
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # 3. load the new state dict
    model.load_state_dict(model_dict)

    start_epoch = checkpoint['epoch']
    best_top1 = checkpoint['best_top1']

    if parallel:
        model = nn.DataParallel(model).cuda()

    return model, start_epoch, best_top1
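
A brief usage sketch for `checkpoint_loader`; `build_model` is a hypothetical stand-in for the project's `models.create(...)` call, and the checkpoint path is illustrative:

model = build_model()  # hypothetical factory, e.g. models.create('resnet50', ...)
model, start_epoch, best_top1 = checkpoint_loader(model, 'logs/checkpoint.pth.tar')
print('Resumed at epoch {}, best top-1 {:.1%}'.format(start_epoch, best_top1))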
Example #9
def loadDataset():
    torch.cuda.set_device(0)
    logs_dir = 'market-1501-Exper33/RPP/'
    num_features = 256
    num_classes = 751
    T = 1
    dim = 256
    dropout = 0.5

    model = models.create('resnet50_rpp',
                          num_features=num_features,
                          dropout=dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True,
                          T=T,
                          dim=dim)
    model = model.cuda()
    checkpoint = load_checkpoint(
        osp.join(logs_dir, 'cvs_checkpoint_0107.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])

    res = []
    frame_number = 0

    # --datasets
    shujuku = {}
    rentidir = '/data/reid/renti/queries'

    return model
Example #10
File: test.py  Project: wyf27/DLMB-PB
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    detectmodel, ic = getdetection()
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(detectmodel, ic, args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.re, args.workers)

    model = models.create(args.arch,
                          num_features=512,
                          dropout=args.dropout,
                          num_classes=13164)
    model = model.cuda()
    checkpoint = load_checkpoint(
        '/home/l603a/REID/wyf/DLMB-PB/resnet18_hd_nodropout_FC_three_branch.pth.tar'
    )
    model.load_state_dict(checkpoint['state_dict'])
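    # Note: the same query/gallery loaders are passed twice below; presumably
    # test_vehicleid expects separate validation and test loader pairs.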
    top1, mAP = test_vehicleid(model,
                               query_loader,
                               gallery_loader,
                               query_loader,
                               gallery_loader,
                               32,
                               'xent',
                               euclidean_distance_loss='xent',
                               epoch=0,
                               use_metric_cuhk03=False,
                               ranks=[1, 5, 10, 20],
                               return_distmat=False)
Example #11
    def resume(self, ckpt_file):
        print("continued from", ckpt_file)
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes)
        self.model = nn.DataParallel(model).cuda()
        self.model.load_state_dict(load_checkpoint(ckpt_file))
Example #12
def main(args):
    print('running...')
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    print(args)
    # Create data loaders
    if args.target_height is None or args.target_width is None:
        args.target_height, args.target_width = (288, 384)

    # Create model
    model = Combine_Net(arch=args.arch,
                        num_classes=args.num_classes,
                        num_features=args.num_features)

    # Load from checkpoint
    checkpoint = load_checkpoint(args.resume)
    model.net.load_state_dict(checkpoint['state_dict'])
    device_ids = [0, 1, 2, 3]
    model = nn.DataParallel(model, device_ids)
    model = model.cuda(device_ids[0])
    root_path = args.data_dir
    sub_folders = os.listdir(root_path)
    for folder in sub_folders:
        print(folder)
        data_dir = root_path + folder
        data_loader = get_real_test_data(data_dir, args.target_height,
                                         args.target_width, args.batch_size,
                                         args.workers)
        direct_features = extract_features(model, data_loader, is_flip=False)
        p_file = open(folder + '_direction.pkl', 'wb')
        pickle.dump(direct_features, p_file)
        p_file.close()
    print('Done')
Example #13
def main_worker(args):
    cudnn.benchmark = True

    log_dir = osp.dirname(args.resume)
    sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    dataset_target, test_loader_target = \
        get_data(args.dataset_target, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch, pretrained=False, cut_at_pooling=args.cut_at_pooling,
                          num_features=args.features, dropout=args.dropout, num_classes=0)
    model.cuda()
    model = nn.DataParallel(model)

    # Load from checkpoint
    checkpoint = load_checkpoint(args.resume)
    copy_state_dict(checkpoint['state_dict'], model)
    # start_epoch = checkpoint['epoch']
    # best_mAP = checkpoint['best_mAP']
    # print("=> Checkpoint of epoch {}  best mAP {:.1%}".format(start_epoch, best_mAP))

    # Evaluator
    evaluator = Evaluator(model)
    print("Test on the target domain of {}:".format(args.dataset_target))
    evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
    return
Example #14
def test_with_open_reid(args):
    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    # Create model
    model = models.create(args.arch,
                          num_features=1024,
                          dropout=args.dropout,
                          num_classes=args.features)

    model = nn.DataParallel(model).cuda()
    print('Test with best model:')
    # checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)
    # Evaluator
    evaluator = Evaluator(model)
    metric.train(model, train_loader)
    print("Validation:")
    evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
    print("Test:")
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #15
def loadDataset():
    logs_dir = 'market-1501-Exper33/RPP/'
    num_features = 256
    num_classes = 751
    T = 1
    dim = 256
    dropout = 0.5

    ###
    model = models.create('resnet50_rpp',
                          num_features=num_features,
                          dropout=dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True,
                          T=T,
                          dim=dim)
    model = model.cuda()
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])

    res = []
    frame_number = 0

    return model
Example #16
    def resume(self, ckpt_file, step):
        print("continued from step", step)
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              embeding_fea_size=self.embeding_fea_size,
                              fixed_layer=self.fixed_layer)
        self.model = nn.DataParallel(model).cuda()
        self.model.load_state_dict(load_checkpoint(ckpt_file))
Example #17
    def resume(self, ckpt_file, step):
        print("continued from step", step)
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes,
                              is_output_feature=True)
        self.model = nn.DataParallel(model).cuda()
        self.model.load_state_dict(load_checkpoint(ckpt_file))
Example #18
def init_model(model_path):
    model = models.create('resnet50',
                          num_features=128,
                          num_classes=216,
                          dropout=0)
    checkpoint = load_checkpoint(model_path)
    model.load_state_dict(checkpoint['state_dict'])
    model = torch.nn.DataParallel(model).cuda()
    return model
Example #19
def create_model(args, classes):
    model = models.create(args.arch, num_features=args.features, norm=False, dropout=args.dropout, num_classes=classes)

    model.cuda()
    model = nn.DataParallel(model)

    initial_weights = load_checkpoint(args.init)
    copy_state_dict(initial_weights['state_dict'], model)

    return model
Example #20
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    sys.stdout = Logger(
        osp.join(args.logs_dir, 'log-partial-reid-group-test.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset_set, num_classes, query_loader_set, gallery_loader_set = \
        get_data(args.dataset,  args.data_dir, args.height,
                 args.width, args.ratio, args.batch_size, args.workers
                 )

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True,
                          num_parts=args.num_parts)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model_dict = model.state_dict()
        checkpoint_load = {
            k: v
            for k, v in (checkpoint['state_dict']).items() if k in model_dict
        }
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
        #        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)

    print("Test:")
    with torch.no_grad():
        evaluator.evaluate(query_loader_set, gallery_loader_set, dataset_set)
    return
Example #21
    def resume(self, ckpt_file, step):
        print("continued from step", step)
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes,
                              mode=self.mode)
        # self.model = nn.DataParallel(model).cuda()
        model.load_state_dict(load_checkpoint(ckpt_file), strict=False)
        model_distill = deepcopy(model)
        # model.load_state_dict(load_checkpoint_new(ckpt_file), strict=False)
        self.model = nn.DataParallel(model).cuda()
        self.model_distill = nn.DataParallel(model_distill).cuda()
Example #22
def create_model(args):
    model = models.create(args.arch,
                          num_features=args.features,
                          norm=True,
                          dropout=args.dropout,
                          num_classes=0)
    # use CUDA
    model = model.cuda()
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.copy_weight(checkpoint['state_dict'])
    model = nn.DataParallel(model)
    return model
Example #23
def getdetection():
    model = models.create('resnet50', num_features=1024,
                          dropout=0.5, num_classes=13164)
    model = model.cuda()
    model.eval()
    checkpoint = load_checkpoint('./checkpointres50.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])
    label = torch.load('./renet50layer6label10class15000.pkl')
    Ic = np.zeros(1024)
    for i in range(0, 1024):
        if label[i] == 4:
            Ic[i] = 1
    Ic = torch.from_numpy(Ic)
    return model, Ic
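
The 1024-iteration mask loop above can be collapsed into a single vectorized comparison; a sketch, assuming `label` is a 1-D torch tensor with at least 1024 entries:

# Boolean mask of feature channels labeled 4, cast to float64 to match the
# dtype of the original np.zeros(1024) array.
Ic = (label[:1024] == 4).double()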
Example #24
    def _init_models(self):
        #self.net_G = CustomPoseGenerator(self.opt.pose_feature_size, 2048, self.opt.noise_feature_size,
        self.net_G = CustomPoseGenerator(self.opt.pose_feature_size, 2048, 0, pose_nc=self.pose_size, dropout=self.opt.drop,
                                         norm_layer=self.norm_layer, fuse_mode=self.opt.fuse_mode,
                                         connect_layers=self.opt.connect_layers)

        if self.opt.emb_type == 'Single':
            self.net_E = SingleNet(self.opt.arch, self.emb_size, pretraind=True, use_bn=True, test_bn=False, last_stride=self.opt.last_stride)
        elif self.opt.emb_type == 'Siamese':
            self.net_E = SiameseNet(self.opt.arch, self.emb_size, pretraind=True, use_bn=True, test_bn=False, last_stride=self.opt.last_stride)
        else:
            raise ValueError('unrecognized model')

        self.net_Di = SingleNet('resnet18', 1, pretraind=True, use_bn=True, test_bn=False, last_stride=2)

        self.net_Dp = NLayerDiscriminator(3+self.pose_size, norm_layer=self.norm_layer)

        if self.opt.stage == 0:  # This is for training end-to-end
            init_weights(self.net_G)
            init_weights(self.net_Dp)
        elif self.opt.stage == 1:  # This is for training fixing a baseline model
            init_weights(self.net_G)
            init_weights(self.net_Dp)
            checkpoint = load_checkpoint(self.opt.netE_pretrain)
            
            if 'state_dict' in checkpoint.keys():
                checkpoint = checkpoint['state_dict']
            state_dict = remove_module_key(checkpoint)

            self.net_E.load_state_dict(state_dict)
            #state_dict['classifier.weight'] = state_dict['classifier.weight'][1:2]
            #state_dict['classifier.bias'] = torch.FloatTensor([state_dict['classifier.bias'][1]])
            #self.net_Di.load_state_dict(state_dict)
        elif self.opt.stage == 2:  # This is for training with a provided model
            self._load_state_dict(self.net_E, self.opt.netE_pretrain)
            self._load_state_dict(self.net_G, self.opt.netG_pretrain)
            self._load_state_dict(self.net_Di, self.opt.netDi_pretrain)
            self._load_state_dict(self.net_Dp, self.opt.netDp_pretrain)
        else:
            raise ValueError('unrecognized stage')

        self.net_E = torch.nn.DataParallel(self.net_E).cuda()
        self.net_G = torch.nn.DataParallel(self.net_G).cuda()
        self.net_Di = torch.nn.DataParallel(self.net_Di).cuda()
        self.net_Dp = torch.nn.DataParallel(self.net_Dp).cuda()
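
`remove_module_key` above is a project helper; a plausible minimal implementation (an assumption, not the project's exact code) strips the `module.` prefix that `nn.DataParallel` prepends to every parameter name when a wrapped model is saved:

def remove_module_key(state_dict):
    # nn.DataParallel checkpoints store parameters as "module.<name>"; strip
    # the prefix so the weights load into an unwrapped model.
    return {k[len('module.'):] if k.startswith('module.') else k: v
            for k, v in state_dict.items()}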
Example #25
def main(argv):
    #parser
    parser = argparse.ArgumentParser(description='test part bilinear network')
    parser.add_argument('--exp-dir', type=str, default='logs/market1501/exp1')
    parser.add_argument('--target-epoch', type=int, default=750)
    parser.add_argument('--gpus', type=str, default='0')
    args = parser.parse_args(argv)

    # Settings
    exp_dir = args.exp_dir
    target_epoch = args.target_epoch
    batch_size = 50
    gpu_ids = args.gpus

    set_paths('paths')
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_ids
    args = json.load(open(osp.join(exp_dir, "args.json"), "r"))

    # Load data
    t = T.Compose([
        T.RectScale(args['height'], args['width']),
        T.CenterCrop((args['crop_height'], args['crop_width'])),
        T.ToTensor(),
        T.RGB_to_BGR(),
        T.NormalizeBy(255),
    ])
    dataset = datasets.create(args['dataset'],
                              'data/{}'.format(args['dataset']))
    dataset_ = Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
                            root=dataset.images_dir,
                            transform=t)
    dataloader = DataLoader(dataset_, batch_size=batch_size, shuffle=False)

    # Load model
    model = models.create(args['arch'],
                          dilation=args['dilation'],
                          use_relu=args['use_relu'],
                          initialize=False).cuda()
    weight_file = osp.join(exp_dir, 'epoch_{}.pth.tar'.format(target_epoch))
    model.load(load_checkpoint(weight_file))
    model.eval()

    # Evaluate
    evaluator = Evaluator(model)
    evaluator.evaluate(dataloader, dataset.query, dataset.gallery)
Example #26
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    name = f'{args.dataset}-{args.arch}'
    logs_dir = f'logs/softmax-loss/{name}'

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          cos_output=args.cos_output)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    checkpoint = load_checkpoint(args.resume)
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    start_epoch = checkpoint['epoch']
    best_top1 = checkpoint['best_top1']
    print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))
    model = model.cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, normalize_features=True)  # was: normalize_features=args.cos_output
    metric.train(model, train_loader)
    print("Validation:")
    evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
    print("Test:")
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #27
    def __init__(self,
                 depth,
                 checkpoint=None,
                 pretrained=True,
                 num_features=2048,
                 dropout=0.1,
                 num_classes=0,
                 numCams=0):
        super(ResNet, self).__init__()

        self.depth = depth
        self.checkpoint = checkpoint
        self.pretrained = pretrained
        self.num_features = num_features
        self.dropout = dropout
        self.num_classes = num_classes

        if self.dropout > 0:
            self.drop = nn.Dropout(self.dropout)
        # Construct base (pretrained) resnet
        if depth not in ResNet.__factory:
            raise KeyError("Unsupported depth:", depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)
        out_planes = self.base.fc.in_features

        # resume from pre-iteration training
        if self.checkpoint:
            state_dict = load_checkpoint(checkpoint)
            self.load_state_dict(state_dict['state_dict'], strict=False)

        self.feat = nn.Linear(out_planes, self.num_features, bias=False)
        self.feat_bn = nn.BatchNorm1d(self.num_features)
        self.relu = nn.ReLU(inplace=True)
        init.normal_(self.feat.weight, std=0.001)
        init.constant_(self.feat_bn.weight, 1)
        init.constant_(self.feat_bn.bias, 0)

        # x2 classifier
        self.classifier_x2 = nn.Linear(self.num_features, self.num_classes)
        init.normal_(self.classifier_x2.weight, std=0.001)
        init.constant_(self.classifier_x2.bias, 0)

        if not self.pretrained:
            self.reset_params()
Example #28
def loadDataset():
    torch.cuda.set_device(0)
    logs_dir = 'market-1501-Exper33/RPP/'
    num_features = 256
    num_classes = 751
    T = 1
    dim = 256
    dropout = 0.5

    ###
    model = models.create('resnet50_rpp',
                          num_features=num_features,
                          dropout=dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True,
                          T=T,
                          dim=dim)
    model = model.cuda()
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])

    res = []
    frame_number = 0

    # --datasets
    shujuku = {}
    rentidir = '/data/reid/renti/queries'
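    # NOTE: the images below are fed to the network as raw BGR cv2 pixels with
    # no resizing or ImageNet normalization, unlike the RectScale + Normalize
    # pipelines used elsewhere in these examples.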
    for query in os.listdir(rentidir):
        query_dir = osp.join(rentidir, query)
        shujuku[query] = []
        for img in os.listdir(query_dir):
            _img = cv2.imread(osp.join(query_dir, img))
            _img = np.transpose(_img, (2, 0, 1)).astype(np.float32)
            _img = torch.from_numpy(_img)
            _img = torch.unsqueeze(_img, 0)
            _feature = extract_cnn_feature(model, _img.cuda())
            shujuku[query].append(_feature)

            # --

    return model, shujuku
Example #29
def load_params(new_model, pretrained_model):
    new_model_dict = new_model.module.state_dict()
    pretrained_checkpoint = load_checkpoint(pretrained_model)
    for name, param in pretrained_checkpoint.items():
        print('pretrained_model params name and size: ', name, param.size())

        if name in new_model_dict and 'classifier' not in name:
            if isinstance(param, Parameter):
                param = param.data
            try:
                new_model_dict[name].copy_(param)
                print('############# new_model load params name: ', name)
            except Exception:
                raise RuntimeError('While copying the parameter named {}, '
                                   'whose dimensions in the model are {} and '
                                   'whose dimensions in the checkpoint are {}.'
                                   .format(name, new_model_dict[name].size(),
                                           param.size()))
        else:
            continue
Example #30
def eval(save_dir):
    mAP = []
    Acc = []
    features = []
    for idx, config in enumerate(configs):
        model = models.create(
            config.model_name,
            num_features=config.num_features,
            dropout=config.dropout,
            num_classes=config.num_classes)
        model = torch.nn.DataParallel(model).cuda()
        model_name = MODEL[idx]
        feature = []
        for epoch in range(5):
            save_pth = os.path.join(save_dir, '%s.epoch%s' % (model_name, epoch))

            if not os.path.exists(save_pth):
                raise ValueError('wrong model pth %s' % save_pth)
            checkpoint = load_checkpoint(save_pth)
            state_dict = {
                k: v
                for k, v in checkpoint['state_dict'].items()
                if k in model.state_dict().keys()
            }
            model.load_state_dict(state_dict)
            result = checkpoint['performance']
            mAP += [str(result[0])]
            Acc += [str(result[1])]

            feature += [mu.get_feature(model, query_gallery, data.images_dir, config)]
        features += [feature]


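    # Fuse the per-epoch features of the three models and evaluate the ensemble
    # (range(3) below matches the three model configs).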
    for idx in range(5):
        feas = [features[j][idx] for j in range(3)]
        result = mu.combine_evaluate(feas, data)
        mAP += [str(result[0])]
        Acc += [str(result[1])]
    return mAP, Acc
Example #31
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.source_batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    # Create source data_loader
    source_dataset, source_num_classes, source_train_loader, \
        source_val_loader, source_test_loader = \
        get_source_data(args.source_dataset, args.split, args.data_dir,
                        args.height, args.width, args.source_batch_size,
                        args.num_instances, args.workers, args.combine_trainval)
    # Create target data_loader
    target_dataset, target_num_classes, target_train_loader, \
        target_val_loader, target_test_loader = \
        get_target_data(args.target_dataset, args.split, args.data_dir,
                        args.height, args.width, args.target_batch_size,
                        args.workers, args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = ResNet_recon(num_features=1024, dropout=args.dropout)
    if args.evaluate:
        model_evalu = nn.DataParallel(model).cuda()
    model = model.cuda(0)

    # For source triplet-loss
    trip_embedding = Trip_embedding(num_features=1024,
                                    num_diff_features=128, dropout=args.dropout).cuda(1)
    # For target reconstruction-loss
    recon_module = Reconstruct(num_features=1024).cuda(1)
    # Criterion
    criterion = ReconTripLoss().cuda(1)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        # trip_embedding.load_state_dict(checkpoint['trip_em'])
        recon_module.load_state_dict(checkpoint['recon_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    # model = nn.DataParallel(model, device_ids=[0,1]).cuda(1)

    # model.cuda(0)
    # trip_embedding.cuda(1)
    # recon_module.cuda(1)
    # criterion.cuda(1)
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator

    if args.evaluate:
        evaluator = Evaluator(model_evalu)
        metric.train(model_evalu, source_train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.source_dataset) > 1:
            for dataset_name in args.source_dataset:
                print("{} source_test result:".format(dataset_name))
                evaluator.evaluate(source_test_loader[dataset_name],
                                   source_dataset.query[dataset_name],
                                   source_dataset.gallery[dataset_name],
                                   metric)
            return
        else:
            print("source test result")
            evaluator.evaluate(source_test_loader, source_dataset.query,
                               source_dataset.gallery, metric)
            print("target test result")
            evaluator.evaluate(target_test_loader, target_dataset.query,
                               target_dataset.gallery, metric)
            return

    evaluator = Evaluator(model)
    # Optimizer
    optimizer = torch.optim.Adam([{'params': model.parameters()},
                                  # {'params': trip_embedding.parameters()},
                                  {'params': recon_module.parameters()}],
                                  lr=args.lr, weight_decay=args.weight_decay)
    # Trainer
    trainer = Transfer_Trainer(model, recon_module, trip_embedding, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs)/
                                 float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
    # epoch_num = args.maxstep//(750//18) + 1
    # Start training
    top1 = 0
    is_best = True
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, target_train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        # for tag, value in model.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(source_train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(source_train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        if epoch % 25 == 0 and epoch != 0:
            top1 = evaluator.evaluate(source_test_loader, source_dataset.query, source_dataset.query)
            target_top1 = evaluator.evaluate(target_test_loader, target_dataset.query, target_dataset.query)
            print('target_top1 = {:5.1%}'.format(target_top1))
            # top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)

        save_checkpoint({
            'state_dict': model.state_dict(),
            'recon_dict': recon_module.state_dict(),
            # 'trip_em': trip_embedding.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, source_train_loader)
    print("Test:")
    if len(args.dataset) > 1:
        for dataset_name in args.dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(source_test_loader[dataset_name], source_dataset.query[dataset_name],
                               source_dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(source_test_loader, source_dataset.query, source_dataset.gallery, metric)
Example #32
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    # assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          num_diff_features=128,
                          dropout=args.dropout,
                          cut_at_pooling=False)
    print(model)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items()
                           if k in model.state_dict()}
        model_dict = model.state_dict()
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.dataset) > 1:
            for dataset_name in args.dataset:
                print("{} test result:".format(dataset_name))
                evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                                   dataset.gallery[dataset_name], metric)
            return
        else:
            evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
            return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs)/float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
    # epoch_num = args.maxstep//(750//18) + 1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            try:
                logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
            except AttributeError:
                pass
        # top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)
        # top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        top1 = 1
        # is_best = top1 > best_top1
        # best_top1 = max(top1, best_top1)
        # save_checkpoint({
        #     'state_dict': model.module.state_dict(),
        #     'epoch': epoch + 1,
        #     'best_top1': best_top1,
        # }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        is_best = False
        best_top1 = 1
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    print("Test:")
    if len(args.dataset) > 1:
        for dataset_name in args.dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                               dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #33
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch, num_features=args.features, norm=True,
                          dropout=args.dropout)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = OIMLoss(model.module.num_features, num_classes,
                        scalar=args.oim_scalar,
                        momentum=args.oim_momentum).cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #34
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
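        # lr holds at args.lr through epoch 100, then decays exponentially by a
        # further factor of 1000 for every additional 50 epochs.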
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #35
def main(args):
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    #######Tensorboard-logs##########
    logger = TensorLogger(args.Tensorlogs_dir)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log5.txt'))
    # sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    now = datetime.datetime.now()
    print(now.strftime('%Y-%m-%d %H:%M:%S'))
    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_diff_features=args.features,
                          pretrained=True)

    # model_path = load_checkpoint(args.model_path)
    # model.load_state_dict(model_path['state_dict'])

    # Criterion
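    # AdaptTripletLoss carries learnable metric parameters of its own: its
    # state_dict is checkpointed as 'adapt_metric' and its parameters are
    # handed to the optimizer further below.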
    criterion = AdaptTripletLoss(margin=args.margin,
                                 num_feature=args.features).cuda()
    # criterion = TripletLoss(margin=args.margin,\
    #                          metric_embedding=args.metric_embedding).cuda()

    start_epoch = best_top1 = top1 = 0
    is_best = False
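    # Two resume paths: --resume_from_trip restores only the model weights
    # (presumably from a plain triplet-loss run), while --resume also
    # restores the learned adaptive metric stored in the criterion.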
    if args.resume_from_trip:
        model_path = load_checkpoint(args.resume_from_trip)
        model.load_state_dict(model_path['state_dict'])
        start_epoch = model_path['epoch']
        best_top1 = model_path['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    if args.resume:
        model_path = load_checkpoint(args.resume)
        model.load_state_dict(model_path['state_dict'])
        criterion.load_state_dict(model_path['adapt_metric'])
        start_epoch = model_path['epoch']
        best_top1 = model_path['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = ADP_Evaluator(model, criterion)
    # evaluator = Evaluator(model)
    if args.evaluate:
        # metric.train(model, train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.dataset) > 1:
            for dataset_name in args.dataset:
                print("{} test result:".format(dataset_name))
                evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                                   dataset.gallery[dataset_name])
        else:
            evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        return

    # Optimizer
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
    #                              weight_decay=args.weight_decay)

    if args.only_train_metric:
        # Freeze the backbone and optimize only the adaptive metric parameters
        optimizer = torch.optim.Adam(criterion.parameters(), lr=args.lr,
                                     weight_decay=args.weight_decay)

        for param in model.parameters():
            param.requires_grad = False
        only_metric_train = True
    else:
        # Joint training: backbone at 0.1x the base rate, metric parameters
        # at the full rate
        optimizer = torch.optim.Adam([{'params': model.parameters(), 'lr': 0.1 * args.lr},
                                      {'params': criterion.parameters()}],
                                     lr=args.lr, weight_decay=args.weight_decay)
        only_metric_train = False

    # def part_param(model,str):
    #     for name,param in model.named_parameters():
    #         if str not in name:
    #             yield param
    #
    # new_param = part_param(model,'base')
    # optimizer = torch.optim.Adam([
    #                             {'params': model.module.base.parameters()},
    #                             {'params':new_param, 'weight_decay':1.5*args.weight_decay}
    #                             ]
    #                             ,lr=args.lr, weight_decay=args.weight_decay,)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.01 ** ((epoch - args.lr_change_epochs) /
                                float(args.epochs - args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lambda(epoch):
        w_lambda = 0.001 if epoch <= 30 else 0.001 * (0.01 ** ((epoch - 30) / 70.0))
        return w_lambda
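    # Note: adjust_lambda is never called below; the training loop uses a
    # fixed weight_constraint_lambda of 1e-4 instead.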
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        weight_constraint_lambda = 0.0001
        trainer.train(epoch, train_loader, optimizer, logger, weight_constraint_lambda)

        #######Tensorboard-logs##########
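        # Log parameter/gradient histograms; parameters whose .grad is still
        # None make to_np raise AttributeError and are simply skipped.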
        if not only_metric_train:
            for tag, value in model.named_parameters():
                tag = tag.replace('.', '/')
                try:
                    logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                    logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
                except AttributeError:
                    pass
        for tag, value in criterion.named_parameters():
            tag = tag.replace('.', '/')
            try:
                logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
            except AttributeError:
                pass
        #################################
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'adapt_metric': criterion.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        # Keep a numbered copy of the checkpoint every 50 epochs
        if (epoch + 1) % 50 == 0:
            checkpoint_file = osp.join(args.logs_dir, 'checkpoint.pth.tar')
            stepcheckpoint_file = osp.join(args.logs_dir,
                                           'checkpoint{}.pth.tar'.format(epoch + 1))
            shutil.copy(checkpoint_file, stepcheckpoint_file)
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))
    now = datetime.datetime.now()
    print(now.strftime('%Y-%m-%d %H:%M:%S'))
    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    criterion.load_state_dict(checkpoint['adapt_metric'])
    # metric.train(model, train_loader)
    # evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
    if len(args.dataset) > 1:
        for dataset_name in args.dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                               dataset.gallery[dataset_name])
    else:
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Softmax loss classification")
    # data
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    #######Tensorboard-logs##########
    logger = TensorLogger(args.Tensorlogs_dir)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, _, _, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    trans_train_loader, num_classes = get_fake_data(args.trans_name, args.trans_data_txt,
                                                    args.height, args.width,
                                                    args.batch_size, args.workers)
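    # Note: training batches come from the loader built by get_fake_data over
    # args.trans_data_txt, while validation/test use the real dataset splits.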
    # Create model
    model = models.create(args.arch,
                          dropout=0, num_classes=num_classes)
    # model = models.create(args.arch, num_features=1024, num_diff_features=args.features,
    #                       dropout=args.dropout, num_classes=num_classes, iden_pretrain=True)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, trans_train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
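    # If the backbone exposes a pretrained 'base' module, train it with a
    # 0.1x learning-rate multiplier and the freshly initialized layers at the
    # full rate; otherwise optimize all parameters uniformly.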
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
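    # Step decay: the base rate is multiplied by 0.1 every
    # args.lr_change_epochs epochs; 'lr_mult' scales it per parameter group.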
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr * 0.1 ** (epoch // args.lr_change_epochs)
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, trans_train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        # for tag, value in model.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        # for tag, value in criterion.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        #################################
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, trans_train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)