Example #1
def get_model_by_name(model_name, num_classes):
    """
    create model given the model_name and number of classes
    """
    if 'resnet' in model_name:
        model = models.create(
            model_name,
            num_features=4096,  #128,
            dropout=0.3,
            num_classes=num_classes)
    elif 'inception' in model_name:
        model = models.create(model_name,
                              num_features=128,
                              dropout=0.3,
                              num_classes=num_classes)
    elif 'densenet' in model_name:
        model = models.create(model_name,
                              num_features=128,
                              dropout=0.3,
                              num_classes=num_classes)
    elif 'vgg' in model_name:
        model = models.create(
            model_name,
            num_features=4096,  #128,
            dropout=0.3,
            num_classes=num_classes)
    elif 'mnist' in model_name:
        model = models.create(
            model_name,
            num_features=4096,  #128,
            dropout=0.3,
            num_classes=num_classes)
    else:
        raise ValueError('unknown model name: {}'.format(model_name))
    return model
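The resnet, vgg, and mnist branches above are identical, as are inception and densenet, so the dispatch reduces to a lookup of num_features per backbone family. A minimal sketch of that refactoring, assuming the same open-reid-style models.create factory used throughout these examples:

# Sketch: collapse the if/elif chain into a feature-size lookup table.
FEATURE_SIZES = {'resnet': 4096, 'inception': 128, 'densenet': 128,
                 'vgg': 4096, 'mnist': 4096}

def get_model_by_name(model_name, num_classes):
    for family, num_features in FEATURE_SIZES.items():
        if family in model_name:
            return models.create(model_name,
                                 num_features=num_features,
                                 dropout=0.3,
                                 num_classes=num_classes)
    raise ValueError('unknown model name: {}'.format(model_name))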
Example #2
File: model.py  Project: zqh0253/FD-GAN
    def _init_models(self):
        self.net_G = CustomPoseGenerator(self.opt.pose_feature_size, 2048,
                                         self.opt.noise_feature_size,
                                         dropout=self.opt.drop,
                                         norm_layer=self.norm_layer,
                                         fuse_mode=self.opt.fuse_mode,
                                         connect_layers=self.opt.connect_layers)
        e_base_model = create(self.opt.arch, cut_at_pooling=True)
        e_embed_model = EltwiseSubEmbed(use_batch_norm=True, use_classifier=True, num_features=2048, num_classes=2)
        self.net_E = SiameseNet(e_base_model, e_embed_model)

        di_base_model = create(self.opt.arch, cut_at_pooling=True)
        di_embed_model = EltwiseSubEmbed(use_batch_norm=True, use_classifier=True, num_features=2048, num_classes=1)
        self.net_Di = SiameseNet(di_base_model, di_embed_model)
        self.net_Dp = NLayerDiscriminator(3+18, norm_layer=self.norm_layer)

        if self.opt.stage == 1:
            init_weights(self.net_G)
            init_weights(self.net_Dp)
            state_dict = remove_module_key(torch.load(self.opt.netE_pretrain))
            self.net_E.load_state_dict(state_dict)
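            # reuse net_E's pretrained weights for net_Di below: keep only row 1
            # of the 2-way verification classifier (weight and bias) so the
            # shapes fit net_Di's single-logit head (num_classes=1)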
            state_dict['embed_model.classifier.weight'] = state_dict['embed_model.classifier.weight'][1]
            state_dict['embed_model.classifier.bias'] = torch.FloatTensor([state_dict['embed_model.classifier.bias'][1]])
            self.net_Di.load_state_dict(state_dict)
        elif self.opt.stage == 2:
            self._load_state_dict(self.net_E, self.opt.netE_pretrain)
            self._load_state_dict(self.net_G, self.opt.netG_pretrain)
            self._load_state_dict(self.net_Di, self.opt.netDi_pretrain)
            self._load_state_dict(self.net_Dp, self.opt.netDp_pretrain)
        else:
            raise ValueError('unknown training stage')

        self.net_E = torch.nn.DataParallel(self.net_E).cuda()
        self.net_G = torch.nn.DataParallel(self.net_G).cuda()
        self.net_Di = torch.nn.DataParallel(self.net_Di).cuda()
        self.net_Dp = torch.nn.DataParallel(self.net_Dp).cuda()
Example #3
    def __init__(self, model_name, batch_size, num_classes, dataset, u_data, save_path, embeding_fea_size=1024,
                 dropout=0.5, max_frames=900, initial_steps=20, step_size=16):

        self.model_name = model_name
        self.num_classes = num_classes
        self.data_dir = dataset.images_dir
        self.is_video = dataset.is_video
        self.save_path = save_path

        self.dataset = dataset
        self.u_data = u_data
        self.u_label = np.array([label for _, label, _, _ in u_data])
        self.label_to_images = {}
        self.sort_image_by_label = []

        self.dataloader_params = {}
        self.dataloader_params['height'] = 256
        self.dataloader_params['width'] = 128
        self.dataloader_params['batch_size'] = batch_size
        self.dataloader_params['workers'] = 6

        self.batch_size = batch_size
        self.data_height = 256
        self.data_width = 128
        self.data_workers = 6

        self.initial_steps = initial_steps
        self.step_size = step_size

        self.dropout = dropout
        self.max_frames = max_frames
        self.embeding_fea_size = embeding_fea_size

        # eval batch size: 1 for video datasets, 128 for image datasets
        if self.is_video:
            self.eval_bs = 1
            self.fixed_layer = True
            self.frames_per_video = 16
            self.later_steps = 5
        else:
            self.eval_bs = 128
            self.fixed_layer = False
            self.frames_per_video = 1
            self.later_steps = 2

        if self.model_name == 'avg_pool':
            model = models.create(self.model_name, dropout=self.dropout,
                                  embeding_fea_size=self.embeding_fea_size,
                                  fixed_layer=self.fixed_layer)
        else:
            model = models.create(self.model_name, embed_dim=self.embeding_fea_size,
                                  dropout=self.dropout, fix_part_layers=self.fixed_layer)
        self.model = nn.DataParallel(model).cuda()

        self.criterion = ExLoss(self.embeding_fea_size, self.num_classes, t=10).cuda()
Example #4
def main_worker(args):
    cudnn.benchmark = True

    log_dir = osp.dirname(args.resume)
    sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create data loaders
    dataset_target, test_loader_target = \
        get_data(args.dataset_target, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch, pretrained=False, cut_at_pooling=args.cut_at_pooling,
                          num_features=args.features, dropout=args.dropout, num_classes=0)
    model.cuda()
    model = nn.DataParallel(model)

    # Load from checkpoint
    checkpoint = load_checkpoint(args.resume)
    copy_state_dict(checkpoint['state_dict'], model)
    # start_epoch = checkpoint['epoch']
    # best_mAP = checkpoint['best_mAP']
    # print("=> Checkpoint of epoch {}  best mAP {:.1%}".format(start_epoch, best_mAP))

    # Evaluator
    evaluator = Evaluator(model)
    print("Test on the target domain of {}:".format(args.dataset_target))
    evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True, rerank=args.rerank)
    return
Example #5
    def resume(self, ckpt_file):
        print("continued from", ckpt_file)
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes)
        self.model = nn.DataParallel(model).cuda()
        self.model.load_state_dict(load_checkpoint(ckpt_file))
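Since the model is wrapped in nn.DataParallel before load_state_dict, the checkpoint keys must already carry the "module." prefix that DataParallel adds (Example #12 strips it in the opposite direction). A small illustrative helper, not part of any project above, that aligns a state dict either way:

# Hypothetical helper: add or strip the 'module.' prefix introduced by
# torch.nn.DataParallel so a checkpoint matches the target model's keys.
def align_module_prefix(state_dict, wrapped):
    def fix(key):
        if wrapped and not key.startswith('module.'):
            return 'module.' + key
        if not wrapped and key.startswith('module.'):
            return key[len('module.'):]
        return key
    return {fix(k): v for k, v in state_dict.items()}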
Example #6
def main(args):
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.re, args.workers)
    model = models.create('resnet50', num_features=1024,
                          dropout=0.5, num_classes=13164)
    model = model.cuda()
    checkpoint = load_checkpoint('./checkpointres50.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])

    # extract layer-6 features for the first 101 batches and save them
    model.eval()
    feats = []
    with torch.no_grad():
        for batch_idx, (imgs, _, pids) in enumerate(train_loader):
            imgs = imgs.cuda()
            featurelist, _ = model(imgs)
            feats.append(F.normalize(featurelist[6].cpu()))
            print(batch_idx)
            if batch_idx == 100:
                break
    torch.save(torch.cat(feats, 0), './renet50vidlayer6.pkl')
    print('done!')
Example #7
def main_worker(args):
    cudnn.benchmark = True

    log_dir = osp.dirname(args.resume)
    sys.stdout = Logger(osp.join(log_dir, 'log_test.txt'))

    # Create data loaders
    dataset_target, test_loader_target = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch, num_features=args.features, num_classes=0)
    model.cuda()
    model = nn.DataParallel(model)

    # Load from checkpoint
    checkpoint = load_checkpoint(args.resume)
    copy_state_dict(checkpoint, model)

    # Evaluator
    evaluator = Evaluator(model)
    print("Testing...")
    evaluator.evaluate(test_loader_target,
                       dataset_target.query,
                       dataset_target.gallery,
                       cmc_flag=True,
                       args=args,
                       rerank=args.rerank)
    return
Example #8
File: test.py  Project: wyf27/DLMB-PB
def main(args):
    cudnn.benchmark = True
    # Redirect print to both console and log file

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    detectmodel, ic = getdetection()
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(detectmodel, ic, args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.re, args.workers)

    model = models.create(args.arch,
                          num_features=512,
                          dropout=args.dropout,
                          num_classes=13164)
    model = model.cuda()
    checkpoint = load_checkpoint(
        '/home/l603a/REID/wyf/DLMB-PB/resnet18_hd_nodropout_FC_three_branch.pth.tar'
    )
    model.load_state_dict(checkpoint['state_dict'])
    top1, mAP = test_vehicleid(model,
                               query_loader,
                               gallery_loader,
                               query_loader,
                               gallery_loader,
                               32,
                               'xent',
                               euclidean_distance_loss='xent',
                               epoch=0,
                               use_metric_cuhk03=False,
                               ranks=[1, 5, 10, 20],
                               return_distmat=False)
Example #9
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)
    query_img = args.query
    gallery_path = args.gallery

    transform = Compose([
        RectScale(args.height, args.width),
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    query_loader = DataLoader(RawDatasetPath(glob.glob(query_img + '/*.jpg'),
                                             transform=transform),
                              batch_size=1,
                              shuffle=False,
                              num_workers=args.workers,
                              pin_memory=use_gpu,
                              drop_last=False)

    gallery_loader = DataLoader(RawDatasetPath(glob.glob(gallery_path +
                                                         '/*.jpg'),
                                               transform=transform),
                                batch_size=16,
                                shuffle=False,
                                num_workers=args.workers,
                                pin_memory=use_gpu,
                                drop_last=False)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=751,
                          cut_at_pooling=False,
                          FCN=True)

    # Load from checkpoint
    print('Loading model ...')
    if args.load_weights and check_isfile(args.load_weights):
        load_checkpoint(model, args.load_weights)

    model = nn.DataParallel(model).cuda() if use_gpu else model

    distmat = inference(model, query_loader, gallery_loader, use_gpu)

    if args.visualize_ranks:
        # Do some visualize ranks
        pass
Example #10
def test_with_open_reid(args):
    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    # Create model
    model = models.create(args.arch,
                          num_features=1024,
                          dropout=args.dropout,
                          num_classes=args.features)

    model = nn.DataParallel(model).cuda()
    print('Test with best model:')
    # checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)
    # Evaluator
    evaluator = Evaluator(model)
    metric.train(model, train_loader)
    print("Validation:")
    evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
    print("Test:")
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #11
def loadDataset():
    torch.cuda.set_device(0)
    logs_dir = 'market-1501-Exper33/RPP/'
    num_features = 256
    num_classes = 751
    T = 1
    dim = 256
    dropout = 0.5

    model = models.create('resnet50_rpp',
                          num_features=num_features,
                          dropout=dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True,
                          T=T,
                          dim=dim)
    model = model.cuda()
    checkpoint = load_checkpoint(
        osp.join(logs_dir, 'cvs_checkpoint_0107.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])

    res = []
    frame_number = 0

    # --datasets
    shujuku = {}
    rentidir = '/data/reid/renti/queries'

    return model
Example #12
def predict():
    img = preprocess_image(cv2.imread(find_new_img(), 1))
    model = models.create('resnet50',
                          num_features=256,
                          dropout=0.25,
                          num_classes=5005,
                          cut_at_pooling=False,
                          FCN=True)
    tar = torch.load('/root/HumpbackWhale/checkpoint.pth.tar',
                     map_location='cpu')
    state_dict = tar['state_dict']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if 'module.' in k:
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)

    #model.load_state_dict(torch.load("/root/HumpbackWhale/identification/checkpoint.pth.tar",map_location='cpu'))
    if cuda:
        model = model.cuda()
    d = model(img)
    #return int(torch.max(model(img)[1][0][0], dim=0)[1].numpy())
    return voc[int(torch.max(model(img)[1][0][0], dim=0)[1].numpy())]
Example #13
def loadDataset():
    logs_dir = 'market-1501-Exper33/RPP/'
    num_features = 256
    num_classes = 751
    T = 1
    dim = 256
    dropout = 0.5

    ###
    model = models.create('resnet50_rpp',
                          num_features=num_features,
                          dropout=dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True,
                          T=T,
                          dim=dim)
    model = model.cuda()
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])

    res = []
    frame_number = 0

    return model
Example #14
    def train(self,
              train_data,
              epochs=70,
              step_size=55,
              init_lr=0.1,
              dropout=0.5):
        """ create model and dataloader """
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes)
        model = nn.DataParallel(model).cuda()
        dataloader = self.get_dataloader(train_data, training=True)

        # the base parameters for the backbone (e.g. ResNet50)
        base_param_ids = set(map(id, model.module.CNN.base.parameters()))

        # we fixed the first three blocks to save GPU memory
        base_params_need_for_grad = filter(lambda p: p.requires_grad,
                                           model.module.CNN.parameters())

        # params of the new layers
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]

        # set the learning rate for backbone to be 0.1 times
        param_groups = [{
            'params': base_params_need_for_grad,
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]

        criterion = nn.CrossEntropyLoss().cuda()
        optimizer = torch.optim.SGD(param_groups,
                                    lr=init_lr,
                                    momentum=0.5,
                                    weight_decay=5e-4,
                                    nesterov=True)

        # change the learning rate by step
        def adjust_lr(epoch, step_size):
            lr = init_lr / (10**(epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

            if epoch % step_size == 0:
                print("Epoch {}, current lr {}".format(epoch, lr))

        """ main training process """
        trainer = Trainer(model, criterion)
        for epoch in range(epochs):
            adjust_lr(epoch, step_size)
            trainer.train(epoch, dataloader, optimizer, print_freq=10)

        torch.save(model.state_dict(),
                   osp.join(self.save_path, "model_{}.ckpt".format(epoch)))
        self.model = model
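With the defaults above (init_lr=0.1, step_size=55, epochs=70), adjust_lr holds the new-layer learning rate at 0.1 for epochs 0-54 and drops it to 0.01 from epoch 55 on, with the backbone always at 0.1x of that via lr_mult. A quick numeric check of the formula:

# Sanity check of lr = init_lr / 10 ** (epoch // step_size) used above.
init_lr, step_size = 0.1, 55
for epoch in (0, 54, 55, 69):
    lr = init_lr / (10 ** (epoch // step_size))
    print(epoch, lr, lr * 0.1)  # epoch, new-layer lr, backbone lr (lr_mult=0.1)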
Example #15
    def resume(self, ckpt_file, step):
        print("continued from step", step)
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes,
                              is_output_feature=True)
        self.model = nn.DataParallel(model).cuda()
        self.model.load_state_dict(load_checkpoint(ckpt_file))
Example #16
    def resume(self, ckpt_file, step):
        print("continued from step", step)
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              embeding_fea_size=self.embeding_fea_size,
                              fixed_layer=self.fixed_layer)
        self.model = nn.DataParallel(model).cuda()
        self.model.load_state_dict(load_checkpoint(ckpt_file))
Example #17
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    #device_ids = [0, 1, 2, 3]
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model_dict = model.state_dict()
        checkpoint_load = {
            k: v
            for k, v in (checkpoint['state_dict']).items() if k in model_dict
        }
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
        #        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    #model = nn.DataParallel(model)
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint('checkpoint.pth.tar')
    model.module.load_state_dict(checkpoint['state_dict'])
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery)
Example #18
def init_model(model_path):
    model = models.create('resnet50',
                          num_features=128,
                          num_classes=216,
                          dropout=0)
    checkpoint = load_checkpoint(model_path)
    model.load_state_dict(checkpoint['state_dict'])
    model = torch.nn.DataParallel(model).cuda()
    return model
Example #19
    def evaluate(self, query, gallery):
        test_loader = self.get_dataloader(list(set(query) | set(gallery)),
                                          training=False)
        param = self.model.state_dict()
        del self.model
        model = models.create(self.model_name, dropout=self.dropout,
                              num_classes=self.num_classes,
                              is_output_feature=True)
        self.model = nn.DataParallel(model).cuda()
        self.model.load_state_dict(param)
        evaluator = Evaluator(self.model)
        evaluator.evaluate(test_loader, query, gallery)
Example #20
def create_model(args, classes):
    model = models.create(args.arch, num_features=args.features, norm=False, dropout=args.dropout, num_classes=classes)

    model.cuda()
    model = nn.DataParallel(model)

    initial_weights = load_checkpoint(args.init)
    copy_state_dict(initial_weights['state_dict'], model)

    return model
Example #21
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    sys.stdout = Logger(
        osp.join(args.logs_dir, 'log-partial-reid-group-test.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset_set, num_classes, query_loader_set, gallery_loader_set = \
        get_data(args.dataset, args.data_dir, args.height,
                 args.width, args.ratio, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True,
                          num_parts=args.num_parts)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model_dict = model.state_dict()
        checkpoint_load = {
            k: v
            for k, v in (checkpoint['state_dict']).items() if k in model_dict
        }
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
        #        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)

    print("Test:")
    with torch.no_grad():
        evaluator.evaluate(query_loader_set, gallery_loader_set, dataset_set)
    return
Example #22
def train(train_data, data_dir, config):
    config.set_training(True)
    model = models.create(config.model_name,
                          num_features=config.num_features,
                          dropout=config.dropout,
                          num_classes=config.num_classes)
    #model = model.cuda()
    model = nn.DataParallel(model).cuda()
    dataloader = dp.get_dataloader(train_data, data_dir, config)
    train_model(model, dataloader, config)
    return model
Example #23
    def resume(self, ckpt_file, step):
        print("continued from step", step)
        model = models.create(self.model_name,
                              dropout=self.dropout,
                              num_classes=self.num_classes,
                              mode=self.mode)
        model.load_state_dict(load_checkpoint(ckpt_file), strict=False)
        model_distill = deepcopy(model)
        # model.load_state_dict(load_checkpoint_new(ckpt_file), strict=False)
        self.model = nn.DataParallel(model).cuda()
        self.model_distill = nn.DataParallel(model_distill).cuda()
Example #24
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    #if args.height is None or args.width is None:
    #    args.height, args.width = (224, 224)
    #dataset, num_classes, train_loader, test_loader = \
    #    get_data(args.dataset, args.split, args.data_dir, args.height,
    #             args.width, args.batch_size, args.num_instances, args.workers,
    #             args.combine_trainval)

    query_loader, gallery_loader = get_real_test_data(args.query_dir,
                                                      args.gallery_dir,
                                                      args.height, args.width,
                                                      args.batch_size,
                                                      args.workers)
    #query_loader, _ = get_real_test_data(args.query_dir, args.gallery_dir, args.height, args.width, args.batch_size, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_classes=args.num_classes,
                          num_features=args.features,
                          test_attribute=True)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.weights:
        checkpoint = load_checkpoint(args.weights)
        model.load_state_dict(checkpoint['state_dict'])
    if args.resume and not args.weights:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        # the dataset-based test_loader is commented out above, so fall back
        # to the real query/gallery loaders
        evaluator.evaluate(query_loader, gallery_loader)
        return

    # Test
    evaluator.evaluate(query_loader, gallery_loader)
Example #25
def setup_model(loss, architecture, num_classes):
    if loss == 'triplet':
        dropout = 0
        num_features = 1024
        num_classes = 128
    elif loss == 'softmax':
        dropout = 0.5
        num_features = 128
    else:
        raise ValueError('unsupported loss: {}'.format(loss))
    return models.create(architecture,
                         num_features=num_features,
                         dropout=dropout,
                         num_classes=num_classes)
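A hypothetical call, assuming 'resnet50' is registered with the models factory: for the triplet branch, num_classes is overridden to 128 inside setup_model, so the value passed in only matters for 'softmax'.

# Illustrative usage of setup_model (names assumed, not from the source project).
model = setup_model('triplet', 'resnet50', num_classes=751)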
Example #26
def main(args):

    # Load the synset words
    idx2cls = list()
    with open('samples/synset_words.txt') as lines:
        for line in lines:
            line = line.strip().split(' ', 1)[1]
            line = line.split(', ', 1)[0].replace(' ', '_')
            idx2cls.append(line)

    # Setup a classification model
    print('Loading a model...', end='')
    #model = torchvision.models.resnet152(pretrained=True)
    model = models.create('resnet50', num_features=256, dropout=0.25,
                          num_classes=5005, cut_at_pooling=False, FCN=True)
    tar = torch.load('../checkpoint.pth.tar')
    state_dict = tar['state_dict']
    model.load_state_dict(state_dict)
    
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    #transform = T.Compose([
    #    T.RectScale(256, 256),
    #    T.ToTensor(),
    #    normalizer,
    #    transforms.ToTensor(),
    #    #transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #     #                    std=[0.229, 0.224, 0.225])
    #])
    transform = transforms.Compose([
        # transforms.RectScale(256, 256)
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    print('finished')

    # Setup the SmoothGrad
    smooth_grad = SmoothGrad(model=model, cuda=args.cuda, sigma=args.sigma,
                             n_samples=args.n_samples, guided=args.guided)
    # pick one training image to explain
    img_files = os.listdir('../dataset/train/')
    args.image = osp.join('../dataset/train/', img_files[3])
    # Predict without adding noises
    smooth_grad.load_image(filename=args.image, transform=transform)
    prob, idx = smooth_grad.forward()

    # Generate the saliency images of top 3
    for i in range(0, 3):
        # print('{:.5f}'.format(prob[i]))
        smooth_grad.generate(filename='results/{}'.format(i), idx=idx[i])
Example #27
def create_model(args):
    model = models.create(args.arch,
                          num_features=args.features,
                          norm=True,
                          dropout=args.dropout,
                          num_classes=0)
    # use CUDA
    model = model.cuda()
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.copy_weight(checkpoint['state_dict'])
    model = nn.DataParallel(model)
    return model
Example #28
def getdetection():
    model = models.create('resnet50', num_features=1024,
                          dropout=0.5, num_classes=13164)
    model = model.cuda()
    model.eval()
    checkpoint = load_checkpoint('./checkpointres50.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])
    label = torch.load('./renet50layer6label10class15000.pkl')
    Ic = np.zeros(1024)
    for i in range(0, 1024):
        if label[i] == 4:
            Ic[i] = 1
    Ic = torch.from_numpy(Ic)
    return model, Ic
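The loop above builds a 0/1 mask over the 1024 channels, selecting those whose stored label equals 4. Assuming `label` (loaded with torch.load) is a tensor, an equivalent vectorized form would be:

# Sketch: vectorized equivalent of the mask-building loop above.
Ic = (label[:1024] == 4).double()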
Example #29
    def softmax_train(self, train_data, unselected_data, step, epochs,
                      step_size, init_lr, dropout, loss):

        """ create model and dataloader """
        model = models.create(self.model_name, dropout=self.dropout, num_classes=self.num_classes, 
                              embeding_fea_size=self.embeding_fea_size, classifier = loss, fixed_layer=self.fixed_layer)

        model = nn.DataParallel(model).cuda()

        # the base parameters for the backbone (e.g. ResNet50)
        base_param_ids = set(map(id, model.module.CNN.base.parameters())) 
        base_params_need_for_grad = filter(lambda p: p.requires_grad, model.module.CNN.base.parameters()) 
        new_params = [p for p in model.parameters() if id(p) not in base_param_ids]

        # set the learning rate for backbone to be 0.1 times
        param_groups = [
            {'params': base_params_need_for_grad, 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]


        exclusive_criterion = ExLoss(self.embeding_fea_size, len(unselected_data), t=10).cuda()

        optimizer = torch.optim.SGD(param_groups, lr=init_lr, momentum=self.train_momentum,
                                    weight_decay=5e-4, nesterov=True)

        # change the learning rate by step
        def adjust_lr(epoch, step_size):
            use_unselcted_data = True
            lr = init_lr / (10 ** (epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)
            if epoch >= step_size:
                use_unselcted_data = False
                # print("Epoch {}, CE loss, current lr {}".format(epoch, lr))
            return use_unselcted_data


        s_dataloader = self.get_dataloader(train_data, training=True, is_ulabeled=False)
        u_dataloader = self.get_dataloader(unselected_data, training=True, is_ulabeled=True)


        """ main training process """
        trainer = Trainer(model, exclusive_criterion, fixed_layer=self.fixed_layer, lamda = self.lamda)
        for epoch in range(epochs):
            use_unselcted_data = adjust_lr(epoch, step_size)
            trainer.train(epoch, s_dataloader, u_dataloader, optimizer, use_unselcted_data, print_freq=len(s_dataloader)//2)

        ckpt_file = osp.join(self.save_path,  "step_{}.ckpt".format(step))
        torch.save(model.state_dict(), ckpt_file)
        self.model = model
Example #30
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)
    start_epoch = best_top1 = 0
    model = nn.DataParallel(model).cuda()
    if args.resume:
        #checkpoint = load_checkpoint(args.resume)
        #state_dict = get_state_dict(checkpoint['state_dict'],model.state_dict())
        #model.load_state_dict(state_dict)
        #start_epoch = checkpoint['epoch']
        #best_top1 = checkpoint['best_top1']
        #print("=> Start epoch {}  best top1 {:.1%}"
        #      .format(start_epoch, best_top1))
        state_dict = torch.load(args.resume)
        state_dict = get_state_dict(state_dict, model.state_dict())
        model.load_state_dict(state_dict)
    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)

    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #31
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    # assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          num_diff_features=128,
                          dropout=args.dropout,
                          cut_at_pooling=False)
    print(model)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items()
                           if k in model.state_dict()}
        model_dict = model.state_dict()
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.dataset) > 1:
            for dataset_name in args.dataset:
                print("{} test result:".format(dataset_name))
                evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                                   dataset.gallery[dataset_name], metric)
            return
        else:
            evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
            return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs) /
                                 float(args.epochs - args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
    # epoch_num = args.maxstep//(750//18) + 1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            try:
                logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
            except AttributeError:
                pass
        # top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)
        # top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        top1 = 1
        # is_best = top1 > best_top1
        # best_top1 = max(top1, best_top1)
        # save_checkpoint({
        #     'state_dict': model.module.state_dict(),
        #     'epoch': epoch + 1,
        #     'best_top1': best_top1,
        # }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        is_best = False
        best_top1 = 1
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    print("Test:")
    if len(args.dataset) > 1:
        for dataset_name in args.dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                               dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #32
def main(args):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # log file
    if args.evaluate == 1:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_test.txt'))
    else:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # from reid.data import get_data ,
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.seq_len, args.seq_srd,
                 args.workers, args.train_mode)

    # create CNN model
    cnn_model = models.create(args.a1, num_features=args.features, dropout=args.dropout)

    # create ATT model
    input_num = cnn_model.feat.in_features  # 2048
    output_num = args.features  # 128
    att_model = models.create(args.a2, input_num, output_num)

    # create classifier model
    class_num = 2
    classifier_model = models.create(args.a3, output_num, class_num)

    # CUDA acceleration model

    cnn_model = torch.nn.DataParallel(cnn_model).to(device)
    att_model = att_model.to(device)
    classifier_model = classifier_model.to(device)

    criterion_oim = OIMLoss(args.features, num_classes,
                            scalar=args.oim_scalar, momentum=args.oim_momentum)
    criterion_veri = PairLoss(args.sampling_rate)
    criterion_oim.to(device)
    criterion_veri.to(device)

    # Optimizer
    base_param_ids = set(map(id, cnn_model.module.base.parameters()))
    new_params = [p for p in cnn_model.parameters() if
                  id(p) not in base_param_ids]

    param_groups1 = [
        {'params': cnn_model.module.base.parameters(), 'lr_mult': 1},
        {'params': new_params, 'lr_mult': 1}]
    param_groups2 = [
        {'params': att_model.parameters(), 'lr_mult': 1},
        {'params': classifier_model.parameters(), 'lr_mult': 1}]

    optimizer1 = torch.optim.SGD(param_groups1, lr=args.lr1,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay,
                                 nesterov=True)

    optimizer2 = torch.optim.SGD(param_groups2, lr=args.lr2,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay,
                                 nesterov=True)
    # optimizer1 = torch.optim.Adam(param_groups1, lr=args.lr1, weight_decay=args.weight_decay)
    #
    # optimizer2 = torch.optim.Adam(param_groups2, lr=args.lr2, weight_decay=args.weight_decay)

    # Schedule Learning rate
    def adjust_lr1(epoch):
        lr = args.lr1 * (0.1 ** (epoch/args.lr1step))
        print(lr)
        for g in optimizer1.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr2(epoch):
        lr = args.lr2 * (0.01 ** (epoch//args.lr2step))
        print(lr)
        for g in optimizer2.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lr3(epoch):
        lr = args.lr3 * (0.000001 ** (epoch //args.lr3step))
        print(lr)
        return lr

    # Trainer
    trainer = SEQTrainer(cnn_model, att_model, classifier_model, criterion_veri, criterion_oim, args.train_mode, args.lr3)

    # Evaluator
    if args.train_mode == 'cnn':
        evaluator = CNNEvaluator(cnn_model, args.train_mode)
    elif args.train_mode == 'cnn_rnn':
        evaluator = ATTEvaluator(cnn_model, att_model, classifier_model, args.train_mode)

    else:
        raise RuntimeError('unknown train_mode: {}'.format(args.train_mode))

    best_top1 = 0
    if args.evaluate == 1:  # evaluate
        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'cnnmodel_best.pth.tar'))
        cnn_model.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'attmodel_best.pth.tar'))
        att_model.load_state_dict(checkpoint['state_dict'])

        checkpoint = load_checkpoint(osp.join(args.logs_dir, 'clsmodel_best.pth.tar'))
        classifier_model.load_state_dict(checkpoint['state_dict'])

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)

    else:
        for epoch in range(args.start_epoch, args.epochs):
            adjust_lr1(epoch)
            adjust_lr2(epoch)
            rate = adjust_lr3(epoch)
            trainer.train(epoch, train_loader, optimizer1, optimizer2, rate)

            if (epoch+1) % 3 == 0 or (epoch+1) == args.epochs:

                top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.queryinfo, dataset.galleryinfo)
                is_best = top1 > best_top1
                if is_best:
                    best_top1 = top1

                save_cnn_checkpoint({
                    'state_dict': cnn_model.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                }, is_best, fpath=osp.join(args.logs_dir, 'cnn_checkpoint.pth.tar'))

                if args.train_mode == 'cnn_rnn':
                    save_att_checkpoint({
                        'state_dict': att_model.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    }, is_best, fpath=osp.join(args.logs_dir, 'att_checkpoint.pth.tar'))

                    save_cls_checkpoint({
                        'state_dict': classifier_model.state_dict(),
                        'epoch': epoch + 1,
                        'best_top1': best_top1,
                    }, is_best, fpath=osp.join(args.logs_dir, 'cls_checkpoint.pth.tar'))
Example #33
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
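The adjust_lr schedule in this example keeps the learning rate constant for the first 100 epochs and then decays it continuously by a factor of 1000 every 50 epochs. A quick check with an assumed base rate of 2e-4:

# Sketch of the decay lr * 0.001 ** ((epoch - 100) / 50.0), base lr assumed.
base_lr = 2e-4
for epoch in (100, 125, 150, 200):
    lr = base_lr if epoch <= 100 else base_lr * (0.001 ** ((epoch - 100) / 50.0))
    print(epoch, lr)  # 2e-4, ~6.3e-06, 2e-07, 2e-10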
Example #34
def main(args):
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    #######Tensorboard-logs##########
    logger = TensorLogger(args.Tensorlogs_dir)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log5.txt'))
    # sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    now = datetime.datetime.now()
    print(now.strftime('%Y-%m-%d %H:%M:%S'))
    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout,
                          num_diff_features=args.features,
                          pretrained=True)

    # model_path = load_checkpoint(args.model_path)
    # model.load_state_dict(model_path['state_dict'])

    # Criterion
    criterion = AdaptTripletLoss(margin=args.margin, num_feature=args.features).cuda()
    # criterion = TripletLoss(margin=args.margin,\
    #                          metric_embedding=args.metric_embedding).cuda()

    start_epoch = best_top1 = top1 = 0
    is_best = False
    if args.resume_from_trip:
        model_path = load_checkpoint(args.resume_from_trip)
        model.load_state_dict(model_path['state_dict'])
        start_epoch = model_path['epoch']
        best_top1 = model_path['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    if args.resume:
        model_path = load_checkpoint(args.resume)
        model.load_state_dict(model_path['state_dict'])
        criterion.load_state_dict(model_path['adapt_metric'])
        start_epoch = model_path['epoch']
        best_top1 = model_path['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    # Load from checkpoint

    # if args.resume:
    #     checkpoint = load_checkpoint(args.resume)
    #     model.load_state_dict(checkpoint['state_dict'])
    #     start_epoch = checkpoint['epoch']
    #     best_top1 = checkpoint['best_top1']
    #     print("=> Start epoch {}  best top1 {:.1%}"
    #           .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = ADP_Evaluator(model, criterion)
    # evaluator = Evaluator(model)
    if args.evaluate:
        # metric.train(model, train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        if len(args.dataset) > 1:
            for dataset_name in args.dataset:
                print("{} test result:".format(dataset_name))
                evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                                   dataset.gallery[dataset_name])
            return
        else:
            evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
            return

    # Optimizer
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
    #                              weight_decay=args.weight_decay)

    if args.only_train_metric:
        optimizer = torch.optim.Adam(criterion.parameters(), lr=args.lr,
                                     weight_decay=args.weight_decay)

        for param in model.parameters():
            param.requires_grad = False
        only_metric_train = True
    else:
        optimizer = torch.optim.Adam([{'params': model.parameters(), 'lr': 0.1 * args.lr},
                                      {'params': criterion.parameters()}], lr=args.lr,
                                     weight_decay=args.weight_decay)
        only_metric_train = False

    # def part_param(model,str):
    #     for name,param in model.named_parameters():
    #         if str not in name:
    #             yield param
    #
    # new_param = part_param(model,'base')
    # optimizer = torch.optim.Adam([
    #                             {'params': model.module.base.parameters()},
    #                             {'params':new_param, 'weight_decay':1.5*args.weight_decay}
    #                             ]
    #                             ,lr=args.lr, weight_decay=args.weight_decay,)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.01 ** ((epoch - args.lr_change_epochs) /
                                float(args.epochs - args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    def adjust_lambda(epoch):
        w_lambda = 0.001 if epoch <= 30 else 0.001 * (0.01 ** ((epoch - 30) / 70))
        return w_lambda
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        # Fixed coefficient for the weight constraint (adjust_lambda above
        # offers a decayed alternative).
        weight_constraint_lambda = 0.0001
        trainer.train(epoch, train_loader, optimizer, logger, weight_constraint_lambda)

        #######Tensorboard-logs##########
        if not only_metric_train:
            for tag, value in model.named_parameters():
                tag = tag.replace('.', '/')
                try:
                    logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
                    logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
                except AttributeError:
                    # Parameters with no gradient yet have value.grad == None.
                    pass
        for tag, value in criterion.named_parameters():
            tag = tag.replace('.', '/')
            try:
                logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
            except AttributeError:
                pass
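The logging loops above rely on a to_np helper that this example never defines. A minimal sketch consistent with how it is called here, assuming GPU tensors as in this script (the original project's definition may differ):

def to_np(x):
    # Detach from the autograd graph and copy to host memory as a NumPy array.
    return x.data.cpu().numpy()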
Example #35
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = models.create(args.arch, num_features=args.features, norm=True,
                          dropout=args.dropout)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = OIMLoss(model.module.num_features, num_classes,
                        scalar=args.oim_scalar,
                        momentum=args.oim_momentum).cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
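Several of these examples share load_checkpoint/save_checkpoint helpers without showing them. A minimal sketch consistent with how they are called here (open-reid's reid.utils.serialization provides similar helpers; the exact implementation may differ):

import os
import os.path as osp
import shutil

import torch

def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
    # Persist the latest state; keep a separate copy of the best model so the
    # final "Test with best model" step can reload it.
    if osp.dirname(fpath):
        os.makedirs(osp.dirname(fpath), exist_ok=True)
    torch.save(state, fpath)
    if is_best:
        shutil.copy(fpath, osp.join(osp.dirname(fpath), 'model_best.pth.tar'))

def load_checkpoint(fpath):
    # Restores the dict saved above ('state_dict', 'epoch', 'best_top1', ...).
    if not osp.isfile(fpath):
        raise ValueError("No checkpoint found at '{}'".format(fpath))
    return torch.load(fpath)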
Example #36
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    #######Tensorboard-logs##########
    logger = TensorLogger(args.Tensorlogs_dir)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, _, _, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    trans_train_loader, num_classes = get_fake_data(
        args.trans_name, args.trans_data_txt, args.height,
        args.width, args.batch_size, args.workers)
    # Create model
    model = models.create(args.arch,
                          dropout=0, num_classes=num_classes)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, trans_train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # Step decay: divide the learning rate by 10 every lr_change_epochs.
        lr = args.lr * (0.1 ** (epoch // args.lr_change_epochs))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, trans_train_loader, optimizer, logger)
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, trans_train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
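Both training scripts steer per-group learning rates through a custom 'lr_mult' key stored in each param group; PyTorch keeps unrecognized keys in optimizer.param_groups, so adjust_lr can read them back. A self-contained sketch of that mechanic with toy modules (the layer names and sizes below are illustrative, not taken from the examples above):

import torch
import torch.nn as nn

# Toy stand-ins: 'base' plays the pretrained backbone, 'head' the new layers.
base = nn.Linear(8, 8)
head = nn.Linear(8, 2)
optimizer = torch.optim.SGD(
    [{'params': base.parameters(), 'lr_mult': 0.1},
     {'params': head.parameters(), 'lr_mult': 1.0}],
    lr=0.1, momentum=0.9)

def adjust_lr(epoch, base_lr=0.1, step_size=40):
    # Same step decay as Example #35: divide by 10 every step_size epochs,
    # then scale each group's rate by its 'lr_mult'.
    lr = base_lr * (0.1 ** (epoch // step_size))
    for g in optimizer.param_groups:
        g['lr'] = lr * g.get('lr_mult', 1)

for epoch in (0, 40, 80):
    adjust_lr(epoch)
    print(epoch, [g['lr'] for g in optimizer.param_groups])
# Prints roughly [0.01, 0.1], then [0.001, 0.01], then [0.0001, 0.001].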