Example #1
def main():
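    # Evaluation-only entry point: restores checkpoints saved under
    # args.resume and runs the test protocol on the query/gallery loaders.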
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_gpu = torch.cuda.is_available()

    sys.stdout = Logger(osp.join(args.resume, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset, root=args.root)

    # Data augmentation
    spatial_transform_test = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.ToTensor(),
        ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    temporal_transform_test = None

    pin_memory = True if use_gpu else False

    queryloader = DataLoader(VideoDataset(
        dataset.query,
        spatial_transform=spatial_transform_test,
        temporal_transform=temporal_transform_test),
                             batch_size=1,
                             shuffle=False,
                             num_workers=0,
                             pin_memory=pin_memory,
                             drop_last=False)

    galleryloader = DataLoader(VideoDataset(
        dataset.gallery,
        spatial_transform=spatial_transform_test,
        temporal_transform=temporal_transform_test),
                               batch_size=1,
                               shuffle=False,
                               num_workers=0,
                               pin_memory=pin_memory,
                               drop_last=False)

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    for epoch in args.test_epochs:
        model_path = osp.join(args.resume,
                              'checkpoint_ep' + str(epoch) + '.pth.tar')
        print("Loading checkpoint from '{}'".format(model_path))
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['state_dict'])

        if use_gpu:
            model = model.cuda()

        print("Evaluate")
        with torch.no_grad():
            test(model, queryloader, galleryloader, use_gpu)
def train(parameters: Dict[str, float]) -> float:
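    # Hyperparameter-search objective: trains with the supplied parameters
    # and returns the validation score (0 if the loss diverges to NaN).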
    global args
    print("====", args.focus, "====")
    torch.manual_seed(args.seed)
    # args.gpu_devices = "0,1"
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    cudnn.benchmark = True
    torch.cuda.manual_seed_all(args.seed)
    
    dataset = data_manager.init_dataset(name=args.dataset, sampling=args.sampling)
    transform_test = transforms.Compose([
        transforms.Resize((args.height, args.width), interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])


    pin_memory = True if use_gpu else False
    transform_train = transforms.Compose([
        transforms.Resize((args.height, args.width), interpolation=3),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.Pad(10),
        Random2DTranslation(args.height, args.width),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

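    # Hyperparameters supplied by the search; the .get() defaults keep the
    # function usable stand-alone.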
    batch_size = int(round(parameters.get("batch_size", 32)))
    base_learning_rate = 0.00035
    # weight_decay = 0.0005
    alpha = parameters.get("alpha", 1.2)
    sigma = parameters.get("sigma", 0.8)
    l = parameters.get("l", 0.5)
    beta_ratio = parameters.get("beta_ratio", 0.5)
    gamma = parameters.get("gamma", 0.1)
    margin = parameters.get("margin", 0.3)
    weight_decay = parameters.get("weight_decay", 0.0005)
    lamb = 0.3

    trainloader = DataLoader(
        VideoDataset(dataset.train, seq_len=args.seq_len, sample='random', transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=batch_size, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    if args.dataset == 'mars_subset':
        validation_loader = DataLoader(
            VideoDataset(dataset.val, seq_len=8, sample='random', transform=transform_test),
            batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
            pin_memory=pin_memory, drop_last=False,
        )
    else:
        queryloader = DataLoader(
            VideoDataset(dataset.val_query, seq_len=args.seq_len, sample='dense_subset', transform=transform_test),
            batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
            pin_memory=pin_memory, drop_last=False,
        )
        galleryloader = DataLoader(
            VideoDataset(dataset.val_gallery, seq_len=args.seq_len, sample='dense_subset', transform=transform_test),
            batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
            pin_memory=pin_memory, drop_last=False,
        )

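    # Loss setup: cosine triplet loss, label-smoothed cross entropy, center
    # loss, and OSM-CAA loss; train_model combines them, with beta_ratio
    # weighting the mix.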
    criterion_htri = TripletLoss(margin, 'cosine')
    criterion_xent = CrossEntropyLabelSmooth(dataset.num_train_pids)
    criterion_center_loss = CenterLoss(use_gpu=use_gpu)
    criterion_osm_caa = OSM_CAA_Loss(alpha=alpha, l=l, osm_sigma=sigma)
    args.arch = "ResNet50ta_bt"
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss={'xent', 'htri'})
    if use_gpu:
        model = nn.DataParallel(model).cuda()
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        params += [{"params": [value], "lr": base_learning_rate, "weight_decay": weight_decay}]

    optimizer = torch.optim.Adam(params)
    scheduler = WarmupMultiStepLR(optimizer, milestones=[40, 70], gamma=gamma, warmup_factor=0.01, warmup_iters=10)
    optimizer_center = torch.optim.SGD(criterion_center_loss.parameters(), lr=0.5)
    num_epochs = 121
    # test_rerank(model, queryloader, galleryloader, args.pool, use_gpu, lamb=lamb , parameters=parameters)
    for epoch in range(num_epochs):
        vals = train_model(model, criterion_xent, criterion_htri, optimizer,
                           trainloader, use_gpu, optimizer_center,
                           criterion_center_loss, criterion_osm_caa, beta_ratio)
        if math.isnan(vals[0]):
            return 0
        scheduler.step()
        if epoch % 40 == 0:
            # vals is assumed to hold (total, triplet, xent, osm) loss values.
            print("TripletLoss {:.6f} OSM Loss {:.6f} Cross_entropy {:.6f} Total Loss {:.6f}".format(
                vals[1], vals[3], vals[2], vals[0]))
    
    if args.dataset == 'mars_subset':
        result1 = test_validation(model, validation_loader, args.pool, use_gpu, parameters=parameters)
        del validation_loader
    else:
        result1 = test_rerank(model, queryloader, galleryloader, args.pool, use_gpu, lamb=lamb, parameters=parameters)
        del queryloader
        del galleryloader
    del trainloader 
    del model
    del criterion_htri
    del criterion_xent
    del criterion_center_loss
    del criterion_osm_caa
    del optimizer
    del optimizer_center
    del scheduler
    return result1
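Because train() consumes a plain dict of hyperparameters and returns a single score, any search loop can drive it. The sketch below is not part of the original snippet: it is a minimal random-search driver, every range in it is an illustrative assumption, and only keys that train() actually reads are sampled.

import random

def random_search(num_trials=20, seed=0):
    # Minimal random-search driver for train(); all ranges are assumptions.
    rng = random.Random(seed)
    best_score, best_params = -float("inf"), None
    for _ in range(num_trials):
        candidate = {
            "batch_size": rng.choice([32, 64]),
            "alpha": rng.uniform(0.8, 1.6),       # OSM_CAA_Loss alpha
            "sigma": rng.uniform(0.4, 1.2),       # OSM_CAA_Loss osm_sigma
            "l": rng.uniform(0.1, 0.9),           # OSM_CAA_Loss l
            "beta_ratio": rng.uniform(0.0, 1.0),  # loss-mixing weight
            "gamma": rng.choice([0.1, 0.3]),      # LR decay factor
            "margin": rng.uniform(0.1, 0.5),      # triplet margin
            "weight_decay": rng.choice([5e-4, 1e-4]),
        }
        score = train(candidate)  # train() returns 0 when the loss goes NaN
        if score > best_score:
            best_score, best_params = score, candidate
    return best_params, best_score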
Example #3
parser.add_argument('--save_dir', type=str, default='log-mars-ap3d')

parser.add_argument('--gpu', default='0', type=str, 
                    help='gpu device ids for CUDA_VISIBLE_DEVICES')


args = parser.parse_args()
                    
test_image = tp.Numpy.Placeholder((args.test_batch, 3, args.seq_len, args.height, args.width))
input_pid = tp.Numpy.Placeholder((args.train_batch,))


os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
dataset = data_manager.init_dataset(name=args.dataset, root=args.root)


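# gallery_job is compiled by OneFlow as a static graph: test_image fixes the
# input shape, and the tp.Numpy return type yields features as a numpy array.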
@flow.global_function(function_config=func_config)
def gallery_job(image: test_image) -> tp.Numpy:
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_gallery_pids,
                              training=False,
                              resnetblock=getresnet)
    feat = model.build_network(image)
    feat = math.reduce_mean(feat, 1)
    feat = flow.layers.batch_normalization(inputs=feat,
                                           axis=1,
                                           momentum=0.997,
                                           epsilon=1.001e-5,
                                           center=True,
                                           scale=True)
    return feat
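A OneFlow global function is called like a normal Python function with numpy arrays matching its placeholder shape, and it returns numpy output. A minimal call sketch, assuming trained weights have already been restored into the graph (dummy_batch is a stand-in, not real data):

import numpy as np

# Shape must match the tp.Numpy.Placeholder declared for test_image above.
dummy_batch = np.zeros(
    (args.test_batch, 3, args.seq_len, args.height, args.width),
    dtype=np.float32)
gallery_feat = gallery_job(dummy_batch)  # numpy array of gallery features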
Example #4
                    type=str,
                    default='map',
                    help="map,rerank_map")

args = parser.parse_args()

torch.manual_seed(args.seed)

# args.gpu_devices = "0,1"
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()

cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)

dataset = data_manager.init_dataset(name=args.dataset)
pin_memory = True if use_gpu else False

transform_train = transforms.Compose([
    transforms.Resize((args.height, args.width), interpolation=3),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.Pad(10),
    Random2DTranslation(args.height, args.width),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    RandomErasing2(probability=0.5, mean=[0.485, 0.456, 0.406])
])

transform_test = transforms.Compose([
    transforms.Resize((args.height, args.width), interpolation=3),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
Example #5
def main():
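    # Full training entry point: builds augmentation and data loaders, trains
    # with cross-entropy + triplet loss, evaluates periodically, and keeps the
    # best rank-1 checkpoint.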
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu))
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset, root=args.root)

    # Data augmentation
    spatial_transform_train = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.RandomHorizontalFlip(),
        ST.ToTensor(),
        ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    temporal_transform_train = TT.TemporalRandomCrop(size=args.seq_len,
                                                     stride=args.sample_stride)

    spatial_transform_test = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.ToTensor(),
        ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    temporal_transform_test = TT.TemporalBeginCrop()

    pin_memory = True if use_gpu else False

    # Both branches build the same loader; only the training split differs.
    if args.dataset != 'mars':
        train_split = dataset.train_dense
    else:
        train_split = dataset.train
    trainloader = DataLoader(
        VideoDataset(train_split,
                     spatial_transform=spatial_transform_train,
                     temporal_transform=temporal_transform_train),
        sampler=RandomIdentitySampler(train_split,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True)

    queryloader = DataLoader(VideoDataset(
        dataset.query,
        spatial_transform=spatial_transform_test,
        temporal_transform=temporal_transform_test),
                             batch_size=args.test_batch,
                             shuffle=False,
                             num_workers=0,
                             pin_memory=pin_memory,
                             drop_last=False)

    galleryloader = DataLoader(VideoDataset(
        dataset.gallery,
        spatial_transform=spatial_transform_test,
        temporal_transform=temporal_transform_test),
                               batch_size=args.test_batch,
                               shuffle=False,
                               num_workers=0,
                               pin_memory=pin_memory,
                               drop_last=False)

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = nn.CrossEntropyLoss()
    criterion_htri = TripletLoss(margin=args.margin, distance=args.distance)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()
        # model = model.cuda()

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

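    # Each epoch: train once; every eval_step epochs (and at the final epoch)
    # run the test protocol and save a checkpoint, tracking the best rank-1.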
    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)
        # In PyTorch >= 1.1 the scheduler steps after the optimizer updates.
        scheduler.step()

        if ((epoch + 1) >= args.start_eval and args.eval_step > 0 and
                (epoch + 1) % args.eval_step == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")
            with torch.no_grad():
                # test using 4 frames
                rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch + 1,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
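All of the snippets above read a module-level args object, and only Example #4 shows a fragment of the parser. For reference, here is a minimal argparse sketch covering the flags Example #5 uses; the flag names are taken from the code, but every default below is an illustrative assumption, not the original repository's value.

import argparse

parser = argparse.ArgumentParser(description='Video re-ID training (sketch)')
# Flag names inferred from args.* references in Example #5; defaults assumed.
parser.add_argument('--dataset', type=str, default='mars')
parser.add_argument('--root', type=str, default='./data')
parser.add_argument('--arch', type=str, default='ap3dres50')
parser.add_argument('--height', type=int, default=256)
parser.add_argument('--width', type=int, default=128)
parser.add_argument('--seq_len', type=int, default=4)
parser.add_argument('--sample_stride', type=int, default=8)
parser.add_argument('--num_instances', type=int, default=4)
parser.add_argument('--train_batch', type=int, default=32)
parser.add_argument('--test_batch', type=int, default=32)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--lr', type=float, default=0.0003)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--stepsize', type=int, nargs='+', default=[40, 80])
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--margin', type=float, default=0.3)
parser.add_argument('--distance', type=str, default='cosine')
parser.add_argument('--max_epoch', type=int, default=240)
parser.add_argument('--start_epoch', type=int, default=0)
parser.add_argument('--start_eval', type=int, default=0)
parser.add_argument('--eval_step', type=int, default=10)
parser.add_argument('--resume', type=str, default='')
parser.add_argument('--save_dir', type=str, default='log-mars-ap3d')
parser.add_argument('--gpu', type=str, default='0',
                    help='gpu device ids for CUDA_VISIBLE_DEVICES')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use_cpu', action='store_true')
args = parser.parse_args()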