Example #1
def run(args):
    argstr = yaml.dump(args.__dict__, default_flow_style=False)
    print('arguments:')
    print(argstr)

    argfile = osp.join(args.expdir, 'evaluate_args.yaml')

    args.cuda = not args.no_cuda

    if not args.dry:
        utils.ifmakedirs(args.expdir)
        logging.print_file(argstr, argfile)

    collate_fn = dict(
        collate_fn=list_collate) if args.input_crop == 'rect' else {}
    transforms = get_transforms(input_size=args.input_size,
                                crop=(args.input_crop == 'square'),
                                need=('val', ))

    if args.dataset.startswith('imagenet'):
        dataset = IdDataset(
            IN1K(args.imagenet_path,
                 args.dataset[len('imagenet-'):],
                 transform=transforms['val']))
        mode = "classification"
    else:
        raise NotImplementedError

    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        num_workers=args.workers,
                        shuffle=args.shuffle,
                        pin_memory=True,
                        **collate_fn)

    model = get_multigrain(args.backbone,
                           include_sampling=False,
                           pretrained_backbone=args.pretrained_backbone)

    p = model.pool.p

    checkpoints = utils.CheckpointHandler(args.expdir)

    if checkpoints.exists(args.resume_epoch, args.resume_from):
        epoch = checkpoints.resume(model,
                                   resume_epoch=args.resume_epoch,
                                   resume_from=args.resume_from,
                                   return_extra=False)
    else:
        raise ValueError('Checkpoint ' + args.resume_from + ' not found')

    if args.pooling_exponent is not None:  # overwrite stored pooling exponent
        p.data.fill_(args.pooling_exponent)

    print("Multigrain model with {} backbone and p={} pooling:".format(
        args.backbone, p.item()))
    print(model)

    if args.cuda:
        model = utils.cuda(model)

    model.eval()  # freeze batch normalization

    print("Evaluating", args.dataset)

    metrics_history = OD()
    metrics = defaultdict(utils.HistoryMeter)

    index = None
    tic()
    for i, batch in enumerate(loader):
        with torch.no_grad():
            if args.cuda:
                batch = utils.cuda(batch)
            metrics["data_time"].update(1000 * toc())
            tic()
            output_dict = model(batch['input'])
        if mode == "classification":
            target = batch['classifier_target']
            top1, top5 = utils.accuracy(output_dict['classifier_output'],
                                        target,
                                        topk=(1, 5))
            metrics["val_top1"].update(top1)
            metrics["val_top5"].update(top5)
        elif mode == "retrieval":
            descriptors = output_dict['normalized_embedding']
            if index is None:
                index = faiss.IndexFlatL2(descriptors.size(1))
            index.add(descriptors.cpu().numpy())
        metrics["batch_time"].update(1000 * toc())
        tic()
        print(
            logging.str_metrics(metrics,
                                iter=i,
                                num_iters=len(loader),
                                epoch=epoch,
                                num_epochs=epoch))
    print(logging.str_metrics(metrics, epoch=epoch, num_epochs=1))
    for k in metrics:
        metrics[k] = metrics[k].avg
    toc()

    metrics_history[epoch] = metrics
    checkpoints.save_metrics(metrics_history)
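
The retrieval branch above collects descriptors into a faiss.IndexFlatL2. A minimal standalone sketch of how such an index is built and queried (the dimensionality, the random data and the number of neighbours are made-up, not from the script):

import numpy as np
import faiss

d = 2048                                                 # descriptor dimensionality (assumed)
database = np.random.rand(1000, d).astype('float32')     # stand-in for the collected descriptors
queries = np.random.rand(5, d).astype('float32')         # hypothetical query descriptors

index = faiss.IndexFlatL2(d)                  # exact L2 index, as in the loop above
index.add(database)                           # add all database vectors at once
distances, ids = index.search(queries, 10)    # 10 nearest neighbours per query
print(ids[0])                                 # row indices into `database` for the first query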
Example #2

def run(args):
    argstr = yaml.dump(args.__dict__, default_flow_style=False)
    print('arguments:')
    print(argstr)

    argfile = osp.join(args.expdir, 'whiten_args.yaml')

    args.cuda = not args.no_cuda

    if not args.dry:
        utils.ifmakedirs(args.expdir)
        logging.print_file(argstr, argfile)

    collate_fn = dict(
        collate_fn=list_collate) if args.input_crop == 'rect' else {}

    transforms = get_transforms(input_size=args.input_size,
                                crop=(args.input_crop == 'square'),
                                need=('val', ),
                                backbone=args.backbone)
    dataset = ListDataset(args.whiten_path, args.whiten_list,
                          transforms['val'])
    if args.num_whiten_images != -1:
        dataset = Subset(dataset, list(range(args.num_whiten_images)))
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        num_workers=args.workers,
                        pin_memory=True,
                        **collate_fn)

    model = get_multigrain(args.backbone,
                           include_sampling=False,
                           pretrained_backbone=args.pretrained_backbone)

    if args.cuda:
        model = utils.cuda(model)

    p = model.pool.p

    checkpoints = utils.CheckpointHandler(args.expdir)

    if checkpoints.exists(args.resume_epoch, args.resume_from):
        resume_epoch = checkpoints.resume(model,
                                          resume_epoch=args.resume_epoch,
                                          resume_from=args.resume_from,
                                          return_extra=False)
    else:
        raise ValueError('Checkpoint ' + args.resume_from + ' not found')

    if args.pooling_exponent is not None:  # overwrite stored pooling exponent
        p.data.fill_(args.pooling_exponent)

    print("Multigrain model with {} backbone and p={} pooling:".format(
        args.backbone, p.item()))
    print(model)

    model.init_whitening()
    model.eval()

    print("Computing embeddings...")
    embeddings = []
    for i, batch in enumerate(loader):
        if i % max(1, len(loader) // 100) == 0:
            print("{}/{} ({}%)".format(i, len(loader),
                                       100 * i // len(loader)))
        with torch.no_grad():
            if args.cuda:
                batch = utils.cuda(batch)
            embeddings.append(model(batch)['embedding'].cpu())
    embeddings = torch.cat(embeddings)
    if args.no_include_last:
        embeddings = embeddings[:, :-1]

    print("Computing whitening...")
    m, P = get_whiten(embeddings)

    if args.no_include_last:
        # add a preserved pass-through channel to the PCA
        m = torch.cat((m, torch.tensor([0.0])), 0)
        D = P.size(0)
        P = torch.cat((P, torch.zeros(1, D)), 0)
        P = torch.cat((P, torch.cat(
            (torch.zeros(D, 1), torch.tensor([[1.0]])), 0)), 1)

    model.integrate_whitening(m, P)

    if not args.dry:
        checkpoints.save(model, resume_epoch if resume_epoch != -1 else 0)
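
get_whiten is not shown in these examples; a minimal sketch of a PCA-whitening computation with a compatible interface, assuming the returned mean m and matrix P are applied as (x - m) @ P.t() and that all embeddings fit in memory:

import torch

def get_whiten_sketch(embeddings, eps=1e-6):
    # embeddings: (N, D) tensor; returns mean m of shape (D,) and whitening matrix P of shape (D, D)
    m = embeddings.mean(dim=0)
    centered = embeddings - m
    cov = centered.t() @ centered / (embeddings.size(0) - 1)   # covariance of the centered embeddings
    eigvals, eigvecs = torch.linalg.eigh(cov)                  # eigenvalues in ascending order
    P = torch.diag((eigvals + eps).rsqrt()) @ eigvecs.t()      # scale each principal direction by 1/sqrt(variance)
    return m, P

# (x - m) @ P.t() then decorrelates the dimensions and equalizes their variance.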
Example #3
def run(args):
    argstr = yaml.dump(args.__dict__, default_flow_style=False)
    print('arguments:')
    print(argstr)
    argfile = osp.join(args.expdir, 'finetune_p_args.yaml')

    if osp.isfile(argfile):
        oldargs = yaml.safe_load(open(argfile))
        if oldargs != args.__dict__:
            print('WARNING: Changed configuration keys compared to stored experiment')
            utils.arguments.compare_dicts(oldargs, args.__dict__, verbose=True)

    args.cuda = not args.no_cuda
    args.validate_first = not args.no_validate_first
    args.validate = not args.no_validate

    if not args.dry:
        utils.ifmakedirs(args.expdir)
        logging.print_file(argstr, argfile)

    transforms = get_transforms(IN1K, args.input_size, crop=(args.input_crop == 'square'), need=('val',),
                                backbone=args.backbone)
    datas = {}
    for split in ('train', 'val'):
        datas[split] = IdDataset(IN1K(args.imagenet_path, split, transform=transforms['val']))
    loaders = {}
    collate_fn = dict(collate_fn=list_collate) if args.input_crop == 'rect' else {}
    selected = []
    count = Counter()

    # keep only args.images_per_class images per class from the training set
    for i, label in enumerate(datas['train'].dataset.labels):
        if count[label] < args.images_per_class:
            selected.append(i)
            count[label] += 1
    datas['train'].dataset = Subset(datas['train'].dataset, selected)
    loaders['train'] = DataLoader(datas['train'], batch_size=args.batch_size, shuffle=True,
                                  num_workers=args.workers, pin_memory=True, **collate_fn)
    loaders['val'] = DataLoader(datas['val'], batch_size=args.batch_size, shuffle=args.shuffle_val,
                                num_workers=args.workers, pin_memory=True, **collate_fn)

    model = get_multigrain(args.backbone, include_sampling=False,
                           pretrained_backbone=args.pretrained_backbone, learn_p=True)

    criterion = torch.nn.CrossEntropyLoss()
    if args.cuda:
        criterion = utils.cuda(criterion)
        model = utils.cuda(model)

    optimizers = OD()
    p = model.pool.p

    # SGD(self, params, lr=required, momentum=0, dampening=0, weight_decay=0, nesterov=False)
    optimizers['p'] = SGD([p], lr=args.learning_rate, momentum=args.momentum)
    optimizers = MultiOptim(optimizers)

    def training_step(batch):
        optimizers.zero_grad()

        output_dict = model(batch['input'])
        loss = criterion(output_dict['classifier_output'], batch['classifier_target'])
        top1, top5 = utils.accuracy(output_dict['classifier_output'].data, batch['classifier_target'].data, topk=(1, 5))

        p.grad = torch.autograd.grad(loss, p)[0]  # partial backward
        optimizers.step()

        return OD([
            ('cross_entropy', loss.item()),
            ('p', p.item()),
            ('top1', top1),
            ('top5', top5),
        ])

    def validation_step(batch):
        with torch.no_grad():
            output_dict = model(batch['input'])
            target = batch['classifier_target']
            xloss = criterion(output_dict['classifier_output'], target)
            top1, top5 = utils.accuracy(output_dict['classifier_output'], target, topk=(1, 5))

        return OD([
            ('cross_entropy', xloss.item()),
            ('top1', top1),
            ('top5', top5),
        ])

    metrics_history = OD()

    checkpoints = utils.CheckpointHandler(args.expdir)

    if checkpoints.exists(args.resume_epoch, args.resume_from):
        epoch = checkpoints.resume(model, metrics_history=metrics_history,
                                   resume_epoch=args.resume_epoch, resume_from=args.resume_from)
    else:
        raise ValueError('Checkpoint ' + args.resume_from + ' not found')

    if args.init_pooling_exponent is not None:  # overwrite stored pooling exponent
        p.data.fill_(args.init_pooling_exponent)

    print("Multigrain model with {} backbone and p={} pooling:".format(args.backbone, p.item()))
    print(model)

    def loop(loader, step, epoch, prefix=''):  # Training or validation loop
        metrics = defaultdict(utils.HistoryMeter if prefix == 'train_' else utils.AverageMeter)
        tic()
        for i, batch in enumerate(loader):
            if prefix == 'train_':
                lr = args.learning_rate * (1 - i / len(loader)) ** args.learning_rate_decay_power
                optimizers['p'].param_groups[0]['lr'] = lr
            if args.cuda:
                batch = utils.cuda(batch)
            data_time = 1000 * toc()
            tic()
            step_metrics = step(batch)
            step_metrics['data_time'] = data_time
            step_metrics['batch_time'] = 1000 * toc()
            tic()
            for (k, v) in step_metrics.items():
                metrics[prefix + k].update(v, len(batch['input']))
            print(logging.str_metrics(metrics, iter=i, num_iters=len(loader), epoch=epoch, num_epochs=epoch))
        print(logging.str_metrics(metrics, epoch=epoch, num_epochs=epoch))
        toc()
        if prefix == 'val_':
            return OD((k, v.avg) for (k, v) in metrics.items())
        return OD((k, v.hist) for (k, v) in metrics.items())

    if args.validate_first and 0 not in metrics_history:
        model.eval()

        metrics_history[epoch[0]] = loop(loaders['val'], validation_step, epoch, 'val_')
        checkpoints.save_metrics(metrics_history)

    model.eval()  # freeze batch normalization
    metrics = loop(loaders['train'], training_step, epoch, 'train_')
    metrics['last_p'] = p.item()

    if args.validate:
        model.eval()

        metrics.update(loop(loaders['val'], validation_step, epoch[0] + 1, 'val_'))
        metrics_history[epoch[0] + 1] = metrics

    if not args.dry:
        utils.make_plots(metrics_history, args.expdir)
        checkpoints.save(model, epoch[0] + 1, optimizers, metrics_history)
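
Example #3 only optimizes the pooling exponent p of the model's generalized-mean (GeM) pooling layer. A minimal sketch of GeM pooling to show what that exponent does (hypothetical GeM module, not the repository's class):

import torch
import torch.nn as nn

class GeM(nn.Module):
    """Generalized-mean pooling: p=1 is average pooling, large p approaches max pooling."""

    def __init__(self, p=3.0, eps=1e-6):
        super().__init__()
        self.p = nn.Parameter(torch.tensor(p))  # learnable exponent, like model.pool.p above
        self.eps = eps

    def forward(self, x):
        # x: (N, C, H, W) feature map -> (N, C) descriptor
        return x.clamp(min=self.eps).pow(self.p).mean(dim=(-2, -1)).pow(1.0 / self.p)

pool = GeM(p=3.0)
features = torch.rand(2, 2048, 7, 7)
print(pool(features).shape)  # torch.Size([2, 2048])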
Example #4
def run(args):
    argstr = yaml.dump(args.__dict__, default_flow_style=False)
    print(argstr)

    argfile = osp.join(args.expdir, 'evaluate_args.yaml')

    args.cuda = not args.no_cuda

    if not args.dry:
        utils.ifmakedirs(args.expdir)
        logging.print_file(argstr, argfile)

    collate_fn = dict(
        collate_fn=my_collate) if args.input_crop == 'rect' else {}

    transforms = get_transforms(input_size=args.input_size,
                                crop=(args.input_crop == 'square'),
                                need=('val', ),
                                backbone=args.backbone)

    if args.dataset.startswith('imagenet'):
        dataset = IdDataset(
            IN1K(args.imagenet_path,
                 args.dataset[len('imagenet-'):],
                 transform=transforms['val']))
        mode = "classification"
    else:
        dataset = IdDataset(
            meizi_dataset(root=args.meizi_path, transform=transforms['val']))
        mode = "retrieval"

    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        num_workers=args.workers,
                        shuffle=args.shuffle,
                        pin_memory=True,
                        **collate_fn)

    import torchvision.datasets as datasets
    val_loader = torch.utils.data.DataLoader(FidDataset(
        datasets.ImageFolder(args.meizi_path, transforms['val'])),
                                             batch_size=args.batch_size,
                                             shuffle=args.shuffle,
                                             num_workers=args.workers,
                                             pin_memory=True,
                                             **collate_fn)

    model = get_multigrain(args.backbone,
                           include_sampling=False,
                           pretrained_backbone=args.pretrained_backbone)

    p = model.pool.p
    checkpoints = utils.CheckpointHandler(args.expdir)

    if checkpoints.exists(args.resume_epoch, args.resume_from):
        epoch = checkpoints.resume(model,
                                   resume_epoch=args.resume_epoch,
                                   resume_from=args.resume_from,
                                   return_extra=False)
    else:
        raise ValueError('Checkpoint ' + args.resume_from + ' not found')

    if args.pooling_exponent is not None:  # overwrite stored pooling exponent
        p.data.fill_(args.pooling_exponent)

    model = utils.cuda(model)

    print("Multigrain model with {} backbone and p={} pooling:".format(
        args.backbone, p.item()))
    #print(model)

    use_multi_gpu = True
    if use_multi_gpu:
        gpuDeviceIds = [1]
        device = torch.device('cuda:{}'.format(gpuDeviceIds[0])
                              if torch.cuda.is_available() else 'cpu')
        model = torch.nn.DataParallel(model, device_ids=gpuDeviceIds)
        model.to(device)
        model.eval()  # freeze batch normalization
    else:
        # only use one GPU; device is reused below when moving batches
        device = torch.device('cuda')
        model.cuda()
        model.eval()

    metrics_history = OD()
    metrics = defaultdict(utils.HistoryMeter)

    tic()

    start_id = 0

    imageCount = len(dataset)
    Dim = 2048

    xb = MmapVectorUtils.Open(args.embeddingFilePath,
                              True,
                              shape=(imageCount, Dim + 2))

    # dataloader_iter = iter(loader)
    # data, target, fid = next(dataloader_iter)

    multiGpu = True

    # dataloader_iter = iter(loader)

    # print("val_loader lens", len(loader))
    # length = len(loader)
    # for step in tqdm(range(length)):
    #     try:
    #         dict_temp = next(dataloader_iter)

    #         data = dict_temp['input']
    #         classifier_target = dict_temp['classifier_target']
    #         vid = dict_temp['vid']
    #         frame_id = dict_temp['frame_id']
    #         instance_target = dict_temp['instance_target']

    #         batch = utils.cuda(data)

    #         data = batch.to(device) if True else data.cuda()

    #         data = data.to(device) if multiGpu else data.cuda()

    #         output = model(data)
    #         output = output['normalized_embedding']
    #         cpuOutput = output.cpu().detach().numpy()

    #         if not np.all(np.isfinite(cpuOutput)):
    #             print("Return infinit results in step ", step)
    #             continue

    #         yield target, fid, cpuOutput
    #     except Exception as e:
    #         print("error when prossess dataloader_iter, step ", step)
    #         print('Error:', e)

    for i, batch in enumerate(loader):
        batch_vid = batch['vid']
        batch_fid = batch['frame_id']

        with torch.no_grad():
            if args.cuda:
                batch = utils.cuda(batch)

            metrics["data_time"].update(1000 * toc())
            tic()
            batch_input_conv = torch.stack(batch['input'], dim=0).to(device)
            output_dict = model(batch_input_conv)

        if mode == "classification":
            # target = batch['classifier_target']
            # top1, top5 = utils.accuracy(output_dict['classifier_output'], target, topk=(1, 5))
            # metrics["val_top1"].update(top1, n=len(batch['input']))
            # metrics["val_top5"].update(top5, n=len(batch['input']))
            raise ValueError('only focus on retrival')

        elif mode == "retrieval":
            descriptors = output_dict['normalized_embedding']

            n, dim = descriptors.shape
            end_id = n + start_id
            end_id = min(imageCount, end_id)
            n = end_id - start_id

            xb[start_id:end_id, 0:1] = np.array(batch_vid).reshape(n, 1)
            xb[start_id:end_id, 1:2] = np.array(batch_fid).reshape(n, 1)

            xb[start_id:end_id, 2:] = descriptors[0:n].cpu().numpy()

            start_id += n
        metrics["batch_time"].update(1000 * toc())
        tic()
        print(
            logging.str_metrics(metrics,
                                iter=i,
                                num_iters=len(loader),
                                epoch=epoch,
                                num_epochs=epoch))

    print(logging.str_metrics(metrics, epoch=epoch, num_epochs=1))
    for k in metrics:
        metrics[k] = metrics[k].avg

    toc()

    metrics_history[epoch] = metrics
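
MmapVectorUtils.Open is a project-specific helper that is not part of these examples. A rough equivalent using a plain numpy memmap with the same row layout as the retrieval branch above (file name, counts and data are placeholders):

import numpy as np

image_count, dim = 10000, 2048
# two leading columns hold vid and frame_id, the remaining columns hold the descriptor
xb = np.memmap('embeddings.dat', dtype='float32', mode='w+',
               shape=(image_count, dim + 2))

# write one batch of n rows, mirroring xb[start_id:end_id, ...] in the loop above
n = 32
vids = np.arange(n, dtype='float32')
fids = np.zeros(n, dtype='float32')
descriptors = np.random.rand(n, dim).astype('float32')
xb[0:n, 0:1] = vids.reshape(n, 1)
xb[0:n, 1:2] = fids.reshape(n, 1)
xb[0:n, 2:] = descriptors
xb.flush()  # make sure the rows reach the file on disk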
Example #5
def run(args):
    argstr = yaml.dump(args.__dict__, default_flow_style=False)
    print('arguments:')
    print(argstr)
    argfile = osp.join(args.expdir, 'train_args.yaml')

    if osp.isfile(argfile):
        oldargs = yaml.safe_load(open(argfile))
        if oldargs is not None and oldargs != args.__dict__:
            print(
                'WARNING: Changed configuration keys compared to stored experiment'
            )
            utils.arguments.compare_dicts(oldargs, args.__dict__, verbose=True)

    args.cuda = not args.no_cuda
    args.validate_first = not args.no_validate_first

    if not args.dry:
        utils.ifmakedirs(args.expdir)
        logging.print_file(argstr, argfile)

    transforms = get_transforms(IN1K, args.input_size, args.augmentation)
    datas = {}
    for split in ('train', 'val'):
        imload = (preloader(args.imagenet_path, args.preload_dir_imagenet)
                  if args.preload_dir_imagenet else default_loader)
        datas[split] = IdDataset(
            IN1K(args.imagenet_path,
                 split,
                 transform=transforms[split],
                 loader=imload))
    loaders = {}
    loaders['train'] = DataLoader(datas['train'],
                                  batch_sampler=RASampler(
                                      len(datas['train']),
                                      args.batch_size,
                                      args.repeated_augmentations,
                                      args.epoch_len_factor,
                                      shuffle=True,
                                      drop_last=False),
                                  num_workers=args.workers,
                                  pin_memory=True)
    loaders['val'] = DataLoader(datas['val'],
                                batch_size=args.batch_size,
                                num_workers=args.workers,
                                pin_memory=True)

    model = get_multigrain(args.backbone,
                           p=args.pooling_exponent,
                           include_sampling=not args.global_sampling,
                           pretrained_backbone=args.pretrained_backbone)
    print("Multigrain model with {} backbone and p={} pooling:".format(
        args.backbone, args.pooling_exponent))
    print(model)

    cross_entropy = torch.nn.CrossEntropyLoss()
    cross_entropy_criterion = (cross_entropy, ('classifier_output',
                                               'classifier_target'),
                               args.classif_weight)
    if args.global_sampling:
        margin = SampledMarginLoss(margin_args=dict(beta_init=args.beta_init))
        beta = margin.margin.beta
        margin_criterion = (margin, ('normalized_embedding',
                                     'instance_target'),
                            1.0 - args.classif_weight)
    else:
        margin = MarginLoss(args.beta_init)
        beta = margin.beta
        margin_criterion = (margin,
                            ('anchor_embeddings', 'negative_embeddings',
                             'positive_embeddings'), 1.0 - args.classif_weight)

    extra = {'beta': beta}

    criterion = MultiCriterion(dict(cross_entropy=cross_entropy_criterion,
                                    margin=margin_criterion),
                               skip_zeros=(args.repeated_augmentations == 1))

    if args.cuda:
        criterion = utils.cuda(criterion)
        model = utils.cuda(model)

    optimizers = OD()
    optimizers['backbone'] = SGD(model.parameters(),
                                 lr=args.learning_rate,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay)
    optimizers['margin_beta'] = SGD([beta],
                                    lr=args.learning_rate * args.beta_lr,
                                    momentum=args.momentum)
    optimizers = MultiOptim(optimizers)
    optimizers.set_base_lr()

    if args.cuda:
        model = nn.DataParallel(model)

    def set_learning_rate(epoch):
        factor = 1.0
        for (drop, gamma) in zip(args.lr_drops_epochs, args.lr_drops_factors):
            if epoch > drop:
                factor *= gamma
        optimizers.lr_multiply(factor)

    batches_accumulated = 0

    def training_step(batch):
        nonlocal batches_accumulated
        if batches_accumulated == 0:
            optimizers.zero_grad()

        output_dict = model(batch['input'], batch['instance_target'])
        output_dict['classifier_target'] = batch['classifier_target']
        loss_dict = criterion(output_dict)
        top1, top5 = utils.accuracy(output_dict['classifier_output'].data,
                                    output_dict['classifier_target'].data,
                                    topk=(1, 5))

        loss_dict['loss'].backward()
        batches_accumulated += 1

        if batches_accumulated == args.gradient_accum:
            mag = {}
            for (name, p) in model.named_parameters():
                mag[name] = p.grad.norm().item()
            optimizers.step()
            batches_accumulated = 0

        return_dict = OD()
        for key in ['cross_entropy', 'margin', 'loss']:
            if key in loss_dict:
                return_dict[key] = loss_dict[key].item()
        return_dict['beta'] = beta.item()
        return_dict['top1'] = top1
        return_dict['top5'] = top5

        return return_dict

    def validation_step(batch):
        with torch.no_grad():
            output_dict = model(batch['input'])
            target = batch['classifier_target']
            xloss = cross_entropy(output_dict['classifier_output'], target)
            top1, top5 = utils.accuracy(output_dict['classifier_output'],
                                        target,
                                        topk=(1, 5))

        return OD([
            ('cross_entropy', xloss.item()),
            ('top1', top1),
            ('top5', top5),
        ])

    metrics_history = OD()

    checkpoints = utils.CheckpointHandler(args.expdir, args.save_every)

    if checkpoints.exists(args.resume_epoch, args.resume_from):
        begin_epoch, loaded_extra = checkpoints.resume(model, optimizers,
                                                       metrics_history,
                                                       args.resume_epoch,
                                                       args.resume_from)
        if 'beta' in loaded_extra:
            beta.data.copy_(loaded_extra['beta'])
        else:
            print('(re)initialized beta to {}'.format(beta.item()))
    else:
        raise ValueError('Checkpoint ' + args.resume_from + ' not found')

    def loop(loader, step, epoch, prefix=''):  # Training or validation loop
        metrics = defaultdict(utils.AverageMeter)
        tic()
        for i, batch in enumerate(loader):
            if args.cuda:
                batch = utils.cuda(batch)
            data_time = 1000 * toc()
            tic()
            step_metrics = step(batch)
            step_metrics['data_time'] = data_time
            step_metrics['batch_time'] = 1000 * toc()
            tic()
            for (k, v) in step_metrics.items():
                metrics[prefix + k].update(v, len(batch['input']))
            print(
                logging.str_metrics(metrics,
                                    iter=i,
                                    num_iters=len(loader),
                                    epoch=epoch,
                                    num_epochs=args.epochs))
        print(logging.str_metrics(metrics, epoch=epoch,
                                  num_epochs=args.epochs))
        toc()
        return OD((k, v.avg) for (k, v) in metrics.items())

    if args.validate_first and begin_epoch == 0 and 0 not in metrics_history:
        model.eval()
        metrics_history[0] = loop(loaders['val'], validation_step, begin_epoch,
                                  'val_')
        checkpoints.save_metrics(metrics_history)

    for epoch in range(begin_epoch, args.epochs):
        set_learning_rate(epoch)

        batches_accumulated = 0
        model.train()
        metrics = loop(loaders['train'], training_step, epoch, 'train_')

        model.eval()
        metrics.update(loop(loaders['val'], validation_step, epoch, 'val_'))

        metrics_history[epoch + 1] = metrics

        if not args.dry:
            utils.make_plots(metrics_history, args.expdir)
            checkpoints.save(model, epoch + 1, optimizers, metrics_history,
                             extra)
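
Example #5 steps the optimizers only once every args.gradient_accum batches, letting gradients add up in between. The same pattern in isolation, with a hypothetical model and optimizer standing in for the MultiGrain ones:

import torch
from torch.optim import SGD

model = torch.nn.Linear(16, 4)          # stand-in for the MultiGrain model
optimizer = SGD(model.parameters(), lr=0.1)
criterion = torch.nn.CrossEntropyLoss()
gradient_accum = 4                      # corresponds to args.gradient_accum

batches_accumulated = 0
for step in range(20):
    if batches_accumulated == 0:
        optimizer.zero_grad()           # start a fresh accumulation window
    inputs = torch.randn(8, 16)
    targets = torch.randint(0, 4, (8,))
    loss = criterion(model(inputs), targets)
    loss.backward()                     # gradients accumulate across batches
    batches_accumulated += 1
    if batches_accumulated == gradient_accum:
        optimizer.step()                # apply the accumulated gradient once
        batches_accumulated = 0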
Example #6
def run(args):
    argstr = yaml.dump(args.__dict__, default_flow_style=False)
    print(argstr)

    argfile = osp.join(args.expdir, 'evaluate_args.yaml')

    args.cuda = not args.no_cuda

    if not args.dry:
        utils.ifmakedirs(args.expdir)
        logging.print_file(argstr, argfile)
    
    collate_fn = dict(collate_fn=my_collate) if args.input_crop == 'rect' else {}

    transforms = get_transforms(input_size=args.input_size,
                                crop=(args.input_crop == 'square'),
                                need=('val', ),
                                backbone=args.backbone)

    if args.dataset.startswith('imagenet'):
        dataset = IdDataset(IN1K(args.imagenet_path, args.dataset[len('imagenet-'):],
                                 transform=transforms['val']))
        mode = "classification"
    else:
        dataset = IdDataset(meizi_dataset(root=args.meizi_path, transform=transforms['val'], starts=args.starts))
        mode = "retrieval"

    loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.workers, shuffle=args.shuffle,
                        pin_memory=True, **collate_fn)
    model = get_multigrain(args.backbone, include_sampling=False, pretrained_backbone=args.pretrained_backbone)

    p = model.pool.p
    checkpoints = utils.CheckpointHandler(args.expdir)

    if checkpoints.exists(args.resume_epoch, args.resume_from):
        epoch = checkpoints.resume(model, resume_epoch=args.resume_epoch, resume_from=args.resume_from,
                                   return_extra=False)
    else:
        raise ValueError('Checkpoint ' + args.resume_from + ' not found')

    if args.pooling_exponent is not None:  # overwrite stored pooling exponent
        p.data.fill_(args.pooling_exponent)

    print("Multigrain model with {} backbone and p={} pooling:".format(args.backbone, p.item()))
    # print(model)

    if args.cuda:
        model = utils.cuda(model)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()  # freeze batch normalization

    metrics_history = OD()
    metrics = defaultdict(utils.HistoryMeter)

    index = None
    tic()

    start_id = 0
    
    imageCount = len(dataset)
    Dim = 2048

    xb = MmapVectorUtils.Open(args.embeddingFilePath, True, shape=(imageCount, Dim + 2))

    for i, batch in enumerate(loader):
        batch_vid = batch['vid']
        batch_fid = batch['frame_id']

        with torch.no_grad():
            if args.cuda:
                batch = utils.cuda(batch)

            metrics["data_time"].update(1000 * toc());
            tic()
            output_dict = model(batch['input'])

        if mode == "classification":
            # target = batch['classifier_target']
            # top1, top5 = utils.accuracy(output_dict['classifier_output'], target, topk=(1, 5))
            # metrics["val_top1"].update(top1, n=len(batch['input']))
            # metrics["val_top5"].update(top5, n=len(batch['input']))
            raise ValueError('only focus on retrival')

        elif mode == "retrieval":
            descriptors = output_dict['normalized_embedding']

            n, dim = descriptors.shape
            end_id = n + start_id
            end_id = min(imageCount, end_id)
            n = end_id - start_id
            
            xb[start_id:end_id, 0:1] = np.array(batch_vid).reshape(n, 1)
            xb[start_id:end_id, 1:2] = np.array(batch_fid).reshape(n, 1)

            xb[start_id:end_id, 2:] = descriptors[0:n].cpu().numpy()

            start_id += n
        metrics["batch_time"].update(1000 * toc());
        tic()
        print(logging.str_metrics(metrics, iter=i, num_iters=len(loader), epoch=epoch, num_epochs=epoch))

    print(logging.str_metrics(metrics, epoch=epoch, num_epochs=1))
    for k in metrics:
        metrics[k] = metrics[k].avg

    toc()

    metrics_history[epoch] = metrics
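
Once Example #6 has filled the embedding file, the stored rows can be read back and indexed for retrieval. A sketch with the same assumed layout (path, shape and query choice are placeholders; the embeddings are L2-normalized, so inner product equals cosine similarity):

import numpy as np
import faiss

image_count, dim = 10000, 2048
xb = np.memmap('embeddings.dat', dtype='float32', mode='r',
               shape=(image_count, dim + 2))

vids = xb[:, 0].astype('int64')                 # first column: video id
fids = xb[:, 1].astype('int64')                 # second column: frame id
descriptors = np.ascontiguousarray(xb[:, 2:])   # remaining columns: normalized embeddings

index = faiss.IndexFlatIP(dim)                  # inner-product (cosine) index
index.add(descriptors)
scores, neighbours = index.search(descriptors[:5], 10)   # 10 neighbours for the first 5 frames
print(vids[neighbours[0]], fids[neighbours[0]])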