Example #1
File: eval.py Project: guybuk/ANCOR
def main():
    args = parse_option()

    meta_fg_val_dataset, meta_val_dataset, n_cls = get_eval_datasets(args)

    meta_valloader = DataLoader(meta_val_dataset,
                                batch_size=args.test_batch_size,
                                shuffle=False,
                                drop_last=False,
                                num_workers=args.num_workers)
    meta_fg_valloader = DataLoader(meta_fg_val_dataset,
                                   batch_size=args.test_batch_size,
                                   shuffle=False,
                                   drop_last=False,
                                   num_workers=args.num_workers)

    model = create_model(args.model, n_cls, args.only_base, args.head,
                         args.dim)
    load_model(model, args.model_path, not args.only_base)
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)
        model = model.cuda()
        cudnn.benchmark = True

    evaluate(meta_valloader, model, args, "N-Way")
    if args.fg:
        evaluate(meta_fg_valloader, model, args, 'Fine-Grained')
    else:
        evaluate(meta_fg_valloader, model, args, 'All-Way')
Example #2
def main():
    args = parse_option()

    meta_fg_val_dataset, meta_val_dataset, n_cls = get_eval_datasets(args)

    meta_valloader = DataLoader(meta_val_dataset,
                                batch_size=args.test_batch_size, shuffle=False, drop_last=False,
                                num_workers=args.num_workers)
    meta_fg_valloader = DataLoader(meta_fg_val_dataset,
                                   batch_size=args.test_batch_size, shuffle=False, drop_last=False,
                                   num_workers=args.num_workers)
    # Assume model #1 is the first (coarse cls) model, only-base = False so the logits layer will be loaded
    # Assume model #2 is the second (intra-class cls) model, only-base = True for maximum performance
    only_bases = [False, True]
    mlps = [False, True]
    models = [create_model(model, n_cls, only_base, args.head, args.dim, mlp) for model, only_base, mlp in
              zip(args.model, only_bases, mlps)]
    for model, model_path, only_base in zip(models, args.model_path, only_bases):
        load_model(model, model_path, not only_base)

    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)
        models = [model.cuda() for model in models]
        cudnn.benchmark = True

    # evaluation
    evaluate(meta_valloader, models, args, "Regular")
    if args.fg:
        evaluate(meta_fg_valloader, models, args, 'Fine-Grained')
    else:
        evaluate(meta_fg_valloader, models, args, 'All-Way')
Example #3
def main():
    args = parse_option()

    meta_fg_val_dataset, meta_val_dataset, n_cls = get_eval_datasets(args)

    meta_valloader = DataLoader(meta_val_dataset,
                                batch_size=args.test_batch_size, shuffle=False, drop_last=False,
                                num_workers=args.num_workers)
    meta_fg_valloader = DataLoader(meta_fg_val_dataset,
                                   batch_size=args.test_batch_size, shuffle=False, drop_last=False,
                                   num_workers=args.num_workers)
    # for simplicity, assume only-base=True for all models (it performs better)
    only_base = True
    models = [create_model(model, n_cls, only_base, args.head, args.dim) for model in args.model]
    for model, model_path in zip(models, args.model_path):
        load_model(model, model_path, not only_base)

    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)
        models = [model.cuda() for model in models]
        cudnn.benchmark = True

    # evaluation
    evaluate(meta_valloader, models, args, "Regular")
    if args.fg:
        evaluate(meta_fg_valloader, models, args, 'Fine-Grained')
    else:
        evaluate(meta_fg_valloader, models, args, 'All-Way')
Example #4
def main():
    args = parse_option()

    train_dataset, n_cls = get_datasets(args)

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size, shuffle=True, drop_last=False,
                              num_workers=args.num_workers)

    model = create_model(args.model, n_cls, args.only_base, args.head, args.dim)
    load_model(model, args.model_path, not args.only_base)
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)
        model = model.cuda()
        cudnn.benchmark = True

    for i, (images, labels) in enumerate(train_loader):
        if args.gpu is not None:
            images = images.cuda(args.gpu)

        def attention_forward(encoder, imgs):
            # hard-coded forward pass: we need the spatial feature map, not the final feature vector
            x = encoder.conv1(imgs)
            x = encoder.bn1(x)
            x = encoder.relu(x)
            x = encoder.maxpool(x)
            x = encoder.layer1(x)
            x = encoder.layer2(x)
            x = encoder.layer3(x)
            feats = encoder.layer4(x)
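            # flatten the (B, C, H, W) feature map into a (B*H*W, C) batch so fc can run on every spatial location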
            feats_as_batch = feats.permute((0, 2, 3, 1)).contiguous().view((-1, feats.shape[1]))
            # reminder: "fc" layer outputs: (feature, class logits)
            feats_as_batch = encoder.fc(feats_as_batch)[0]
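            # reshape the per-location embeddings back into a (B, D, H, W) feature map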
            feats_as_batch = feats_as_batch.view(
                (feats.shape[0], feats.shape[2], feats.shape[3], feats_as_batch.shape[1]))
            feats_as_batch = feats_as_batch.permute((0, 3, 1, 2))
            return feats_as_batch

        f_q = attention_forward(model, images)
        localization(images, f_q, args.batch_size, batch_id=i, img_size=448)
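        # only visualize the first few batches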
        if i == 10:
            break
Example #5
def main():
    args = parse_option()

    meta_fg_val_dataset, meta_val_dataset, n_cls = get_tsne_datasets(args)

    meta_valloader = DataLoader(meta_val_dataset,
                                batch_size=args.test_batch_size,
                                shuffle=True,
                                drop_last=False,
                                num_workers=args.num_workers)
    meta_fg_valloader = DataLoader(meta_fg_val_dataset,
                                   batch_size=args.test_batch_size,
                                   shuffle=False,
                                   drop_last=False,
                                   num_workers=args.num_workers)

    model = create_model(args.model, n_cls, args.only_base, args.head,
                         args.dim)
    load_model(model, args.model_path, not args.only_base)
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)
        model = model.cuda()
        cudnn.benchmark = True

    is_norm = True
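    # extract features and predictions on the validation split; predictions are scored against the coarse ground truth below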
    all_features, all_ys, all_preds = get_features_and_preds_from_dataloader(
        model, iter(meta_valloader), is_norm=is_norm)

    all_coarse_gt = np.array([
        get_coarse_class_for_fine_gt(meta_val_dataset.coarse2fine, y)
        for y in all_ys
    ])
    print(
        f"Accuracy: {float(sum(all_preds == all_coarse_gt)) / len(all_preds)}")
    if isinstance(model.fc, torch.nn.Sequential):
        if args.head in ['fork', 'seq']:
            if is_norm:
                class_weights = F.normalize(
                    model.fc[2].fc2.weight).detach().cpu().numpy()
            else:
                class_weights = model.fc[2].fc2.weight.detach().cpu().numpy()
        else:
            if is_norm:
                class_weights = F.normalize(
                    model.fc[2].weight).detach().cpu().numpy()
            else:
                class_weights = model.fc[2].weight.detach().cpu().numpy()
    else:
        class_weights = F.normalize(model.fc.weight).detach().cpu().numpy()
    model_name = os.path.basename(os.path.dirname(args.model_path))
    tsne_with_weights(all_features, all_coarse_gt, class_weights,
                      f"{model_name}_{args.dataset}_coarse")
    intra_features, intra_ys, intra_preds = get_features_and_preds_from_dataloader(
        model, iter(meta_fg_valloader), is_norm=is_norm)
    bincount = np.bincount(intra_preds)
    coarse_class = np.argmax(bincount)
    print(f"{bincount} => Class: {coarse_class}")
    tsne_plot_with_arrows(intra_features,
                          intra_ys,
                          class_weights[coarse_class],
                          title=f"{model_name}_{args.dataset}_fine")