Example #1
from torchbench.image_classification import ImageNet
from torchvision.models.resnet import resnext101_32x8d
import torchvision.transforms as transforms
import PIL

print("IT'S ALIVE!!!")

# Define the transforms needed to convert ImageNet data to expected model input
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
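# The transform maps a PIL image to a normalized tensor ready for the model.
# A quick sanity check (hypothetical local file 'example.jpg'):
#   img = PIL.Image.open('example.jpg').convert('RGB')
#   x = input_transform(img).unsqueeze(0)  # shape: [1, 3, 224, 224]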

# Run the benchmark
ImageNet.benchmark(model=resnext101_32x8d(pretrained=True),
                   paper_model_name='ResNeXt-101-32x8d',
                   paper_arxiv_id='1611.05431',
                   input_transform=input_transform,
                   batch_size=256,
                   num_gpu=1)
Example #2
# Model 1
# Define the transforms needed to convert ImageNet data to expected model input
input_transform = transforms.Compose([
    ECenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest50', pretrained=True)

# Run the benchmark
ImageNet.benchmark(
    model=model,
    paper_model_name='ResNeSt-50',
    paper_arxiv_id='2004.08955',
    input_transform=input_transform,
    batch_size=32,
    num_gpu=1,
    model_description="Official weights from the authors of the paper.",
)
torch.cuda.empty_cache()

# Model 2
# Define the transforms needed to convert ImageNet data to expected model input
input_transform = transforms.Compose([
    ECenterCrop(256),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest101', pretrained=True)
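# ECenterCrop is a custom transform defined elsewhere in the source script
# (it reappears in the NAT example below). A minimal sketch, assuming it
# resizes so that a center crop of `imgsize` covers a fixed fraction of the
# shorter side; the 0.875 default is an assumption:
#
#   class ECenterCrop:
#       def __init__(self, imgsize, crop_fraction=0.875):
#           self.imgsize = imgsize
#           self.resize_size = int(imgsize / crop_fraction)
#
#       def __call__(self, img):  # img: PIL.Image
#           w, h = img.size
#           scale = self.resize_size / min(w, h)
#           img = img.resize((round(w * scale), round(h * scale)),
#                            PIL.Image.BICUBIC)
#           w, h = img.size
#           left = (w - self.imgsize) // 2
#           top = (h - self.imgsize) // 2
#           return img.crop((left, top,
#                            left + self.imgsize, top + self.imgsize))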
Example #3
# (Snippet begins mid-script: `checkpoint` was loaded just above, as in the
# Model 2 block below.)
sd = {}
for key in checkpoint['state_dict']:
    sd[key.replace('module.', '')] = checkpoint['state_dict'][key]
# Define the transforms needed to convert ImageNet data to expected model input
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
input_transform = transforms.Compose(
    [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean, std)])

args = config.load_cfg_from_cfg_file(config_path)
model = san(args.sa_type, args.layers, args.kernels, args.classes)
model.load_state_dict(sd)

# Run the benchmark
ImageNet.benchmark(
    model=model,
    paper_model_name='SAN10-pairwise',
    paper_arxiv_id='2004.13621',
    input_transform=input_transform,
    batch_size=256,
    num_gpu=1,
    paper_results={'Top 1 Accuracy': 0.749, 'Top 5 Accuracy': 0.921},
    model_description="Official weights from the authors of the paper.",
)
torch.cuda.empty_cache()

# Model 2
config_path = 'config/imagenet/imagenet_san10_patchwise.yaml'
file_id = '1aU60a3I-YZK1HYs25sj2V5nbXC9FqRZ5'
destination = './tmp/'
filename = 'imagenet_san10_patchwise.pth'
download_file_from_google_drive(file_id, destination, filename=filename)
checkpoint = torch.load(os.path.join(destination, filename))
sd = {}
for key in checkpoint['state_dict']:
    sd[key.replace('module.', '')] = checkpoint['state_dict'][key]
Example #4
urllib.request.urlretrieve(
    'https://dl.fbaipublicfiles.com/FixRes_data/FixRes_Pretrained_Models/ResNext101_32x48d_v2.pth',
    'ResNext101_32x48d_v2.pth')
pretrained_dict = torch.load('ResNext101_32x48d_v2.pth',
                             map_location='cpu')['model']

model_dict = model.state_dict()
for k in model_dict.keys():
    if (('module.' + k) in pretrained_dict.keys()):
        model_dict[k] = pretrained_dict.get(('module.' + k))
model.load_state_dict(model_dict)
# Run the benchmark
ImageNet.benchmark(
    model=model,
    paper_model_name='FixResNeXt-101 32x48d',
    paper_arxiv_id='1906.06423',
    input_transform=input_transform,
    batch_size=32,
    num_gpu=1,
    model_description="Official weights from the author's of the paper.")
torch.cuda.empty_cache()

# Model 2
# Define the transforms needed to convert ImageNet data to expected model input
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
input_transform = transforms.Compose([
    Resize(int((256 / 224) * 480)),
    transforms.CenterCrop(480),
    transforms.ToTensor(),
    normalize,
])
Example #5
model = fuse_bn_recursively(model)
model = model.cuda()
model = model.half()
model.eval()

# Run the benchmark
print('Benchmarking TResNet-M')
for i in range(1):  # one pass; increase the range to repeat runs for caching
    ImageNet.benchmark(
        model=model,
        paper_model_name='TResNet-M-FP16',
        paper_arxiv_id='2003.13630',
        input_transform=val_tfms,
        batch_size=640,
        num_workers=args.num_workers,
        num_gpu=1,
        pin_memory=True,
        paper_results={
            'Top 1 Accuracy': 0.807,
            'Top 5 Accuracy': 0.948
        },
        model_description="Official weights from the author's of the paper.",
        send_data_to_device=upload_data_to_gpu)

del model
gc.collect()
torch.cuda.empty_cache()
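# upload_data_to_gpu is defined elsewhere in the script; the GENet example
# below shows the tail of a similar helper. A plausible sketch (an
# assumption, not the script's exact code):
#   def upload_data_to_gpu(input, target, device, non_blocking=True):
#       input = input.to(device=device, dtype=torch.float16,
#                        non_blocking=non_blocking)
#       if target is not None:
#           target = target.to(device=device, non_blocking=non_blocking)
#       return input, target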

# # #### TResNet-L-2 ####
# args.model_name = 'tresnet_l_v2'
# model_path = './tresnet_l_2.pth'
Example #6
# (Snippet begins mid-script: net_config was loaded just above, mirroring
# the Model 2 block below.)
if 'img_size' in net_config:
    img_size = net_config['img_size']
else:
    img_size = 224
input_transform = transforms.Compose([
    ECenterCrop(img_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])
model = NATNet.build_from_config(net_config, pretrained=True)

# Run the benchmark
ImageNet.benchmark(
    model=model,
    paper_model_name='NAT-M1',
    paper_arxiv_id='2005.05859',
    input_transform=input_transform,
    batch_size=256,
    num_gpu=1,
    model_description="Official weights from the authors of the paper.",
    paper_results={
        'Top 1 Accuracy': 0.775,
        'Top 5 Accuracy': 0.935
    })
torch.cuda.empty_cache()

# Model 2
# Define the transforms needed to convert ImageNet data to expected model input
net_config = json.load(open('subnets/imagenet/NAT-M2/net.config'))
if 'img_size' in net_config:
    img_size = net_config['img_size']
else:
    img_size = 224
input_transform = transforms.Compose([
Example #7
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
model = rexnetv1.ReXNetV1(width_mult=1.0)
model.load_state_dict(sd)
model.eval()

# Run the benchmark
ImageNet.benchmark(
    model=model,
    paper_model_name='ReXNetV1 1.0x',
    paper_arxiv_id='2007.00992',
    input_transform=input_transform,
    batch_size=256,
    num_gpu=1,
    paper_results={
        'Top 1 Accuracy': 0.779,
        'Top 5 Accuracy': 0.939
    },
    model_description="Official weights from the authors of the paper.",
)
torch.cuda.empty_cache()

# Model 2
file_id = '1x2ziK9Oyv66Y9NsxJxXsdjzpQF2uSJj0'
destination = './tmp/'
filename = 'rexnetv1_1.3x.pth'
download_file_from_google_drive(file_id, destination, filename=filename)
sd = torch.load(os.path.join(destination, filename),
                map_location=torch.device('cpu'))
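# download_file_from_google_drive is defined elsewhere in the source script
# (it is also used in the SAN, ECA-Net, and GENet examples). A minimal sketch
# of such a helper, assuming the `requests` library and the public Google
# Drive download endpoint (details are assumptions, not the script's exact
# implementation):
import os
import requests

def download_file_from_google_drive(file_id, destination, filename=None):
    os.makedirs(destination, exist_ok=True)
    url = 'https://docs.google.com/uc?export=download'
    session = requests.Session()
    response = session.get(url, params={'id': file_id}, stream=True)
    # Large files answer with a confirmation page; the token sits in a cookie.
    token = next((v for k, v in response.cookies.items()
                  if k.startswith('download_warning')), None)
    if token:
        response = session.get(url, params={'id': file_id, 'confirm': token},
                               stream=True)
    path = os.path.join(destination, filename or file_id)
    with open(path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=32768):
            if chunk:
                f.write(chunk)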
Example #8
# Define Transforms
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
b0_input_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# Run Evaluation
ImageNet.benchmark(model=torch.hub.load('facebookresearch/WSL-Images',
                                        'resnext101_32x48d_wsl'),
                   paper_model_name='ResNeXt-101 32x48d',
                   paper_arxiv_id='1805.00932',
                   paper_pwc_id='exploring-the-limits-of-weakly-supervised',
                   input_transform=b0_input_transform,
                   batch_size=64,
                   num_gpu=1)

# Run Evaluation
ImageNet.benchmark(model=torch.hub.load('facebookresearch/WSL-Images',
                                        'resnext101_32x32d_wsl'),
                   paper_model_name='ResNeXt-101 32x32d',
                   paper_arxiv_id='1805.00932',
                   paper_pwc_id='exploring-the-limits-of-weakly-supervised',
                   input_transform=b0_input_transform,
                   batch_size=128,
                   num_gpu=1)

# Run Evaluation
Example #9
from dpn import dpn68, dpn68b, dpn92, dpn98, dpn131, dpn107
from torchbench.image_classification import ImageNet
import torchvision.transforms as transforms
import PIL

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
ImageNet.benchmark(model=dpn131(pretrained=True),
                   paper_model_name='DPN-131 x224',
                   paper_arxiv_id='1707.01629',
                   paper_pwc_id='dual-path-networks',
                   input_transform=input_transform,
                   batch_size=256,
                   num_gpu=1)
Example #10
from torchbench.image_classification import ImageNet
from torchvision.models.resnet import resnet34
import torchvision.transforms as transforms
import PIL

# Define the transforms needed to convert ImageNet data to expected model input
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
input_transform = transforms.Compose([
    transforms.Resize(256, PIL.Image.BICUBIC),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# Run the benchmark
ImageNet.benchmark(model=resnet34(pretrained=True),
                   paper_model_name='ResNet34',
                   paper_arxiv_id='1512.03385',
                   input_transform=input_transform,
                   batch_size=256,
                   num_gpu=1)
Example #11
for m in model_list:
    model_name = m['model']
    # create model from name
    model = create_model(model_name, pretrained=True)
    param_count = sum(p.numel() for p in model.parameters())
    print('Model %s, %s created. Param count: %d' % (model_name, m['paper_model_name'], param_count))

    # get appropriate transform for model's default pretrained config
    data_config = resolve_data_config(m['args'], model=model, verbose=True)
    if m['ttp']:
        model = TestTimePoolHead(model, model.default_cfg['pool_size'])
        data_config['crop_pct'] = 1.0
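        # TestTimePoolHead (from timm) wraps the classifier so that, at
        # evaluation time, features are pooled over a larger spatial grid;
        # crop_pct=1.0 then feeds the network the full resized image instead
        # of a tighter center crop.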
    input_transform = create_transform(**data_config)

    # Run the benchmark
    ImageNet.benchmark(
        model=model,
        model_description=m.get('model_description', None),
        paper_model_name=m['paper_model_name'],
        paper_arxiv_id=m['paper_arxiv_id'],
        input_transform=input_transform,
        batch_size=m['batch_size'],
        num_gpu=NUM_GPU,
        data_root=os.environ.get('IMAGENET_DIR', './.data/vision/imagenet')
    )

    torch.cuda.empty_cache()
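# For reference, each model_list entry consumed above is a dict; a
# hypothetical example of its shape (the real list, built earlier in the
# script, may carry more keys):
#   dict(model='resnext101_32x8d', paper_model_name='ResNeXt-101 32x8d',
#        paper_arxiv_id='1611.05431', args=dict(), ttp=False, batch_size=256)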


Example #12
    #wide_resnet101_2=_attrib(paper_model_name=, paper_arxiv_id=),  # same weights as torchvision
    xception=_attrib(paper_model_name='Xception', paper_arxiv_id='1610.02357'),
)

model_names = list_models(pretrained=True)

for model_name in model_names:
    if model_name not in model_map:
        print('Skipping %s' % model_name)
        continue

    # create model from name
    model = create_model(model_name, pretrained=True)
    param_count = sum(p.numel() for p in model.parameters())
    print('Model %s created, param count: %d' % (model_name, param_count))

    # get appropriate transform for model's default pretrained config
    data_config = resolve_data_config(dict(), model=model, verbose=True)
    input_transform = create_transform(**data_config)

    # Run the benchmark
    ImageNet.benchmark(
        model=model,
        paper_model_name=model_map[model_name]['paper_model_name'],
        paper_arxiv_id=model_map[model_name]['paper_arxiv_id'],
        input_transform=input_transform,
        batch_size=model_map[model_name]['batch_size'],
        num_gpu=NUM_GPU,
        #data_root=DATA_ROOT
    )
Example #13
def run_models(model_names=None, model_list=None, data_root=None, run=True, force=True):
    """
    local mod on sotabench
    """

    if model_list is None or data_root is None:
        print("model_list or data_root not supplied")
        print("data_root:", data_root)
        return None
    out = []
    # iterate over the models in the bench list
    for i, m in enumerate(model_list):
        model_name = m['model']
        if model_names is None or model_name in model_names:
            # create model from name

            model = create_model(model_name, pretrained=True)
            data_config = resolve_data_config(m['args'], model=model, verbose=True)
            if m['ttp']:
                model, _ = apply_test_time_pool(model, data_config)

            batch_size = m['batch_size']
            mean = data_config['mean']
            std = data_config['std']
            input_size = data_config['input_size']
            interpolation = data_config['interpolation']
            crop_pct = 1.0 if m['ttp'] else data_config['crop_pct']

            print("Model: %s (%s)"%(model_name, m['paper_model_name']))
            print(' params: %d' % sum(p.numel() for p in model.parameters()))

            print("  batch_size\t", batch_size)
            print("  mean   \t", mean)
            print("  std    \t", std)
            print("  input_size\t", input_size)
            print("  interpolation\t", interpolation)
            print("  crop_pct\t", crop_pct)
            if model_names is None or not run:
                continue


            xform = create_transform(input_size=input_size,
                                     interpolation=interpolation,
                                     mean=mean,
                                     std=std,
                                     crop_pct=crop_pct,
                                     use_prefetcher=False)

            print(type(model), model.__class__, list(model.parameters())[0].dtype,
                    list(model.parameters())[0].device)
            print(type(xform))
            print(xform)

            # force Flag ensures local benchmark is computed regardless of the value in sotabench
            # requires https://github.com/xvdp/torchbench
            res = ImageNet.benchmark(model=model,
                                     paper_model_name=model_name,
                                     data_root=data_root,
                                     input_transform=xform,
                                     batch_size=batch_size,
                                     num_gpu=1,
                                     pin_memory=True,
                                     force=force)
            out.append(res)


            torch.cuda.empty_cache()
    return out
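# Example invocation (hypothetical model name and data path):
#   results = run_models(model_names=['resnext101_32x8d'],
#                        model_list=model_list,
#                        data_root='/data/imagenet',
#                        run=True, force=True)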
Example #14
        return r[:-1] + ', largest={})'.format(self.largest)
      
# Define the transforms needed to convert ImageNet data to expected model input
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
input_transform = transforms.Compose([
    Resize(int((256 / 224) * 320)),
    transforms.CenterCrop(320),
    transforms.ToTensor(),
    normalize,
])
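# `Resize` here is the FixRes custom transform, not torchvision's: the
# `__repr__` fragment above shows it carries a `largest` flag. A minimal
# sketch, assuming `largest=True` matches the longest image side to `size`
# (and `largest=False` the shortest, like torchvision):
#
#   import torchvision.transforms.functional as TF
#
#   class Resize:
#       def __init__(self, size, largest=False):
#           self.size, self.largest = size, largest
#
#       def __call__(self, img):
#           w, h = img.size
#           side = max(w, h) if self.largest else min(w, h)
#           scale = self.size / side
#           return TF.resize(img, [round(h * scale), round(w * scale)])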

model = resnext101_32x48d_wsl(progress=True)

urllib.request.urlretrieve(
    'https://dl.fbaipublicfiles.com/FixRes_data/FixRes_Pretrained_Models/ResNext101_32x48d_v2.pth',
    'ResNext101_32x48d_v2.pth')
pretrained_dict = torch.load('ResNext101_32x48d_v2.pth',
                             map_location='cpu')['model']

model_dict = model.state_dict()
for k in model_dict.keys():
    if ('module.' + k) in pretrained_dict.keys():
        model_dict[k] = pretrained_dict.get('module.' + k)
model.load_state_dict(model_dict)
# Run the benchmark
ImageNet.benchmark(
    model=model,
    paper_model_name='FixResNeXt-101 32x48d',
    paper_arxiv_id='1906.06423',
    input_transform=input_transform,
    batch_size=32,
    num_gpu=1
)
Example #15
# Define the transforms needed to convert ImageNet data to expected model input
input_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
model = models.__dict__['eca_resnet18'](k_size=[3, 5, 7, 7])
model.load_state_dict(sd)

# Run the benchmark
ImageNet.benchmark(
    model=model,
    paper_model_name='ECA-Net18',
    paper_arxiv_id='1910.03151',
    input_transform=input_transform,
    batch_size=256,
    num_gpu=1,
    paper_results={'Top 1 Accuracy': 0.7092, 'Top 5 Accuracy': 0.8993},
    model_description="Official weights from the authors of the paper.",
)
torch.cuda.empty_cache()

# Model 2
file_id = '15LV5Jkea3GPzvLP5__H7Gg88oNQUxBDE'
destination = './tmp/'
filename = 'eca_resnet34_k3357.pth.tar'
download_file_from_google_drive(file_id, destination, filename=filename)
checkpoint = torch.load(os.path.join(destination, filename))
sd = {}
for key in checkpoint['state_dict']:
    sd[key.replace('module.', '')] = checkpoint['state_dict'][key]
Example #16
from torchvision.models.alexnet import alexnet
import torchvision.transforms as transforms
from torchbench.image_classification import ImageNet
import PIL
import torch

# Define Transforms
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
b0_input_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# Run Evaluation
ImageNet.benchmark(model=alexnet(pretrained=True),
                   paper_model_name='AlexNet',
                   input_transform=b0_input_transform,
                   batch_size=256,
                   num_gpu=1)
Example #17
val_bs = args.batch_size
val_tfms = transforms.Compose([
    transforms.Resize(int(args.input_size / args.val_zoom_factor)),
    transforms.CenterCrop(args.input_size)
])
val_tfms.transforms.append(transforms.ToTensor())

print('Benchmarking TResNet-L-V2')
# Run the benchmark
ImageNet.benchmark(model=model,
                   paper_model_name='TResNet-L-V2 (FP16)',
                   paper_arxiv_id='2003.13630',
                   input_transform=val_tfms,
                   send_data_to_device=send_data,
                   batch_size=560,
                   num_workers=args.num_workers,
                   num_gpu=1,
                   pin_memory=True,
                   paper_results={
                       'Top 1 Accuracy': 0.819,
                       'Top 5 Accuracy': 0.951
                   },
                   model_description="TResNet-L-V2.")

del model
gc.collect()
torch.cuda.empty_cache()

# TResNet-M
args.model_name = 'tresnet_m'
model_path = './tresnet_m.pth'
model = create_model(args)
Example #18
    # Each model_metainfo row describes one pretrained model from the model
    # zoo (the enclosing loop over the metainfo table is truncated above).
    error, checksum, repo_release_tag, caption, paper, ds, img_size, scale, batch, rem = model_metainfo
    if (ds != "in1k") or (img_size == 0) or ((len(rem) > 0) and
                                             (rem[-1] == "*")):
        continue
    paper_model_name = caption
    paper_arxiv_id = paper
    input_image_size = img_size
    resize_inv_factor = scale
    batch_size = batch
    model_description = "pytorch" + (rem if rem == "" else ", " + rem)
    assert (not hasattr(net, "in_size")) or (input_image_size
                                             == net.in_size[0])
    ImageNet.benchmark(
        model=net,
        model_description=model_description,
        paper_model_name=paper_model_name,
        paper_arxiv_id=paper_arxiv_id,
        input_transform=transforms.Compose([
            transforms.Resize(
                int(math.ceil(float(input_image_size) / resize_inv_factor))),
            transforms.CenterCrop(input_image_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ]),
        batch_size=batch_size,
        num_gpu=1,
        # data_root=os.path.join("..", "imgclsmob_data", "imagenet")
    )
    torch.cuda.empty_cache()
Example #19
# Tail of a custom send_data_to_device helper (its definition is truncated
# above): the batch is moved to the GPU and cast to FP16 for the
# half-precision model.
    if target is not None:
        target = target.to(device=device,
                           dtype=torch.float16,
                           non_blocking=non_blocking)

    return input, target


print('Benchmarking GENet-large-pro')
# Run the benchmark
ImageNet.benchmark(model=model,
                   paper_model_name='GENet-large-pro',
                   paper_arxiv_id='2006.14090',
                   input_transform=transformer,
                   send_data_to_device=send_data,
                   batch_size=256,
                   num_workers=8,
                   num_gpu=1,
                   pin_memory=True,
                   paper_results={'Top 1 Accuracy': 0.813},
                   model_description="GENet-large-pro")

del model
gc.collect()
torch.cuda.empty_cache()

# GENet-normal
file_id = '1rpL0BKI_l5Xg4vN5fHGXPzTna5kW9hfs'
destination = './GENet_params/'
filename = 'GENet_normal.pth'
download_file_from_google_drive(file_id, destination, filename=filename)
Example #20
    _entry('xception', 'Xception', '1610.02357'),
]
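# _entry is a small helper defined earlier in the script; a plausible sketch
# (an assumption, matching the keys the loop below reads):
#   def _entry(model, paper_model_name, paper_arxiv_id,
#              batch_size=256, ttp=False, args=None):
#       return dict(model=model, paper_model_name=paper_model_name,
#                   paper_arxiv_id=paper_arxiv_id, batch_size=batch_size,
#                   ttp=ttp, args=args if args is not None else dict())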


for m in model_list:
    model_name = m['model']
    # create model from name
    model = create_model(model_name, pretrained=True)
    param_count = sum(p.numel() for p in model.parameters())
    print('Model %s, %s created. Param count: %d' % (model_name, m['paper_model_name'], param_count))

    # get appropriate transform for model's default pretrained config
    data_config = resolve_data_config(m['args'], model=model, verbose=True)
    if m['ttp']:
        model = TestTimePoolHead(model, model.default_cfg['pool_size'])
        data_config['crop_pct'] = 1.0
    input_transform = create_transform(**data_config)

    # Run the benchmark
    ImageNet.benchmark(
        model=model,
        paper_model_name=m['paper_model_name'],
        paper_arxiv_id=m['paper_arxiv_id'],
        input_transform=input_transform,
        batch_size=m['batch_size'],
        num_gpu=NUM_GPU,
        data_root=os.environ.get('IMAGENET_DIR', './imagenet')
    )


Example #21
                                 std=[0.229, 0.224, 0.225])
        ])
    else:
        model = create_model(model_name,
                             num_classes=1000,
                             in_chans=3,
                             pretrained=True)

        data_config = resolve_data_config({'img_size': input_size},
                                          model=model,
                                          verbose=True)
        data_config.update(img_size=data_config['input_size'][2])
        del data_config['input_size']
        if input_size > 224:
            model = TestTimePoolHead(model, model.default_cfg['pool_size'])
            data_config['crop_pct'] = 1.0
        input_transform = transforms_imagenet_eval(**data_config)

    # Run the benchmark
    ImageNet.benchmark(model=model,
                       paper_model_name=paper_name,
                       paper_arxiv_id='2001.06570',
                       paper_pwc_id='harmonic-convolutional-networks-based-on',
                       paper_results=paper_result,
                       input_transform=input_transform,
                       batch_size=256,
                       num_gpu=1,
                       data_root='~/data/imagenet')

torch.cuda.empty_cache()