Code example #1
    def __init__(self, **kwargs):
        """Set up hyper-parameters and the anti-aliased ResNet-50 backbone(s).

        NOTE(review): **kwargs is accepted but never read — confirm callers
        rely on this before tightening the signature.
        """
        super(Model, self).__init__()

        # Training bookkeeping and hyper-parameters.
        self.epoch = 0
        self.log = True
        self.learning_rate = 0.015
        self.batch_size = 12
        self.dropout_rate = 0
        self.pooling = 'ccop'
        self.num_classes = 682
        self.multi_network = False

        self.training_correct_counter = 0
        self.training = False

        self.loss = nn.CrossEntropyLoss()

        # Std-deviation thresholds (consumed elsewhere; semantics not
        # visible in this block).
        self.std_thresh = 3
        self.std_thresh2 = 0.5

        def _make_backbone():
            # Anti-aliased (blur-pooled) ResNet-50 trunk without its
            # avgpool/fc head.
            net = antialiased_cnns.resnet50(pretrained=True, filter_size=4)
            return nn.Sequential(
                net.conv1,
                net.bn1,
                net.relu,
                net.maxpool,
                net.layer1,
                net.layer2,
                net.layer3,
                net.layer4,
            )

        self.model1 = _make_backbone()
        if self.multi_network:
            self.model2 = _make_backbone()

        # Classifier head; 4096 input features — presumably two pooled
        # 2048-d backbone outputs concatenated. TODO confirm in forward().
        self.fc = nn.Linear(4096, self.num_classes)
Code example #2
def create(config=None):
    """Loads the Anti Alias model.

    Returns a ``(call, preprocess_fn)`` pair. ``call`` takes a feature dict
    whose "image" entry is a float NHWC batch in [0, 1] and returns softmax
    class probabilities as a NumPy array.
    """
    del config  # Unused argument

    with torch.set_grad_enabled(False):
        model = antialiased_cnns.resnet50(pretrained=True)
        model = model.eval()

    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]

    def run_inference(features):
        # The preprocessing pipeline already rescales pixel values to
        # [0, 1]; apply standard ImageNet normalization on top of that.
        normalized = (features["image"] - norm_mean) / norm_std

        # Reorder [batch, h, w, c] -> [batch, c, h, w] for PyTorch.
        nchw = np.transpose(normalized, [0, 3, 1, 2]).astype(np.float32)
        inputs = torch.tensor(nchw)

        with torch.no_grad():
            logits = model(inputs)
        return logits.softmax(dim=-1).cpu().numpy()

    preprocess_config = ("resize_small(256)|"
                         "central_crop(224)|"
                         "value_range(0,1)")
    preprocess_fn = pipeline_builder.get_preprocess_fn(preprocess_config,
                                                       remove_tpu_dtypes=False)
    return run_inference, preprocess_fn
Code example #3
    def __init__(self, **kwargs):
        """Configure hyper-parameters, the ResNet-50 trunk, and the embedder.

        NOTE(review): **kwargs is accepted but never read — confirm callers
        rely on this before tightening the signature.
        """
        super(Model, self).__init__()

        # Training bookkeeping and hyper-parameters.
        self.epoch = 0
        self.log = True
        self.learning_rate = 0.015
        self.batch_size = 10
        self.dropout_rate = 0
        self.num_classes = 10
        self.multi_network = False

        self.training_correct_counter = 0

        self.loss = nn.CrossEntropyLoss()

        # Std-deviation thresholds (consumed elsewhere; semantics not
        # visible in this block).
        self.std_thresh = 2
        self.std_thresh2 = 0.5

        # Anti-aliased (blur-pooled) ResNet-50 backbone, minus its
        # avgpool/fc head.
        backbone = antialiased_cnns.resnet50(pretrained=True, filter_size=4)
        stages = [
            backbone.conv1,
            backbone.bn1,
            backbone.relu,
            backbone.maxpool,
            backbone.layer1,
            backbone.layer2,
            backbone.layer3,
            backbone.layer4,
        ]
        self.trunk = nn.Sequential(*stages)

        # Map 2048-d ResNet features to a 10-d embedding (original author
        # noted 256 as an alternative size).
        self.embedder = nn.Linear(2048, 10)
        self.training = True
        self.running_outputs = np.array([])
        self.running_labels = np.array([])
Code example #4
# Copyright (c) 2019, Adobe Inc. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike
# 4.0 International Public License. To view a copy of this license, visit
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode.

import antialiased_cnns

# Every anti-aliased architecture with pretrained weights, instantiated in
# turn (same order as the original listing; `model` ends up bound to the
# last one, mobilenet_v2).
_ARCH_NAMES = [
    'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
    'wide_resnet50_2', 'wide_resnet101_2',
    'resnext50_32x4d', 'resnext101_32x8d',
    'alexnet',
    'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn',
    'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn',
    'densenet121', 'densenet169', 'densenet201', 'densenet161',
    'mobilenet_v2',
]

for _name in _ARCH_NAMES:
    model = getattr(antialiased_cnns, _name)(pretrained=True)
Code example #5 — file: main.py, project: nihalsid/antialiased-cnns
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: build the model, set up data, train or evaluate.

    Runs one process of (possibly distributed) ImageNet-style training.
    Selects an anti-aliased architecture from ``args.arch`` (suffix digit is
    the low-pass filter size), wraps it for the requested parallelism mode,
    optionally resumes from a checkpoint, and then either performs one of
    the evaluation modes or runs the full training loop, checkpointing the
    best top-1 accuracy into the module-level ``best_acc1``.
    """
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    # create model
    print("=> creating model '{}'".format(args.arch))

    import antialiased_cnns.alexnet
    import antialiased_cnns.resnet
    import antialiased_cnns.vgg
    import antialiased_cnns.mobilenet
    import antialiased_cnns.densenet

    # Dispatch on the architecture name minus its last character: names like
    # 'resnet50_lpf4' encode the anti-aliasing filter size in the final
    # digit, which is parsed out with int(args.arch[-1]).
    if (args.arch[:-1] == 'alexnet_lpf'):
        model = antialiased_cnns.alexnet(pretrained=args.pretrained,
                                         filter_size=int(args.arch[-1]))

    elif (args.arch[:-1] == 'vgg11_bn_lpf'):
        model = antialiased_cnns.vgg11_bn(pretrained=args.pretrained,
                                          filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'vgg13_bn_lpf'):
        model = antialiased_cnns.vgg13_bn(pretrained=args.pretrained,
                                          filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'vgg16_bn_lpf'):
        model = antialiased_cnns.vgg16_bn(pretrained=args.pretrained,
                                          filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'vgg19_bn_lpf'):
        model = antialiased_cnns.vgg19_bn(pretrained=args.pretrained,
                                          filter_size=int(args.arch[-1]))

    elif (args.arch[:-1] == 'vgg11_lpf'):
        model = antialiased_cnns.vgg11(pretrained=args.pretrained,
                                       filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'vgg13_lpf'):
        model = antialiased_cnns.vgg13(pretrained=args.pretrained,
                                       filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'vgg16_lpf'):
        model = antialiased_cnns.vgg16(pretrained=args.pretrained,
                                       filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'vgg19_lpf'):
        model = antialiased_cnns.vgg19(pretrained=args.pretrained,
                                       filter_size=int(args.arch[-1]))

    elif (args.arch[:-1] == 'resnet18_lpf'):
        model = antialiased_cnns.resnet.resnet18(pretrained=args.pretrained,
                                                 filter_size=int(
                                                     args.arch[-1]))
    elif (args.arch[:-1] == 'resnet34_lpf'):
        model = antialiased_cnns.resnet34(pretrained=args.pretrained,
                                          filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'resnet50_lpf'):
        model = antialiased_cnns.resnet50(pretrained=args.pretrained,
                                          filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'resnet101_lpf'):
        model = antialiased_cnns.resnet101(pretrained=args.pretrained,
                                           filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'resnet152_lpf'):
        model = antialiased_cnns.resnet152(pretrained=args.pretrained,
                                           filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'resnext50_32x4d_lpf'):
        model = antialiased_cnns.resnext50_32x4d(pretrained=args.pretrained,
                                                 filter_size=int(
                                                     args.arch[-1]))
    elif (args.arch[:-1] == 'resnext101_32x8d_lpf'):
        model = antialiased_cnns.resnext101_32x8d(pretrained=args.pretrained,
                                                  filter_size=int(
                                                      args.arch[-1]))

    elif (args.arch[:-1] == 'densenet121_lpf'):
        model = antialiased_cnns.densenet121(pretrained=args.pretrained,
                                             filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'densenet169_lpf'):
        model = antialiased_cnns.densenet169(pretrained=args.pretrained,
                                             filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'densenet201_lpf'):
        model = antialiased_cnns.densenet201(pretrained=args.pretrained,
                                             filter_size=int(args.arch[-1]))
    elif (args.arch[:-1] == 'densenet161_lpf'):
        model = antialiased_cnns.densenet161(pretrained=args.pretrained,
                                             filter_size=int(args.arch[-1]))

    elif (args.arch[:-1] == 'mobilenet_v2_lpf'):
        model = antialiased_cnns.mobilenet_v2(pretrained=args.pretrained,
                                              filter_size=int(args.arch[-1]))

    else:
        # Fall back to a plain torchvision model for non-antialiased arches.
        model = models.__dict__[args.arch](pretrained=args.pretrained)

    # Optionally replace the pretrained weights with a saved state dict.
    if args.weights is not None:
        print("=> using saved weights [%s]" % args.weights)
        weights = torch.load(args.weights)
        model.load_state_dict(weights['state_dict'])

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            # AlexNet/VGG convention: parallelize only the conv features.
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            # strict=False tolerates key mismatches (e.g. missing/extra
            # layers) between the checkpoint and the current model.
            model.load_state_dict(checkpoint['state_dict'], strict=False)
            if ('optimizer' in checkpoint.keys()
                ):  # if no optimizer, then only load weights
                args.start_epoch = checkpoint['epoch']
                best_acc1 = checkpoint['best_acc1']
                if args.gpu is not None:
                    # best_acc1 may be from a checkpoint from a different GPU
                    best_acc1 = best_acc1.to(args.gpu)
                optimizer.load_state_dict(checkpoint['optimizer'])
            else:
                print('  No optimizer saved')
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    normalize = transforms.Normalize(mean=mean, std=std)

    if (args.no_data_aug):
        # Deterministic resize/crop (flip only) instead of random crops.
        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))
    else:
        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    # Shift/diagonal/save evaluation modes need a larger crop (256) so the
    # image can be shifted within it; diagonal/save also require batch 1.
    crop_size = 256 if (args.evaluate_shift or args.evaluate_diagonal
                        or args.evaluate_save) else 224
    args.batch_size = 1 if (args.evaluate_diagonal
                            or args.evaluate_save) else args.batch_size

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(crop_size),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if (args.val_debug):  # debug mode - train on val set for faster epochs
        train_loader = val_loader

    if (args.embed):
        # Drop into an interactive shell for debugging.
        embed()

    if args.save_weights is not None:  # "deparallelize" saved weights
        print("=> saving 'deparallelized' weights [%s]" % args.save_weights)
        # TO-DO: automatically save this during training
        if args.gpu is not None:
            torch.save({'state_dict': model.state_dict()}, args.save_weights)
        else:
            if (args.arch[:7] == 'alexnet' or args.arch[:3] == 'vgg'):
                # Only model.features was wrapped in DataParallel above.
                model.features = model.features.module
                torch.save({'state_dict': model.state_dict()},
                           args.save_weights)
            else:
                torch.save({'state_dict': model.module.state_dict()},
                           args.save_weights)
        return

    # Evaluation-only modes: each runs its validation variant and exits.
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return

    if (args.evaluate_shift):
        validate_shift(val_loader, model, args)
        return

    if (args.evaluate_diagonal):
        validate_diagonal(val_loader, model, args)
        return

    if (args.evaluate_save):
        validate_save(val_loader, mean, std, args)
        return

    if (args.cos_lr):
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, args.epochs)
        # Fast-forward the scheduler when resuming mid-run.
        for epoch in range(args.start_epoch):
            scheduler.step()

    # Main training loop.
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)

        if (not args.cos_lr):
            adjust_learning_rate(optimizer, epoch, args)
        else:
            scheduler.step()
            # NOTE(review): scheduler.get_lr() is deprecated in newer PyTorch
            # (use get_last_lr()) — confirm against the pinned torch version.
            print('[%03d] %.5f' % (epoch, scheduler.get_lr()[0]))

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        # Only rank 0 (per node) writes checkpoints in the
        # multiprocessing-distributed case.
        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                },
                is_best,
                epoch,
                out_dir=args.out_dir)
Code example #6
# Copyright (c) 2019, Adobe Inc. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike
# 4.0 International Public License. To view a copy of this license, visit
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode.

import antialiased_cnns

filter_size = 4  # can be 2,3,4,5
pretrained = True

# One representative per model family, instantiated at the chosen blur-filter
# size (same order as the original listing; `model` ends up bound to the last
# one, mobilenet_v2).
for _arch in ('resnet18', 'resnet34', 'resnet50', 'resnet101',
              'alexnet',
              'vgg16', 'vgg16_bn',
              'densenet121',
              'mobilenet_v2'):
    model = getattr(antialiased_cnns, _arch)(pretrained=pretrained,
                                             filter_size=filter_size)