Exemple #1
0
	def get_network(self):
		"""Build the online/target encoder pair and the BYOL-style predictor MLP.

		All encoders project to a 256-d embedding; both networks (and the
		predictor) are wrapped in DataParallel and moved to self.device.

		Fix: every constructor call passed ``non_linear_head`` twice
		(``non_linear_head=args.nonlinearhead`` and ``non_linear_head=True``),
		which is a SyntaxError; the explicit ``True`` is kept, matching the
		resnet50_cifar branch of the original.
		"""
		args = self.args
		if args.network == 'resnet18_cifar':
			network = ResNet18_cifar(256, dropout=args.dropout, mlpbn=True, non_linear_head=True)
			target_network = ResNet18_cifar(256, dropout=args.dropout, mlpbn=True, non_linear_head=True)
		elif args.network == 'resnet50_cifar':
			network = ResNet50_cifar(256, dropout=args.dropout, mlpbn=True, non_linear_head=True)
			target_network = ResNet50_cifar(256, dropout=args.dropout, mlpbn=True, non_linear_head=True)
		elif args.network == 'resnet18':
			network = resnet18(mlpbn=True, non_linear_head=True)
			target_network = resnet18(mlpbn=True, non_linear_head=True)
		elif args.network == 'resnet50':
			network = resnet50(mlpbn=True, non_linear_head=True)
			target_network = resnet50(mlpbn=True, non_linear_head=True)
		else:
			# Previously an unknown name fell through to a NameError below.
			raise NotImplementedError(args.network)
		self.network = nn.DataParallel(network, device_ids=self.device_ids)
		self.network.to(self.device)
		self.target_network = nn.DataParallel(target_network, device_ids=self.device_ids)
		self.target_network.to(self.device)
		# Predictor maps online embeddings to predicted target embeddings.
		self.predictor = nn.Sequential(
						nn.Linear(256, 256),
						nn.BatchNorm1d(256),
						nn.ReLU(inplace=True),
						nn.Linear(256, 256),
		)
		self.predictor = nn.DataParallel(self.predictor, device_ids=self.device_ids)
		self.predictor.to(self.device)
Exemple #2
0
def get_student_model(opt):
    """Create the student network and an identical key (momentum) copy.

    Every architecture except resnet50 has its final ``fc`` replaced by an
    empty ``nn.Sequential`` so the backbone features are returned directly;
    resnet50 instead keeps an 8192-dim projection head. Unknown architecture
    names yield ``(None, None)``.
    """
    arch = opt.student_arch

    if arch == 'resnet50':
        return resnet50(fc_dim=8192), resnet50(fc_dim=8192)

    factories = {
        'alexnet': alexnet,
        'mobilenet': mobilenet,
        'resnet18': resnet18,
    }
    factory = factories.get(arch)
    if factory is None:
        return None, None

    student = factory()
    student.fc = nn.Sequential()
    student_key = factory()
    student_key.fc = nn.Sequential()
    return student, student_key
Exemple #3
0
 def build_model(self) -> nn.Module:
     """Construct a ResNet-18 classifier for this task.

     Returns a model whose head outputs ``len(self.classes)`` logits.
     NOTE: the original annotation said ``-> None`` although the method
     clearly returns the model; corrected to ``nn.Module``.
     """
     if self.params.pretrained:
         model = resnet18(pretrained=True)
         # Pretrained head has 1000 ImageNet outputs; swap in our class count.
         model.fc = nn.Linear(512, len(self.classes))
     else:
         model = resnet18(pretrained=False, num_classes=len(self.classes))
     return model
Exemple #4
0
    def build_model(self) -> nn.Module:
        """Create a ResNet-18 sized to this task's number of classes."""
        n_out = len(self.classes)
        if not self.params.pretrained:
            return resnet18(pretrained=False, num_classes=n_out)
        model = resnet18(pretrained=True)
        # model is pretrained on ImageNet; replace the head for our classes.
        model.fc = nn.Linear(512, n_out)
        return model
Exemple #5
0
def initialize_model(model_name, num_dense_layers=2, dropout=0):
    '''
    Initialise a model with a custom head
    to predict both sequence length and digits

    Parameters
    ----------
    model_name : str
        Model Name can be either:
        ResNet
        VGG
        ConvNet
        BaselineCNN_dropout
    num_dense_layers : int
        Number of dense layers (ConvNet only).
    dropout : float
        Dropout probability (ConvNet / BaselineCNNdropout only).

    Returns
    -------
    model : object
        The model to be initialize

    Notes
    -----
    Bug fix: the "ResNet34" branch previously constructed ``resnet18``.
    Unknown names return None (unchanged behaviour).
    '''

    if model_name[:3] == "VGG":
        model = VGG(model_name, num_classes=7)
        model.classifier = CustomHead(512)

    elif model_name[:6] == "ResNet":
        if model_name == "ResNet18":
            model = resnet18(num_classes=7)
            model.linear = CustomHead(512)

        elif model_name == "ResNet34":
            model = resnet34(num_classes=7)
            model.linear = CustomHead(512)

        # Bottleneck variants expose 512 * expansion(=4) features.
        elif model_name == "ResNet50":
            model = resnet50(num_classes=7)
            model.linear = CustomHead(512 * 4)

        elif model_name == "ResNet101":
            model = resnet101(num_classes=7)
            model.linear = CustomHead(512 * 4)

        elif model_name == "ResNet152":
            model = resnet152(num_classes=7)
            model.linear = CustomHead(512 * 4)

    elif model_name == "ConvNet":
        model = ConvModel(num_dense_layers=num_dense_layers, dropout=dropout)

    elif model_name == "BaselineCNNdropout":
        model = BaselineCNNdropout(num_classes=7, p=dropout)
        model.fc2 = CustomHead(4096)

    return model
Exemple #6
0
    def __init__(self, args, device):
        """Trainer setup: build the backbone, optionally resume from a
        checkpoint, then create data loaders, optimizer and LR scheduler.

        :param args: parsed CLI arguments (network, n_classes, pretrained,
            resume, epochs, learning_rate, train_all, nesterov, source,
            target, ...)
        :param device: torch device the model is moved to
        """
        self.args = args
        self.device = device
        # Backbone selection; any unrecognised name falls back to resnet18.
        if args.network == 'resnet18':
            model = resnet18(pretrained=self.args.pretrained,
                             classes=args.n_classes)
        elif args.network == 'resnet50':
            model = resnet50(pretrained=self.args.pretrained,
                             classes=args.n_classes)
        else:
            model = resnet18(pretrained=self.args.pretrained,
                             classes=args.n_classes)
        self.model = model.to(device)

        # Resume: restore weights and starting epoch; a bad path is fatal.
        if args.resume:
            if isfile(args.resume):
                print(f"=> loading checkpoint '{args.resume}'")
                checkpoint = torch.load(args.resume)
                self.args.start_epoch = checkpoint['epoch']
                self.model.load_state_dict(checkpoint['model'])
                print(
                    f"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})"
                )
            else:
                raise ValueError(f"Failed to find checkpoint {args.resume}")

        self.source_loader, self.val_loader = data_helper.get_train_dataloader(
            args, patches=model.is_patch_based())
        # self.target_loader = data_helper.get_val_dataloader(args, patches=model.is_patch_based())
        self.target_loader = data_helper.get_tgt_dataloader(
            self.args, patches=model.is_patch_based())
        self.test_loaders = {
            "val": self.val_loader,
            "test": self.target_loader
        }
        self.len_dataloader = len(self.source_loader)
        print("Dataset size: train %d, val %d, test %d" %
              (len(self.source_loader.dataset), len(
                  self.val_loader.dataset), len(self.target_loader.dataset)))
        self.optimizer, self.scheduler = get_optim_and_scheduler(
            model,
            args.epochs,
            args.learning_rate,
            args.train_all,
            nesterov=args.nesterov)
        self.n_classes = args.n_classes
        # If the target domain also appears among the sources, remember its
        # index so it can be handled specially elsewhere in the trainer.
        if args.target in args.source:
            self.target_id = args.source.index(args.target)
            print("Target in source: %d" % self.target_id)
            print(args.source)
        else:
            self.target_id = None
        # NOTE(review): presumably the best-3 metric values seen so far,
        # updated during evaluation — confirm against the rest of the class.
        self.topk = [0 for _ in range(3)]
Exemple #7
0
    def __init__(self, model='resnet18'):
        """Backbone wrapper: a torchvision ResNet truncated after layer4.

        For resnet50/resnet101 a 1x1 conv (1024 -> 256) plus BatchNorm is
        also created to reduce channel width. Unsupported names raise
        NotImplementedError.
        """
        super(VOSNet, self).__init__()
        self.model = model

        if model == 'resnet18':
            base = resnet18(pretrained=True)
        elif model == 'resnet50':
            base = resnet50(pretrained=True)
        elif model == 'resnet101':
            base = resnet101(pretrained=True)
        else:
            raise NotImplementedError

        # Keep children 0..7 (stem through layer4), dropping pool/fc.
        self.backbone = nn.Sequential(*list(base.children())[0:8])

        if model in ('resnet50', 'resnet101'):
            # Deeper backbones emit 1024 channels here; project down to 256.
            self.adjust_dim = nn.Conv2d(1024,
                                        256,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0,
                                        bias=False)
            self.bn256 = nn.BatchNorm2d(256)
Exemple #8
0
def generate_combined_model(opt, num_new_classes):
    """Load a pretrained 3D ResNet-18 and wrap it in a CombinedModel that adds
    num_new_classes outputs.

    Requires CUDA and a pretrained checkpoint (opt.pretrain / opt.pretrain_path).
    Returns (model, model.parameters(), prec1) where prec1 is the checkpoint's
    recorded best precision.

    Fix: ``raise 'string'`` is invalid in Python 3 (TypeError: exceptions must
    derive from BaseException); replaced with RuntimeError carrying the same
    message.
    """
    old_model = resnet.resnet18(num_classes=opt.num_classes,
                                shortcut_type='A',
                                sample_size=opt.sample_size,
                                sample_duration=opt.sample_duration)
    if not opt.no_cuda:
        old_model = old_model.cuda()
        old_model = nn.DataParallel(old_model, device_ids=None)
        pytorch_total_params = sum(p.numel() for p in old_model.parameters()
                                   if p.requires_grad)
        print('Total number of trainable parameters {}'.format(
            pytorch_total_params))
        if opt.pretrain:
            assert opt.pretrain_path is not None, 'Please give the pretrained path!'
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            prec1 = pretrain['best_prec1']
            old_model.load_state_dict(pretrain['state_dict'])
        else:
            raise RuntimeError('Use your best model!')
    else:
        raise RuntimeError('Buy a GPU please!')

    model = CombinedModel(old_model, num_new_classes, opt)
    model.new_fc = model.new_fc.cuda()

    return model, model.parameters(), prec1
Exemple #9
0
 def get_network(self):
     """Build the 128-d embedding encoder named by args.network and wrap it
     in DataParallel on self.device.

     Fixes: the 'alexnet_cifar' test was a stray ``if`` that broke the
     ``elif`` chain (harmless here since the branches are mutually
     exclusive, but fragile); an unknown network name previously crashed
     with a NameError on ``network`` — it now raises NotImplementedError.
     """
     args = self.args
     if args.network == 'alexnet':
         network = alexnet(128)
     elif args.network == 'alexnet_cifar':
         network = AlexNet_cifar(128)
     elif args.network == 'resnet18_cifar':
         network = ResNet18_cifar(128,
                                  dropout=args.dropout,
                                  non_linear_head=args.nonlinearhead,
                                  mlpbn=True)
     elif args.network == 'resnet50_cifar':
         network = ResNet50_cifar(128, dropout=args.dropout, mlpbn=True)
     elif args.network == 'wide_resnet28':
         network = WideResNetInstance(28, 2)
     elif args.network == 'resnet18':
         network = resnet18(non_linear_head=args.nonlinearhead, mlpbn=True)
     elif args.network == 'pre-resnet18':
         network = PreActResNet18(128)
     elif args.network == 'resnet50':
         network = resnet50(non_linear_head=args.nonlinearhead, mlpbn=True)
     elif args.network == 'pre-resnet50':
         network = PreActResNet50(128)
     elif args.network == 'shufflenet':
         network = shufflenet_v2_x1_0(num_classes=128,
                                      non_linear_head=args.nonlinearhead)
     else:
         raise NotImplementedError(args.network)
     self.network = nn.DataParallel(network, device_ids=self.device_ids)
     self.network.to(self.device)
def main():
    """Train a ResNet-18 on a 50-class ImageNet subset with AMP and SGD."""
    device = 'cuda'
    epochs = 200
    batch_size = 256
    lr = 1e-1
    normalize = {
        'mean': [0.485, 0.456, 0.406],
        'std': [0.229, 0.224, 0.225]
    }
    scaler = torch.cuda.amp.GradScaler()

    data_root = os.environ['DATAROOT']
    train_loader = DataLoader(
        ImageNet(data_root, transform=common_train((224, 224)), train=True, subset=50),
        batch_size=batch_size, shuffle=True, pin_memory=False, num_workers=12)
    test_loader = DataLoader(
        ImageNet(data_root, transform=common_test((224, 224)), train=False, subset=50),
        batch_size=batch_size, shuffle=True, pin_memory=False, num_workers=12)

    model = resnet18(**normalize, class_num=50).to(device)
    model = nn.DataParallel(model, device_ids=list(range(torch.cuda.device_count())))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9,
                          weight_decay=1e-4, nesterov=True)
    # Cosine decay from lr down to 1e-4 over the full run.
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs, 1e-4, -1)

    runner = Runner(model, train_loader, test_loader, criterion, optimizer,
                    scheduler, scaler, epochs, 10)

    tqdm.write("Start training with Resnet18.")
    runner.train()
Exemple #11
0
def get_model(opt):
    """Build a 3D ResNet-18 (7 classes) and optionally load trained weights.

    With CUDA the model is wrapped in DataParallel before loading; on CPU the
    ``module.`` prefix saved by DataParallel is stripped from checkpoint keys.
    """
    model = resnet.resnet18(num_classes=7,
                            shortcut_type='A',
                            sample_size=112,
                            sample_duration=16)

    if opt.no_cuda:
        if opt.model_path:
            print('loading trained model {}'.format(opt.model_path))
            pretrain = torch.load(opt.model_path, map_location='cpu')

            from collections import OrderedDict
            # Checkpoint was saved from DataParallel; drop the 'module.' prefix.
            stripped = OrderedDict(
                (k[7:], v) for k, v in pretrain['state_dict'].items())
            model.load_state_dict(stripped)
        return model

    model = model.cuda()
    model = nn.DataParallel(model, device_ids=None)
    if opt.model_path:
        print('loading trained model {}'.format(opt.model_path))
        pretrain = torch.load(opt.model_path)
        model.load_state_dict(pretrain['state_dict'])
    return model
    def get_network(self):
        """Build the backbone plus a fresh linear classifier, then load a
        pretrained checkpoint into the backbone (skipping fc weights).

        Fix: the classifier lines referenced the bare name ``args`` (a
        NameError inside this method — only ``self.args`` exists); they now
        use ``self.args.n_cls``.
        """
        if self.args.network == 'resnet50':
            self.network = resnet50()
            self.classifier = nn.Linear(2048, self.args.n_cls).to(self.device)
        elif self.args.network == 'resnet18':
            self.network = resnet18()
            self.classifier = nn.Linear(512, self.args.n_cls).to(self.device)
        else:
            raise NotImplementedError
        self.network = nn.DataParallel(self.network,
                                       device_ids=self.device_ids)
        self.network.to(self.device)
        ckpt = torch.load(self.args.pretrain_path)
        state_dict = ckpt['state_dict']
        # Keep only checkpoint weights that exist in our network, and never
        # the fc head (the fresh classifier replaces it).
        valid_state_dict = {
            k: v
            for k, v in state_dict.items()
            if k in self.network.state_dict() and 'fc.' not in k
        }
        # Parameters missing from the checkpoint keep their random init.
        for k, v in self.network.state_dict().items():
            if k not in valid_state_dict:
                logging.info('{}: Random Init'.format(k))
                valid_state_dict[k] = v
        self.network.load_state_dict(valid_state_dict)

        # Linear-probe mode: freeze the backbone; only self.classifier trains.
        if self.args.fc_only:
            for param in self.network.parameters():
                param.requires_grad = False
Exemple #13
0
def generate_model(opt):
    """Construct a 3D ResNet of the requested depth.

    opt.mode selects whether the final fc (score head) is kept ('score') or
    dropped ('feature'); CUDA models are wrapped in DataParallel.
    """
    assert opt.model_name in ['resnet']

    if opt.model_name == 'resnet':
        assert opt.mode in ['score', 'feature']
        # 'score' keeps the classification head, 'feature' strips it.
        last_fc = opt.mode == 'score'

        assert opt.model_depth in [18, 34, 50, 101]
        builders = {
            18: resnet.resnet18,
            34: resnet.resnet34,
            50: resnet.resnet50,
            101: resnet.resnet101,
        }
        model = builders[opt.model_depth](num_classes=opt.n_classes,
                                          shortcut_type=opt.resnet_shortcut,
                                          last_fc=last_fc)

    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)

    return model
Exemple #14
0
def get_model(train_model):
    """Return a freshly constructed network for the given name.

    Supports resnet{18,34,50,101,152} (and their ``_copy`` variants from the
    resnet_copy module), vgg{11,13,16,19}, nin and googlenet; unknown names
    return None.

    Fix: the 'resnet152_copy' branch previously re-tested 'resnet152'
    (already matched above), so resnet_copy.resnet152 was unreachable.
    """
    if train_model == 'resnet18':
        return resnet.resnet18()
    elif train_model == 'resnet34':
        return resnet.resnet34()
    elif train_model == 'resnet50':
        return resnet.resnet50()
    elif train_model == 'resnet101':
        return resnet.resnet101()
    elif train_model == 'resnet152':
        return resnet.resnet152()
    elif train_model == 'resnet18_copy':
        return resnet_copy.resnet18()
    elif train_model == 'resnet34_copy':
        return resnet_copy.resnet34()
    elif train_model == 'resnet50_copy':
        return resnet_copy.resnet50()
    elif train_model == 'resnet101_copy':
        return resnet_copy.resnet101()
    elif train_model == 'resnet152_copy':
        return resnet_copy.resnet152()
    elif train_model == 'vgg11':
        return vgg11()
    elif train_model == 'vgg13':
        return vgg13()
    elif train_model == 'vgg16':
        return vgg16()
    elif train_model == 'vgg19':
        return vgg19()
    elif train_model == 'nin':
        return nin()
    elif train_model == 'googlenet':
        return googlenet()
Exemple #15
0
def select_model(model_def):
    """Instantiate the requested model (case-insensitive name).

    Prints which model was loaded; raises NameError for unknown names.
    """
    builders = {
        'hopenet': (lambda: HopeNet(), 'HopeNet'),
        'resnet10': (lambda: resnet10(pretrained=False, num_classes=29 * 2), 'ResNet10'),
        'resnet18': (lambda: resnet18(pretrained=False, num_classes=29 * 2), 'ResNet18'),
        'resnet50': (lambda: resnet50(pretrained=False, num_classes=29 * 2), 'ResNet50'),
        'resnet101': (lambda: resnet101(pretrained=False, num_classes=29 * 2), 'ResNet101'),
        'graphunet': (lambda: GraphUNet(in_features=2, out_features=3), 'GraphUNet'),
        'graphnet': (lambda: GraphNet(in_features=2, out_features=3), 'GraphNet'),
    }
    entry = builders.get(model_def.lower())
    if entry is None:
        raise NameError('Undefined model')
    build, label = entry
    model = build()
    print('{} is loaded'.format(label))
    return model
Exemple #16
0
 def __init__(self, args, device):
     """Trainer setup: pretrained ResNet-18 backbone, domain-generalization
     data loaders, optimizer and LR scheduler.

     :param args: parsed CLI arguments (n_classes, epochs, learning_rate,
         train_all, nesterov, source, target, ...)
     :param device: torch device the model is moved to
     """
     self.args = args
     self.device = device
     model = resnet18(pretrained=True, classes=args.n_classes)
     self.model = model.to(device)
     # print(self.model)
     self.source_loader, self.val_loader = data_helper.get_train_dataloader(
         args, patches=model.is_patch_based())
     self.target_loader = data_helper.get_val_dataloader(
         args, patches=model.is_patch_based())
     self.test_loaders = {
         "val": self.val_loader,
         "test": self.target_loader
     }
     self.len_dataloader = len(self.source_loader)
     print("Dataset size: train %d, val %d, test %d" %
           (len(self.source_loader.dataset), len(
               self.val_loader.dataset), len(self.target_loader.dataset)))
     self.optimizer, self.scheduler = get_optim_and_scheduler(
         model,
         args.epochs,
         args.learning_rate,
         args.train_all,
         nesterov=args.nesterov)
     self.n_classes = args.n_classes
     # If the target domain also appears among the sources, remember its
     # index so it can be handled specially elsewhere in the trainer.
     if args.target in args.source:
         self.target_id = args.source.index(args.target)
         print("Target in source: %d" % self.target_id)
         print(args.source)
     else:
         self.target_id = None
def network_config(args):
    """Build the network (resnet34 or default resnet18), wrap it in
    DataParallel on CUDA, create an SGD optimizer and seed all RNGs.

    Returns (network, optimizer, use_cuda).
    """
    if args.network == 'resnet34':
        network = resnet34()
        print('-------> Creating network resnet-34')
    else:
        network = resnet18()
        print('-------> Creating network resnet-18')
    network = torch.nn.DataParallel(network).cuda()

    param_count_m = sum(p.numel() for p in network.parameters()) / 1000000.0
    print('Total params: %2.fM' % param_count_m)
    cudnn.benchmark = True

    optimizer = optim.SGD(network.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    use_cuda = torch.cuda.is_available()

    # Seed every RNG from one freshly drawn seed so a run is reproducible.
    seed = random.randint(1, 10000)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if use_cuda:
        torch.cuda.manual_seed_all(seed)

    return network, optimizer, use_cuda
def get_network(args, model_args, use_gpu=True):
    """Return the requested ResNet variant built with ``model_args``.

    Unsupported names print a message and exit the process; with
    ``use_gpu`` the network is moved to CUDA.
    """
    supported = ('resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152')
    if args.net not in supported:
        print('the network name you have entered is not supported yet')
        sys.exit()

    import models.resnet
    # Each supported name is a factory function exported by models.resnet.
    net = getattr(models.resnet, args.net)(**model_args)

    if use_gpu:
        net = net.cuda()

    return net
    def model_selection(self):
        """Instantiate the model named in ``self.config.model_name``, sized to
        ``self.input_shape`` channels and ``self.label_num`` classes.

        Fix: the final two branches re-tested "resnet50" and "resnet101",
        which were already handled above — the wide-ResNet constructors were
        unreachable dead code. They are assumed to correspond to the
        torchvision-style names "wide_resnet50_2" / "wide_resnet101_2";
        confirm against the config values actually used.
        Returns None for unknown names (unchanged behaviour).
        """
        in_ch = int(self.input_shape[0])
        if self.config.model_name == "resnet18":
            return resnet18(in_channels=in_ch, num_classes=self.label_num)
        elif self.config.model_name == "resnet34":
            return resnet34(in_channels=in_ch, num_classes=self.label_num)
        elif self.config.model_name == "resnet50":
            return resnet50(in_channels=in_ch, num_classes=self.label_num)
        elif self.config.model_name == "resnet101":
            return resnet101(in_channels=in_ch, num_classes=self.label_num)
        elif self.config.model_name == "resnet152":
            return resnet152(in_channels=in_ch, num_classes=self.label_num)
        elif self.config.model_name == "convnet":
            return convnet(in_channels=in_ch, num_classes=self.label_num)
        elif self.config.model_name == "wide_resnet50_2":
            return wide_resnet50_2(in_channels=in_ch,
                                   num_classes=self.label_num)
        elif self.config.model_name == "wide_resnet101_2":
            return wide_resnet101_2(in_channels=in_ch,
                                    num_classes=self.label_num)
Exemple #20
0
def main():
    """Evaluate a checkpointed ResNet-18 on one CIFAR-10-C corruption type."""
    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])
    test_loader = torch.utils.data.DataLoader(
        CIFAR10C(root=args.data,
                 transform=transform_test,
                 attack_type=args.attack_type),
        batch_size=args.batch_size,
        shuffle=False)

    # init model, ResNet18() can be also used here for training
    model = resnet18(num_classes=10).to(device)

    assert args.checkpoint != ''

    checkpoint = torch.load(args.checkpoint, map_location="cpu")
    # A checkpoint is either a wrapper dict or a raw state dict.
    state_dict = checkpoint['state_dict'] if 'state_dict' in checkpoint else checkpoint

    model.load_state_dict(state_dict)
    log.info('read checkpoint {}'.format(args.checkpoint))

    _, tacc = eval_test(model, device, test_loader, log)
    log.info("For attack type {}, tacc is {}".format(args.attack_type, tacc))
    return
Exemple #21
0
def main():
    """Set up ResNet-18 + ArcFace margin head with SGD and step LR decay."""
    device = torch.device("cuda")

    model = resnet.resnet18()
    print(model)

    num_classes = 13938
    # 512-d embeddings into an additive-angular-margin product head.
    metric_fc = metrics.ArcMarginProduct(512,
                                         num_classes,
                                         s=30,
                                         m=0.5,
                                         easy_margin=False)

    model = DataParallel(model.to(device))
    metric_fc = DataParallel(metric_fc.to(device))

    lr = 1e-1  # initial learning rate
    # One optimizer over both the backbone and the margin head.
    optimizer = torch.optim.SGD(
        [{'params': model.parameters()},
         {'params': metric_fc.parameters()}],
        lr=lr,
        weight_decay=5e-4)
    scheduler = StepLR(optimizer, step_size=10, gamma=0.1)
Exemple #22
0
def create_model(num_classes, args):
    """Build a classifier for ``num_classes`` outputs based on args.network.

    Codes: 100=resnet18, 101=resnet50, 102=BiT (resnetv2 from args.bit_model),
    103=resnet101, 104=microsoftvision resnet50; anything else falls back to
    resnet50 with a plain linear head.

    Fix: the branch for network 104 assigned ``model.fc = model.fc = ...``
    (a redundant double assignment); reduced to a single assignment.
    """
    if args.network == 100:
        model = resnet.resnet18(pretrained=args.pretrain)
        num_ftrs = model.fc.in_features
        model.fc = nn.Sequential(nn.Dropout(0.5),
                                 nn.Linear(num_ftrs, num_classes))
    elif args.network == 101:
        model = resnet.resnet50(pretrained=args.pretrain)
        num_ftrs = model.fc.in_features
        model.fc = nn.Sequential(nn.Dropout(0.5),
                                 nn.Linear(num_ftrs, num_classes))
    elif args.network == 102:
        # BiT checkpoint: architecture name is encoded in the file name.
        architecture = os.path.basename(args.bit_model)
        model = resnetv2.KNOWN_MODELS[architecture.split('.')[0]](
            head_size=num_classes, zero_head=True)
        model.load_from(np.load(args.bit_model))
        print(f'Load pre-trained model {args.bit_model}')
    elif args.network == 103:
        model = resnet.resnet101(pretrained=args.pretrain)
        num_ftrs = model.fc.in_features
        model.fc = nn.Sequential(nn.Dropout(0.5),
                                 nn.Linear(num_ftrs, num_classes))
    elif args.network == 104:
        model = microsoftvision.resnet50(pretrained=True)
        model.fc = nn.Sequential(nn.Dropout(0.5),
                                 nn.Linear(2048, num_classes))
    else:
        print('model not available! Using PyTorch ResNet50 as default')
        model = resnet.resnet50(pretrained=args.pretrain)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)

    return model
def define_model(is_resnet, is_densenet, is_senet):
    """Assemble an encoder-decoder depth model from the selected backbone."""
    use18 = True # True
    if is_resnet:
        if use18:
            # Warm-start the ResNet-18 encoder from a monodepth checkpoint,
            # copying only the weights whose names match this encoder.
            stereoModel = Resnet18Encoder(3)
            model_dict = stereoModel.state_dict()
            encoder_dict = torch.load('./models/monodepth_resnet18_001.pth', map_location='cpu')
            new_dict = {k: v for k, v in encoder_dict.items() if k in model_dict}
            stereoModel.load_state_dict(new_dict)
            model = net.model(stereoModel, num_features=512, block_channel=[64, 128, 256, 512])
        else:
            original_model = resnet.resnet18(pretrained=True)
            Encoder = modules.E_resnet(original_model)
            model = net.model(Encoder, num_features=512, block_channel=[64, 128, 256, 512])

    if is_densenet:
        Encoder = modules.E_densenet(densenet.densenet161(pretrained=True))
        model = net.model(Encoder, num_features=2208, block_channel=[192, 384, 1056, 2208])
    if is_senet:
        Encoder = modules.E_senet(senet.senet154(pretrained='imagenet'))
        model = net.model(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048])

    return model
Exemple #24
0
    def __init__(self,
                 resnet_in_channels=1,
                 resnet_layers=18,
                 ief_iters=3):
        """
        :param resnet_in_channels: 1 if input silhouette/segmentation, 1 + num_joints if
        input silhouette/segmentation + joints.
        :param resnet_layers: number of layers in ResNet backbone (18 or 50)
        :param ief_iters: number of IEF iterations.
        """
        super(SingleInputRegressor, self).__init__()

        # 3 camera/global params + 24 joints * 6D rotation + 10 shape params.
        num_pose_params = 24*6
        num_output_params = 3 + num_pose_params + 10

        if resnet_layers == 18:
            spec = (resnet18, 512, [512, 512])
        elif resnet_layers == 50:
            spec = (resnet50, 2048, [1024, 1024])
        else:
            # Mirrors the original: unsupported depths leave the modules unset.
            spec = None

        if spec is not None:
            encoder_fn, feat_dim, hidden_dims = spec
            self.image_encoder = encoder_fn(in_channels=resnet_in_channels,
                                            pretrained=False)
            self.ief_module = IEFModule(hidden_dims,
                                        feat_dim,
                                        num_output_params,
                                        iterations=ief_iters)
Exemple #25
0
def centernet_resnet18_coco(pretrained_backbone=False,
                            classes=80,
                            data_format="channels_last",
                            **kwargs):
    """
    CenterNet model on the base of ResNet-18 for COCO Detection from 'Objects as Points,'
    https://arxiv.org/abs/1904.07850.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 80
        Number of classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    backbone = resnet18(pretrained=pretrained_backbone).features
    # Drop the final pooling stage so the backbone emits spatial feature maps.
    backbone._layers.pop()
    return get_centernet(backbone=backbone,
                         backbone_out_channels=512,
                         classes=classes,
                         model_name="centernet_resnet18_coco",
                         data_format=data_format,
                         **kwargs)
Exemple #26
0
def generate_cammodel(config):
    """Build a 3D ResNet (depth 10/18/34/50) for CAM generation.

    Returns (model, model.parameters()); on CUDA the model is moved to GPU.
    """
    from models.resnet import get_fine_tuning_parameters

    # All depths share the same constructor keyword arguments.
    common_kwargs = dict(num_classes=config.n_classes,
                         shortcut_type=config.resnet_shortcut,
                         sample_size=config.sample_size,
                         sample_duration=config.sample_duration,
                         channels=config.channels)

    if config.model_depth == 10:
        model = resnet.resnet10(**common_kwargs)
    elif config.model_depth == 18:
        model = resnet.resnet18(**common_kwargs)
    elif config.model_depth == 34:
        model = resnet.resnet34(**common_kwargs)
    elif config.model_depth == 50:
        model = resnet.resnet50(**common_kwargs)

    if not config.no_cuda:
        model = model.cuda()
        #model = nn.DataParallel(model, device_ids=None)
    return model, model.parameters()
Exemple #27
0
def get_backbone(p):
    """Instantiate the backbone network described by the config dict ``p``.

    Returns:
        ``(backbone, backbone_channels)`` — the module and its output
        channel count (an int for ResNets, a list of ints for HRNet,
        or their sum once HRNet features are fused).
    """
    name = p['backbone']

    if name == 'resnet18':
        from models.resnet import resnet18
        backbone = resnet18(p['backbone_kwargs']['pretrained'])
        backbone_channels = 512
    elif name == 'resnet50':
        from models.resnet import resnet50
        backbone = resnet50(p['backbone_kwargs']['pretrained'])
        backbone_channels = 2048
    elif name == 'hrnet_w18':
        from models.seg_hrnet import hrnet_w18
        backbone = hrnet_w18(p['backbone_kwargs']['pretrained'])
        backbone_channels = [18, 36, 72, 144]
    else:
        raise NotImplementedError

    # Optionally add dilated convolutions (ResNet backbones only).
    if p['backbone_kwargs']['dilated']:
        assert name in ['resnet18', 'resnet50']
        from models.resnet_dilated import ResnetDilated
        backbone = ResnetDilated(backbone)

    # Optionally fuse HRNet's multi-scale feature maps into one tensor.
    if p['backbone_kwargs'].get('fuse_hrnet'):
        from models.seg_hrnet import HighResolutionFuse
        backbone = torch.nn.Sequential(
            backbone, HighResolutionFuse(backbone_channels, 256))
        backbone_channels = sum(backbone_channels)

    return backbone, backbone_channels
Exemple #28
0
def Resnet(opt):
    """Construct a ResNet classifier of the requested depth.

    Depths 18/34/50 additionally forward ``opt.pool``; the remaining
    depths are built with ``num_classes`` only, matching the constructor
    signatures in the ``resnet`` module.
    """
    assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]

    constructors = {
        10: resnet.resnet10,
        18: resnet.resnet18,
        34: resnet.resnet34,
        50: resnet.resnet50,
        101: resnet.resnet101,
        152: resnet.resnet152,
        200: resnet.resnet200,
    }
    kwargs = {'num_classes': opt.n_classes}
    if opt.model_depth in (18, 34, 50):
        kwargs['pool'] = opt.pool
    return constructors[opt.model_depth](**kwargs)
Exemple #29
0
def select_model(model_def):
    """Return a freshly constructed model for the given architecture name.

    The lookup is case-insensitive; unknown names raise ``NameError``.
    """
    key = model_def.lower()

    # Architecture name -> (constructor thunk, load message). Thunks keep
    # the model classes from being touched unless that entry is selected.
    builders = {
        'hourglass': (lambda: Net_HM_HG(21), 'HourGlass Net is loaded'),
        'posehg': (lambda: Net_Pose_HG(21), 'PoseHG Net is loaded'),
        'graphuhand': (lambda: GraphUHandNet(), 'GraphUHand Net is loaded'),
        'resnet10': (lambda: resnet10(pretrained=False, num_classes=21 * 2),
                     'ResNet10 is loaded'),
        'resnet18': (lambda: resnet18(pretrained=False, num_classes=21 * 2),
                     'ResNet18 is loaded'),
        'resnet50': (lambda: resnet50(pretrained=False, num_classes=21 * 2),
                     'ResNet50 is loaded'),
        'resnet101': (lambda: resnet101(pretrained=False, num_classes=21 * 2),
                      'ResNet101 is loaded'),
        'graphunet': (lambda: GraphUNet(in_features=2, out_features=3),
                      'GraphUNet is loaded'),
        'graphnet': (lambda: GraphNet(in_features=2, out_features=3),
                     'GraphNet is loaded'),
    }
    if key not in builders:
        raise NameError('Undefined model')

    construct, message = builders[key]
    model = construct()
    print(message)
    return model
def main():
    """Train a ResNet-18 on an ImageNet subset with DDP and mixed precision.

    Assumes a torch.distributed launch (env:// rendezvous over NCCL) and a
    dataset root supplied via the DATAROOT environment variable.
    """
    # Distributed setting.
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    device_id = get_device_id()
    torch.cuda.set_device(device_id)
    device = f'cuda:{device_id}'

    # Automatic mixed precision.
    scaler = torch.cuda.amp.GradScaler()

    # Hyper-parameters.
    batch_size = 128
    normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
    lr = 1e-1
    epochs = 200

    data_root = os.environ['DATAROOT']
    trainset = ImageNet(data_root,
                        transform=common_train((224, 224)),
                        train=True,
                        subset=50)
    testset = ImageNet(data_root,
                       transform=common_test((224, 224)),
                       train=False,
                       subset=50)

    # Use distributed sampler to map data parts to different CUDA devices.
    trainsampler = torch.utils.data.distributed.DistributedSampler(trainset)
    testsampler = torch.utils.data.distributed.DistributedSampler(testset)
    loader_kwargs = dict(batch_size=batch_size,
                         pin_memory=False,
                         num_workers=12)
    train_loader = DataLoader(trainset, sampler=trainsampler, **loader_kwargs)
    test_loader = DataLoader(testset, sampler=testsampler, **loader_kwargs)

    model = resnet18(**normalize, class_num=50).to(device)
    # Distributed: convert the BN layers of the model into sync-BN layers.
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    # Distributed: create DDP model.
    model = nn.parallel.DistributedDataParallel(model,
                                                device_ids=[device_id],
                                                output_device=device_id)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=1e-4,
                          nesterov=True)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs, 1e-4,
                                                     -1)

    # Use DistRunner to run DDP train and evaluation.
    runner = DistRunner(model, train_loader, test_loader, criterion, optimizer,
                        scheduler, scaler, epochs, 10)

    tqdm.write("Start training with Resnet18.")
    runner.train()
Exemple #31
0
def generate_model(opt):
    """Construct a 3D (video) CNN and its trainable parameter groups.

    Dispatches on ``opt.model`` (resnet / preresnet / wideresnet / resnext /
    densenet) and ``opt.model_depth``, optionally moves the model to CUDA and
    wraps it in ``nn.DataParallel``, and — when ``opt.pretrain_path`` is set —
    loads a pretrained checkpoint and replaces the final classifier layer
    with a fresh one sized for ``opt.n_finetune_classes``.

    Returns:
        ``(model, parameters)`` — ``parameters`` is the fine-tuning subset
        from ``get_fine_tuning_parameters`` when a pretrained checkpoint was
        loaded, otherwise ``model.parameters()``.
    """
    assert opt.model in [
        'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet'
    ]

    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]

        # Imported here so the name used at the bottom of the function
        # resolves to the variant matching the selected architecture.
        from models.resnet import get_fine_tuning_parameters

        if opt.model_depth == 10:
            model = resnet.resnet10(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 18:
            model = resnet.resnet18(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = resnet.resnet34(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = resnet.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = resnet.resnet101(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = resnet.resnet152(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = resnet.resnet200(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'wideresnet':
        assert opt.model_depth in [50]

        from models.wide_resnet import get_fine_tuning_parameters

        if opt.model_depth == 50:
            model = wide_resnet.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                k=opt.wide_resnet_k,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'resnext':
        assert opt.model_depth in [50, 101, 152]

        from models.resnext import get_fine_tuning_parameters

        if opt.model_depth == 50:
            model = resnext.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                cardinality=opt.resnext_cardinality,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = resnext.resnet101(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                cardinality=opt.resnext_cardinality,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = resnext.resnet152(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                cardinality=opt.resnext_cardinality,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'preresnet':
        assert opt.model_depth in [18, 34, 50, 101, 152, 200]

        from models.pre_act_resnet import get_fine_tuning_parameters

        if opt.model_depth == 18:
            model = pre_act_resnet.resnet18(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = pre_act_resnet.resnet34(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = pre_act_resnet.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = pre_act_resnet.resnet101(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = pre_act_resnet.resnet152(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = pre_act_resnet.resnet200(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'densenet':
        assert opt.model_depth in [121, 169, 201, 264]

        from models.densenet import get_fine_tuning_parameters

        if opt.model_depth == 121:
            model = densenet.densenet121(
                num_classes=opt.n_classes,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 169:
            model = densenet.densenet169(
                num_classes=opt.n_classes,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 201:
            model = densenet.densenet201(
                num_classes=opt.n_classes,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 264:
            model = densenet.densenet264(
                num_classes=opt.n_classes,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)

    # GPU path: DataParallel-wrap first, so the checkpoint's state-dict keys
    # are expected to carry the 'module.' prefix (presumably the checkpoint
    # was saved from a DataParallel model — verify against the trainer).
    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)

        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']

            model.load_state_dict(pretrain['state_dict'])

            # Swap in a new classifier head sized for fine-tuning; densenet
            # exposes it as `.classifier`, the ResNet family as `.fc`.
            if opt.model == 'densenet':
                model.module.classifier = nn.Linear(
                    model.module.classifier.in_features, opt.n_finetune_classes)
                model.module.classifier = model.module.classifier.cuda()
            else:
                model.module.fc = nn.Linear(model.module.fc.in_features,
                                            opt.n_finetune_classes)
                model.module.fc = model.module.fc.cuda()

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters
    else:
        # CPU path: same pretrain/fine-tune logic, but no DataParallel
        # wrapper, so attributes are accessed directly on the model.
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']

            model.load_state_dict(pretrain['state_dict'])

            if opt.model == 'densenet':
                model.classifier = nn.Linear(
                    model.classifier.in_features, opt.n_finetune_classes)
            else:
                model.fc = nn.Linear(model.fc.in_features,
                                            opt.n_finetune_classes)

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters

    return model, model.parameters()