def model_selection(self):
    in_channels = int(self.input_shape[0])
    if self.config.model_name == "resnet18":
        return resnet18(in_channels=in_channels, num_classes=self.label_num)
    elif self.config.model_name == "resnet34":
        return resnet34(in_channels=in_channels, num_classes=self.label_num)
    elif self.config.model_name == "resnet50":
        return resnet50(in_channels=in_channels, num_classes=self.label_num)
    elif self.config.model_name == "resnet101":
        return resnet101(in_channels=in_channels, num_classes=self.label_num)
    elif self.config.model_name == "resnet152":
        return resnet152(in_channels=in_channels, num_classes=self.label_num)
    elif self.config.model_name == "convnet":
        return convnet(in_channels=in_channels, num_classes=self.label_num)
    elif self.config.model_name == "wide_resnet50_2":
        return wide_resnet50_2(in_channels=in_channels, num_classes=self.label_num)
    elif self.config.model_name == "wide_resnet101_2":
        return wide_resnet101_2(in_channels=in_channels, num_classes=self.label_num)
    raise ValueError("unsupported model_name: {}".format(self.config.model_name))
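# A minimal sketch of an alternative dispatch style: a name-to-constructor
# registry keeps the mapping in one place and makes duplicate or missing keys
# easy to spot. The constructors and keyword arguments mirror the method above;
# the method name model_selection_registry is illustrative.
def model_selection_registry(self):
    registry = {
        "resnet18": resnet18,
        "resnet34": resnet34,
        "resnet50": resnet50,
        "resnet101": resnet101,
        "resnet152": resnet152,
        "convnet": convnet,
        "wide_resnet50_2": wide_resnet50_2,
        "wide_resnet101_2": wide_resnet101_2,
    }
    try:
        builder = registry[self.config.model_name]
    except KeyError:
        raise ValueError("unsupported model_name: {}".format(self.config.model_name))
    return builder(in_channels=int(self.input_shape[0]), num_classes=self.label_num)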
def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):
    super(CRNN, self).__init__()
    assert imgH % 16 == 0, 'imgH has to be a multiple of 16'
    # ResNet-34 backbone as the convolutional feature extractor; n_rnn and
    # leakyRelu are kept for API compatibility but are not used here.
    self.cnn = resnet.resnet34()
    # Two stacked bidirectional LSTMs map the 512-d CNN features to nclass logits.
    self.rnn = nn.Sequential(
        BidirectionalLSTM(512, nh, nh),
        BidirectionalLSTM(nh, nh, nclass))
def get_network(args, model_args, use_gpu=True):
    """return given network"""
    if args.net == 'resnet18':
        from models.resnet import resnet18
        net = resnet18(**model_args)
    elif args.net == 'resnet34':
        from models.resnet import resnet34
        net = resnet34(**model_args)
    elif args.net == 'resnet50':
        from models.resnet import resnet50
        net = resnet50(**model_args)
    elif args.net == 'resnet101':
        from models.resnet import resnet101
        net = resnet101(**model_args)
    elif args.net == 'resnet152':
        from models.resnet import resnet152
        net = resnet152(**model_args)
    else:
        print('the network name you have entered is not supported yet')
        sys.exit()
    if use_gpu:
        net = net.cuda()
    return net
def __init__(self, config, device, **kwargs):
    super(Net_small, self).__init__()
    layers_ = config.MODEL.LAYERS
    pretrained = config.MODEL.PRETRAINED
    classes = config.MODEL.classes
    atrous_rates = config.MODEL.atrous_rates
    self.device = device
    if layers_ == 50:
        resnet = model.resnet50(pretrained=pretrained)
    elif layers_ == 34:
        resnet = model.resnet34(pretrained=pretrained)
    else:
        resnet = model.resnet152(pretrained=pretrained)
    self.layer0 = nn.Sequential(resnet.conv1_my, resnet.bn1, resnet.relu)
    self.max_pool = resnet.maxpool
    self.layer1, self.layer2, self.layer3, self.layer4 = \
        resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
    # del resnet
    self.relu = nn.ReLU()
    self.confidence = Confidence(classes, 1)
    self.aspp = ASPP(in_channels=512, atrous_rates=atrous_rates)
    self.cls = nn.Sequential(
        nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
        nn.BatchNorm2d(256),
        nn.ReLU(inplace=True),
        nn.Conv2d(256, classes, kernel_size=1))
    initialize_weights(self.confidence)
    initialize_weights(self.cls)
    initialize_weights(self.aspp)
def network_config(args):
    if args.network == 'resnet34':
        network = resnet34()
        print('-------> Creating network resnet-34')
    else:
        network = resnet18()
        print('-------> Creating network resnet-18')
    network = torch.nn.DataParallel(network).cuda()
    print('Total params: %.2fM' % (sum(p.numel() for p in network.parameters()) / 1000000.0))
    cudnn.benchmark = True
    optimizer = optim.SGD(network.parameters(), lr=args.lr, momentum=args.momentum)
    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    use_cuda = torch.cuda.is_available()
    # Random seed
    manualSeed = random.randint(1, 10000)
    random.seed(manualSeed)
    np.random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(manualSeed)
    return network, optimizer, use_cuda
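# A minimal sketch of fully deterministic seeding, assuming reproducibility is
# preferred over the cudnn autotuner speedup enabled above (benchmark=True and
# bit-exact determinism conflict). The helper name is illustrative.
import random
import numpy as np
import torch

def seed_everything(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False     # disable the autotuner
    torch.backends.cudnn.deterministic = True  # force deterministic conv algorithms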
def get_model(model_name, channels_num, classes_num=2, image_size=64):
    # resnext models
    if 'resnext_50' in model_name:
        model = resnet.resnext50_32x4d(in_chs=channels_num, classes_num=classes_num)
    elif 'resnext_101' in model_name:
        model = resnet.resnext101_32x8d(in_chs=channels_num, classes_num=classes_num)
    # resnet models
    elif 'resnet_34' in model_name:
        model = resnet.resnet34(in_chs=channels_num, classes_num=classes_num)
    elif 'resnet_50' in model_name:
        model = resnet.resnet50(in_chs=channels_num, classes_num=classes_num)
    elif 'resnet_101' in model_name:
        model = resnet.resnet101(in_chs=channels_num, classes_num=classes_num)
    elif 'wide_resnet50_2' in model_name:
        model = resnet.wide_resnet50_2(in_chs=channels_num, classes_num=classes_num)
    # efficientnet models
    elif 'efficientnet_b4' in model_name:
        model = ef_net.from_name('efficientnet-b4', channels_num,
                                 image_size=image_size, num_classes=classes_num)
    elif 'efficientnet_b5' in model_name:
        model = ef_net.from_name('efficientnet-b5', channels_num,
                                 image_size=image_size, num_classes=classes_num)
    elif 'efficientnet_b7' in model_name:
        model = ef_net.from_name('efficientnet-b7', channels_num,
                                 image_size=image_size, num_classes=classes_num)
    else:
        # default: efficientnet-b0
        # _, global_params = efficientnet(image_size=image_size, num_classes=classes_num)
        model = ef_net.from_name('efficientnet-b0', channels_num,
                                 image_size=image_size, num_classes=classes_num)
    return model
def generate_model(opt):
    assert opt.model_name in ['resnet']
    if opt.model_name == 'resnet':
        assert opt.mode in ['score', 'feature']
        if opt.mode == 'score':
            last_fc = True
        elif opt.mode == 'feature':
            last_fc = False
        assert opt.model_depth in [18, 34, 50, 101]
        if opt.model_depth == 18:
            model = resnet.resnet18(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    last_fc=last_fc)
        elif opt.model_depth == 34:
            model = resnet.resnet34(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    last_fc=last_fc)
        elif opt.model_depth == 50:
            model = resnet.resnet50(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    last_fc=last_fc)
        elif opt.model_depth == 101:
            model = resnet.resnet101(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     last_fc=last_fc)
    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)
    return model
def __init__(self, out_channels=2):
    super(FCN4x, self).__init__()
    # backbone -- resnet, which is slightly different from the original FCN
    backbone = resnet34()
    self.block1 = nn.Sequential(backbone.conv1, backbone.bn1,
                                backbone.relu, backbone.maxpool)
    self.block2 = backbone.layer1
    self.block3 = backbone.layer2
    self.block4 = backbone.layer3
    self.convs = nn.Sequential(Block(256, 512, kernel=3, padding=1),
                               Block(512, 512, kernel=3, padding=1))
    # prediction at each scale
    self.pred_x_16 = nn.Conv2d(512, out_channels, kernel_size=1)
    self.pred_x_8 = nn.Conv2d(128, out_channels, kernel_size=1)
    self.pred_x_4 = nn.Conv2d(64, out_channels, kernel_size=1)
    # upsampling
    self.convs5 = Block(out_channels, out_channels, kernel=3, stride=1, padding=1, bias=False)
    self.convs4 = Block(out_channels, out_channels, kernel=3, stride=1, padding=1, bias=False)
    self.convs3 = Block(out_channels, out_channels, kernel=3, stride=1, padding=1, bias=False)
def Resnet(opt):
    assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
    if opt.model_depth == 10:
        model = resnet.resnet10(num_classes=opt.n_classes)
    elif opt.model_depth == 18:
        model = resnet.resnet18(num_classes=opt.n_classes, pool=opt.pool)
    elif opt.model_depth == 34:
        model = resnet.resnet34(num_classes=opt.n_classes, pool=opt.pool)
    elif opt.model_depth == 50:
        model = resnet.resnet50(num_classes=opt.n_classes, pool=opt.pool)
    elif opt.model_depth == 101:
        model = resnet.resnet101(num_classes=opt.n_classes)
    elif opt.model_depth == 152:
        model = resnet.resnet152(num_classes=opt.n_classes)
    elif opt.model_depth == 200:
        model = resnet.resnet200(num_classes=opt.n_classes)
    return model
def generate_cammodel(config):
    if config.model_depth == 10:
        model = resnet.resnet10(num_classes=config.n_classes,
                                shortcut_type=config.resnet_shortcut,
                                sample_size=config.sample_size,
                                sample_duration=config.sample_duration,
                                channels=config.channels)
    elif config.model_depth == 18:
        model = resnet.resnet18(num_classes=config.n_classes,
                                shortcut_type=config.resnet_shortcut,
                                sample_size=config.sample_size,
                                sample_duration=config.sample_duration,
                                channels=config.channels)
    elif config.model_depth == 34:
        model = resnet.resnet34(num_classes=config.n_classes,
                                shortcut_type=config.resnet_shortcut,
                                sample_size=config.sample_size,
                                sample_duration=config.sample_duration,
                                channels=config.channels)
    elif config.model_depth == 50:
        model = resnet.resnet50(num_classes=config.n_classes,
                                shortcut_type=config.resnet_shortcut,
                                sample_size=config.sample_size,
                                sample_duration=config.sample_duration,
                                channels=config.channels)
    if not config.no_cuda:
        model = model.cuda()
        # model = nn.DataParallel(model, device_ids=None)
    return model, model.parameters()
def get_model(train_model):
    if train_model == 'resnet18':
        return resnet.resnet18()
    elif train_model == 'resnet34':
        return resnet.resnet34()
    elif train_model == 'resnet50':
        return resnet.resnet50()
    elif train_model == 'resnet101':
        return resnet.resnet101()
    elif train_model == 'resnet152':
        return resnet.resnet152()
    elif train_model == 'resnet18_copy':
        return resnet_copy.resnet18()
    elif train_model == 'resnet34_copy':
        return resnet_copy.resnet34()
    elif train_model == 'resnet50_copy':
        return resnet_copy.resnet50()
    elif train_model == 'resnet101_copy':
        return resnet_copy.resnet101()
    elif train_model == 'resnet152_copy':
        return resnet_copy.resnet152()
    elif train_model == 'vgg11':
        return vgg11()
    elif train_model == 'vgg13':
        return vgg13()
    elif train_model == 'vgg16':
        return vgg16()
    elif train_model == 'vgg19':
        return vgg19()
    elif train_model == 'nin':
        return nin()
    elif train_model == 'googlenet':
        return googlenet()
    raise ValueError('unsupported model: {}'.format(train_model))
def __init__(self, output_channels=2):
    super(DeeperLabC, self).__init__()
    self.backbone = resnet34()
    # for high level features
    self.aspp = ASPP()
    # for low level features
    self.low_level_f_convs = Block(64, 32, kernel=1)
    self.s2d = Space2Depth(stride=4)
    # after concat
    self.convs_channel = 128
    self.convs1 = Block(256 + 512, self.convs_channel, kernel=7, stride=1, padding=3)
    self.convs2 = Block(self.convs_channel, self.convs_channel, kernel=7, stride=1, padding=3)
    self.d2s = nn.PixelShuffle(upscale_factor=4)  # [128/16, 128, 128]
    # segmentation
    self.convs3 = Block(self.convs_channel // 16, self.convs_channel // 16,
                        kernel=7, stride=1, padding=3)
    self.convs4 = nn.Conv2d(self.convs_channel // 16, output_channels, kernel_size=1)
def __init__(self, opt):
    # switch to any model you prefer
    self.model = resnet.resnet34(pretrained=True, num_classes=opt.n_classes)
    self.model = self.model.cuda()
    # self.model = nn.DataParallel(self.model, device_ids=None)
    # print('loading pretrained model {}'.format(opt.pretrain_path))
    pretrain = torch.load('resnet34/save_1.pth')
    print('-------------------- load the pretrained model --------------------------------------')
    saved_state_dict = pretrain['state_dict']
    # saved_state_dict = pretrain.state_dict()
    new_params = self.model.state_dict().copy()
    # Copy weights whose 'module.'-prefixed counterpart (saved under
    # nn.DataParallel) exists in the checkpoint and matches in shape.
    for name, param in new_params.items():
        if 'module.' + name in saved_state_dict and \
                param.size() == saved_state_dict['module.' + name].size():
            new_params[name].copy_(saved_state_dict['module.' + name])
    self.model.load_state_dict(new_params)
    self.model.eval()
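# A minimal reusable sketch of the same checkpoint-key fixup, assuming weights
# were saved from an nn.DataParallel-wrapped model (keys prefixed 'module.').
# The helper name and signature are illustrative, not part of the original code.
def strip_dataparallel_prefix(state_dict):
    # Remove a leading 'module.' from every key so the weights load into an
    # unwrapped model; keys without the prefix pass through unchanged.
    return {k[len('module.'):] if k.startswith('module.') else k: v
            for k, v in state_dict.items()}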
def generate_model(opt):
    model = resnet.resnet34(pretrained=True, num_classes=opt.n_classes)
    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)
    if opt.pretrain_path:
        print('loading pretrained model {}'.format(opt.pretrain_path))
        pretrain = torch.load(opt.pretrain_path)
        assert opt.arch == pretrain['arch']
        model.load_state_dict(pretrain['state_dict'])
    return model, model.parameters()
def get_res_34_features(pretrained_path='../models/res_34_features.pth',
                        fix_block=False, fix_weights=False):
    # fix_block is accepted for API compatibility but currently unused.
    model_34, model_34_new = resnet34(num_classes=101, shortcut_type='A',
                                      sample_duration=16, sample_size=112)
    model_34.load_state_dict(torch.load(pretrained_path))
    print('Copying feature extractor weights to new network...')
    for i, j in zip(model_34.children(), model_34_new.children()):
        if i.__str__() == j.__str__():
            if isinstance(i, nn.Sequential):
                for b1, b2 in zip(i, j):
                    copy_weight_bias(b2, b1, basic_block=True)
            elif not isinstance(i, (nn.ReLU, nn.MaxPool3d, nn.AvgPool3d)):
                copy_weight_bias(j, i)
    if fix_weights:
        for param in model_34_new.parameters():
            param.requires_grad = False
    return model_34_new, model_34
def test_ssd_loss(self):
    anchors = losses.create_anchors(grid_sizes=[(7, 7), (4, 4), (2, 2), (1, 1)],
                                    zoom_levels=[1], aspect_ratios=[1])
    self.assertEqual(anchors.size(), (70, 4))
    loss = losses.SSDLoss(anchors, constants.TRANSFORMED_IMAGE_SIZE, num_classes=10)
    num_labels = 10
    k = 1
    batch_size = 4
    num_channels = 3
    image_size = (224, 224)
    base_model = resnet34(pretrained=True)
    head = SSDHead(k, num_labels, -3.)
    ssd = SSDModel(base_model, head)
    input = torch.zeros((batch_size, num_channels, image_size[0], image_size[1]),
                        dtype=torch.float32)
    out = ssd(input)
    self.assertEqual(out[0].size(), (4, 70, 4))
    self.assertEqual(out[1].size(), (4, 70, 11))
    # One (boxes, labels) pair per image in the batch.
    target = [(T([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=torch.float32),
               T([1, 2], dtype=torch.long))
              for _ in range(batch_size)]
    l = loss.loss(out, target)
    # Sanity check: nothing fails, returns dict of expected structure
    self.assertEqual(len(l), 3)
    self.assertTrue('classification' in l)
    self.assertTrue('localization' in l)
    self.assertTrue('total' in l)
def __init__(self, lr=1e-4):
    super(DetNet, self).__init__()
    self.lr = lr
    self.feat = resnet.resnet34(True)
    self.rpn = self.build_rpn(512, 100)
    # self.reg = self.build_reg()
    # self.roi_pooling = nn.AdaptiveAvgPool2d((7, 7))
    self.opt = optim.Adam(self.parameters(), lr=self.lr)
def __init__(self, is_evaluate=False):
    super(Net, self).__init__()
    self.model = caffe_resnet.resnet34()
    if is_evaluate:
        self.model.load_state_dict(torch.load('resnet.pkl'))

    def save_output(module, input, output):
        self.buffer = output

    self.model.layer4.register_forward_hook(save_output)
def generate_model(opt):
    assert opt.model in ['resnet', 'densenet', 'se_resnet']
    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
        from models.resnet import get_fine_tuning_parameters
        if opt.model_depth == 10:
            model = resnet.resnet10(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 18:
            model = resnet.resnet18(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 34:
            model = resnet.resnet34(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 50:
            model = resnet.resnet50(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 101:
            model = resnet.resnet101(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 152:
            model = resnet.resnet152(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 200:
            model = resnet.resnet200(pretrained=True, num_classes=opt.n_classes)
    elif opt.model == 'se_resnet':
        assert opt.model_depth in [18, 34, 50, 101, 152]
        from models.se_resnet import get_fine_tuning_parameters
        if opt.model_depth == 18:
            model = se_resnet.resnet18(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 34:
            model = se_resnet.resnet34(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 50:
            model = se_resnet.resnet50(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 101:
            model = se_resnet.resnet101(pretrained=True, num_classes=opt.n_classes)
    # Note: 'densenet' passes the assert above but has no construction branch,
    # so requesting it fails below with an undefined model.
    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)
    parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
    return model, parameters
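# A minimal sketch of what a get_fine_tuning_parameters helper like the one
# imported above commonly does in this family of codebases: layers at or past
# ft_begin_index (plus the classifier) train normally, earlier layers get
# lr 0.0. The actual implementation in models.resnet may differ; this is an
# illustrative assumption, with a distinct name to avoid shadowing it.
def get_fine_tuning_parameters_sketch(model, ft_begin_index):
    if ft_begin_index == 0:
        return model.parameters()
    ft_module_names = ['layer{}'.format(i) for i in range(ft_begin_index, 5)]
    ft_module_names.append('fc')
    parameters = []
    for name, param in model.named_parameters():
        if any(ft_module in name for ft_module in ft_module_names):
            parameters.append({'params': param})
        else:
            parameters.append({'params': param, 'lr': 0.0})
    return parameters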
def __init__(self, num_classes=1, num_filters=32, pretrained=True,
             is_deconv=False, requires_grad=True):
    """
    :param num_classes: number of output classes
    :param num_filters: base number of decoder filters
    :param pretrained:
        False - no pre-trained network is used
        True - encoder is pre-trained with resnet34
    :param is_deconv:
        False - bilinear interpolation is used in decoder
        True - deconvolution is used in decoder
    """
    super().__init__()
    self.num_classes = num_classes
    self.pool = nn.MaxPool2d(2, 2)
    self.encoder = resnet34(pretrained=pretrained)
    for params in self.encoder.parameters():
        params.requires_grad = requires_grad
    self.relu = nn.ReLU(inplace=True)
    self.conv1 = nn.Sequential(self.encoder.conv1, self.encoder.bn1,
                               self.encoder.relu, self.pool)
    self.conv2 = self.encoder.layer1
    self.conv3 = self.encoder.layer2
    self.conv4 = self.encoder.layer3
    self.conv5 = self.encoder.layer4
    self.center = DecoderBlock(512, num_filters * 8 * 2, num_filters * 8, is_deconv)
    self.dec5 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
    self.dec4 = DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
    self.dec3 = DecoderBlock(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
    self.dec2 = DecoderBlock(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)
    self.dec1 = DecoderBlock(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)
    self.dec0 = ConvRelu(num_filters, num_filters)
    self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
def get_model_param(args):
    # assert args.model in ['resnet', 'vgg']
    if args.model == 'resnet':
        assert args.model_depth in [18, 34, 50, 101, 152]
        from models.resnet import get_fine_tuning_parameters
        if args.model_depth == 18:
            model = resnet.resnet18(pretrained=False, input_size=args.input_size,
                                    num_classes=args.n_classes)
        elif args.model_depth == 34:
            model = resnet.resnet34(pretrained=False, input_size=args.input_size,
                                    num_classes=args.n_classes)
        elif args.model_depth == 50:
            model = resnet.resnet50(pretrained=False, input_size=args.input_size,
                                    num_classes=args.n_classes)
        elif args.model_depth == 101:
            model = resnet.resnet101(pretrained=False, input_size=args.input_size,
                                     num_classes=args.n_classes)
        elif args.model_depth == 152:
            model = resnet.resnet152(pretrained=False, input_size=args.input_size,
                                     num_classes=args.n_classes)
    # elif args.model == 'vgg':
    #     pass

    # Load pretrained model here
    if args.finetune:
        pretrained_model = model_path[args.arch]
        args.pretrain_path = os.path.join(args.root_path, 'pretrained_models', pretrained_model)
        print("=> loading pretrained model '{}'...".format(pretrained_model))
        model.load_state_dict(torch.load(args.pretrain_path))
        # Only modify the last layer
        if args.model == 'resnet':
            model.fc = nn.Linear(model.fc.in_features, args.n_finetune_classes)
        # elif args.model == 'vgg':
        #     pass
        parameters = get_fine_tuning_parameters(model, args.ft_begin_index,
                                                args.lr_mult1, args.lr_mult2)
        return model, parameters
    return model, model.parameters()
def __init__(self, embedding_size, num_classes, pretrained=False):
    super(FaceNetModel, self).__init__()
    self.model = resnet34(pretrained)
    self.embedding_size = embedding_size
    # 25088 = 512 * 7 * 7, the flattened ResNet-34 feature map
    self.model.fc = nn.Linear(25088, self.embedding_size)
    self.model.classifier = nn.Linear(self.embedding_size, num_classes)
    self.embedfc = nn.Sequential(
        nn.Linear(32768, 256),
        nn.PReLU(),
        nn.Linear(256, 256),
        nn.PReLU(),
        nn.Linear(256, 2))
def get_network(args):
    if args.net == 'vgg16':
        from models.vgg import vgg16
        model_ft = vgg16(args.num_classes, export_onnx=args.export_onnx)
    elif args.net == 'alexnet':
        from models.alexnet import alexnet
        model_ft = alexnet(num_classes=args.num_classes, export_onnx=args.export_onnx)
    elif args.net == 'mobilenet':
        from models.mobilenet import mobilenet_v2
        model_ft = mobilenet_v2(pretrained=True, export_onnx=args.export_onnx)
    elif args.net == 'vgg19':
        from models.vgg import vgg19
        model_ft = vgg19(args.num_classes, export_onnx=args.export_onnx)
    else:
        if args.net == 'googlenet':
            from models.googlenet import googlenet
            model_ft = googlenet(pretrained=True)
        elif args.net == 'inception':
            from models.inception import inception_v3
            model_ft = inception_v3(args, pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet18':
            from models.resnet import resnet18
            model_ft = resnet18(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet34':
            from models.resnet import resnet34
            model_ft = resnet34(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet101':
            from models.resnet import resnet101
            model_ft = resnet101(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet50':
            from models.resnet import resnet50
            model_ft = resnet50(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet152':
            from models.resnet import resnet152
            model_ft = resnet152(pretrained=True, export_onnx=args.export_onnx)
        else:
            print("The %s is not supported..." % (args.net))
            return
    # Replace the classifier head with one sized for args.num_classes.
    if args.net == 'mobilenet':
        num_ftrs = model_ft.classifier[1].in_features
        model_ft.classifier[1] = nn.Linear(num_ftrs * 4, args.num_classes)
    else:
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, args.num_classes)
    net = model_ft
    return net
def __init__(self, num_classes, depth, data_size, emb_name=[], pretrain_weight=None):
    super(ResNet, self).__init__()
    sample_size = data_size['width']
    sample_duration = data_size['depth']
    if depth == 34:
        pretrained_net = resnet.resnet34(sample_size=sample_size, sample_duration=sample_duration)
    elif depth == 50:
        pretrained_net = resnet.resnet50(sample_size=sample_size, sample_duration=sample_duration)
    elif depth == 101:
        pretrained_net = resnet.resnet101(sample_size=sample_size, sample_duration=sample_duration)
    else:
        pretrained_net = resnet.resnet18(sample_size=sample_size, sample_duration=sample_duration)
    num_ftrs = 9 * pretrained_net.fc.in_features
    if pretrain_weight is not None:
        try:
            state_dict = torch.load(pretrain_weight)['state_dict']
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                name = k[7:]  # remove 'module.' prefix added by DataParallel
                new_state_dict[name] = v
            pretrained_net.load_state_dict(new_state_dict)
        except Exception:
            # fall back to randomly initialized weights if loading fails
            pass
    modules = nn.Sequential()
    modules.add_module('Flatten', Flatten())
    modules.add_module('pr0', nn.ReLU())
    modules.add_module('fc1', nn.Linear(num_ftrs, 1024, bias=True))
    modules.add_module('pr1', nn.ReLU())
    # modules.add_module('dp1', nn.Dropout())
    # modules.add_module('fc2', NormalizedLinear(1024, num_classes))
    modules.add_module('fc2', nn.Linear(1024, num_classes, bias=True))
    # modules.add_module('dp2', nn.Dropout())
    # init by xavier
    modules.apply(weights_init)
    pretrained_net.fc = modules
    self.net = FeatureExtractor(pretrained_net, emb_name)
def get_model(_model_name, _num_classes):
    if _model_name == 'resnet34':
        return resnet34(pretrained=settings.isPretrain, num_classes=_num_classes)
    elif _model_name == 'alexnet':
        return alexnet(pretrained=settings.isPretrain, num_classes=_num_classes)
    elif _model_name == 'densenet121':
        return densenet121(pretrained=settings.isPretrain, num_classes=_num_classes)
    elif _model_name == 'vgg16_bn':
        return vgg16_bn(pretrained=settings.isPretrain, num_classes=_num_classes)
    elif _model_name == 'shufflenetv2_x1_0':
        return shufflenetv2_x1_0(pretrained=settings.isPretrain, num_classes=_num_classes)
    else:
        log.logger.error("model_name error!")
        exit(-1)
def generate_model(opt):
    """
    if opt.model == 'resnet':
        from models import resnet
        model = resnet.resnet34(pretrained=True, num_classes=opt.n_classes)
    elif opt.model == 'vgg':
        from models import vgg
        model = vgg.vgg19(pretrained=True, num_classes=opt.n_classes)
    elif opt.model == 'alexnet':
        from models import alexnet
        model = alexnet.alexnet(pretrained=True, num_classes=opt.n_classes)
    elif opt.model == 'densenet':
        from models import densenet
        model = densenet.densenet169(pretrained=True, num_classes=opt.n_classes)
    elif opt.model == 'inception':
        from models import inception
        model = inception.inception_v3(pretrained=True, num_classes=opt.n_classes)
    elif opt.model == 'squeezenet':
        from models import squeezenet
        model = squeezenet.squeezenet1_1(pretrained=True, num_classes=opt.n_classes)
    else:
        print('Invalid model name')
    """
    model = resnet.resnet34(pretrained=True, num_classes=opt.n_classes)
    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)
    if opt.pretrain_path:
        print('loading pretrained model {}'.format(opt.pretrain_path))
        pretrain = torch.load(opt.pretrain_path)
        assert opt.arch == pretrain['arch']
        model.load_state_dict(pretrain['state_dict'])
    return model, model.parameters()
def get_model(cfg, pretrained=False, load_param_from_ours=False):
    if load_param_from_ours:
        pretrained = False
    model = None
    num_classes = cfg.num_classes
    if cfg.model == 'custom':
        from models import custom_net
        if cfg.patch_size == 64:
            model = custom_net.net_64(num_classes=num_classes)
        elif cfg.patch_size == 32:
            model = custom_net.net_32(num_classes=num_classes)
        else:
            print('Unsupported patch size: %s' % cfg.patch_size)
    elif cfg.model == 'googlenet':
        from models import inception_v3
        model = inception_v3.inception_v3(pretrained=pretrained, num_classes=num_classes)
    elif cfg.model == 'vgg':
        from models import vgg
        if cfg.model_info == 19:
            model = vgg.vgg19_bn(pretrained=pretrained, num_classes=num_classes)
        elif cfg.model_info == 16:
            model = vgg.vgg16_bn(pretrained=pretrained, num_classes=num_classes)
    elif cfg.model == 'resnet':
        from models import resnet
        if cfg.model_info == 18:
            model = resnet.resnet18(pretrained=pretrained, num_classes=num_classes)
        elif cfg.model_info == 34:
            model = resnet.resnet34(pretrained=pretrained, num_classes=num_classes)
        elif cfg.model_info == 50:
            model = resnet.resnet50(pretrained=pretrained, num_classes=num_classes)
        elif cfg.model_info == 101:
            model = resnet.resnet101(pretrained=pretrained, num_classes=num_classes)
    if model is None:
        print('not supported: ' + cfg.model)
        sys.exit(-1)
    if load_param_from_ours:
        print('loading pretrained model from {0}'.format(cfg.init_model_file))
        checkpoint = torch.load(cfg.init_model_file)
        model.load_state_dict(checkpoint['model_param'])
    model.cuda()
    print('shift model to parallel!')
    model = torch.nn.DataParallel(model, device_ids=cfg.gpu_id)
    return model
def test_forward_pass_ssd(self):
    num_labels = 10
    k = 1
    batch_size = 4
    num_channels = 3
    image_size = (224, 224)
    base_model = resnet34(pretrained=True)
    head = SSDHead(k, num_labels, -3.)
    ssd = SSDModel(base_model, head)
    input = torch.zeros((batch_size, num_channels, image_size[0], image_size[1]),
                        dtype=torch.float32)
    loc_flattened, class_flattened = ssd(input)
def get_network(args, cfg):
    """return given network"""
    if args.net == 'lenet5':
        net = LeNet5().cuda()
    elif args.net == 'alexnet':
        net = alexnet(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg16':
        net = vgg16(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg13':
        net = vgg13(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg11':
        net = vgg11(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg19':
        net = vgg19(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg16_bn':
        net = vgg16_bn(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg13_bn':
        net = vgg13_bn(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg11_bn':
        net = vgg11_bn(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'vgg19_bn':
        net = vgg19_bn(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'inceptionv3':
        net = inception_v3().cuda()
    # elif args.net == 'inceptionv4':
    #     net = inceptionv4().cuda()
    # elif args.net == 'inceptionresnetv2':
    #     net = inception_resnet_v2().cuda()
    elif args.net == 'resnet18':
        net = resnet18(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda(args.gpuid)
    elif args.net == 'resnet34':
        net = resnet34(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'resnet50':
        net = resnet50(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda(args.gpuid)
    elif args.net == 'resnet101':
        net = resnet101(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'resnet152':
        net = resnet152(pretrained=args.pretrain, num_classes=cfg.PARA.train.num_classes).cuda()
    elif args.net == 'squeezenet':
        net = squeezenet1_0().cuda()
    else:
        print('the network name you have entered is not supported yet')
        sys.exit()
    return net
def load_model(backbone, device_ids, test_model_path, use_se):
    if backbone == 'resnet18_finger':
        model = resnet.resnet18_finger(use_se)
    elif backbone == 'resnet18':
        model = resnet.resnet18(pretrained=False)
    elif backbone == 'resnet34':
        model = resnet.resnet34(pretrained=False)
    elif backbone == 'resnet50':
        model = resnet.resnet50(pretrained=False)
    else:
        raise ValueError('unsupported backbone: {}'.format(backbone))
    if opt.multi_gpus:
        model = DataParallel(model, device_ids=device_ids)
    model.load_state_dict(torch.load(test_model_path))
    # model.to(torch.device("cuda"))
    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()
    return model
def generate_model(opt):
    assert opt.model in ['resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet']
    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
        from models.resnet import get_fine_tuning_parameters
        if opt.model_depth == 10:
            model = resnet.resnet10(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 18:
            model = resnet.resnet18(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = resnet.resnet34(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = resnet.resnet50(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = resnet.resnet101(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = resnet.resnet152(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = resnet.resnet200(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration)
    elif opt.model == 'wideresnet':
        assert opt.model_depth in [50]
        from models.wide_resnet import get_fine_tuning_parameters
        if opt.model_depth == 50:
            model = wide_resnet.resnet50(num_classes=opt.n_classes,
                                         shortcut_type=opt.resnet_shortcut,
                                         k=opt.wide_resnet_k,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
    elif opt.model == 'resnext':
        assert opt.model_depth in [50, 101, 152]
        from models.resnext import get_fine_tuning_parameters
        if opt.model_depth == 50:
            model = resnext.resnet50(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     cardinality=opt.resnext_cardinality,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = resnext.resnet101(num_classes=opt.n_classes,
                                      shortcut_type=opt.resnet_shortcut,
                                      cardinality=opt.resnext_cardinality,
                                      sample_size=opt.sample_size,
                                      sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = resnext.resnet152(num_classes=opt.n_classes,
                                      shortcut_type=opt.resnet_shortcut,
                                      cardinality=opt.resnext_cardinality,
                                      sample_size=opt.sample_size,
                                      sample_duration=opt.sample_duration)
    elif opt.model == 'preresnet':
        assert opt.model_depth in [18, 34, 50, 101, 152, 200]
        from models.pre_act_resnet import get_fine_tuning_parameters
        if opt.model_depth == 18:
            model = pre_act_resnet.resnet18(num_classes=opt.n_classes,
                                            shortcut_type=opt.resnet_shortcut,
                                            sample_size=opt.sample_size,
                                            sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = pre_act_resnet.resnet34(num_classes=opt.n_classes,
                                            shortcut_type=opt.resnet_shortcut,
                                            sample_size=opt.sample_size,
                                            sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = pre_act_resnet.resnet50(num_classes=opt.n_classes,
                                            shortcut_type=opt.resnet_shortcut,
                                            sample_size=opt.sample_size,
                                            sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = pre_act_resnet.resnet101(num_classes=opt.n_classes,
                                             shortcut_type=opt.resnet_shortcut,
                                             sample_size=opt.sample_size,
                                             sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = pre_act_resnet.resnet152(num_classes=opt.n_classes,
                                             shortcut_type=opt.resnet_shortcut,
                                             sample_size=opt.sample_size,
                                             sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = pre_act_resnet.resnet200(num_classes=opt.n_classes,
                                             shortcut_type=opt.resnet_shortcut,
                                             sample_size=opt.sample_size,
                                             sample_duration=opt.sample_duration)
    elif opt.model == 'densenet':
        assert opt.model_depth in [121, 169, 201, 264]
        from models.densenet import get_fine_tuning_parameters
        if opt.model_depth == 121:
            model = densenet.densenet121(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 169:
            model = densenet.densenet169(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 201:
            model = densenet.densenet201(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 264:
            model = densenet.densenet264(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])
            if opt.model == 'densenet':
                model.module.classifier = nn.Linear(
                    model.module.classifier.in_features, opt.n_finetune_classes)
                model.module.classifier = model.module.classifier.cuda()
            else:
                model.module.fc = nn.Linear(model.module.fc.in_features,
                                            opt.n_finetune_classes)
                model.module.fc = model.module.fc.cuda()
            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters
    else:
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])
            if opt.model == 'densenet':
                model.classifier = nn.Linear(model.classifier.in_features,
                                             opt.n_finetune_classes)
            else:
                model.fc = nn.Linear(model.fc.in_features, opt.n_finetune_classes)
            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters
    return model, model.parameters()
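# A minimal usage sketch for generate_model above, assuming an argparse-style
# options object; every field value here is illustrative, not from the source.
from argparse import Namespace

opt = Namespace(model='resnet', model_depth=34, n_classes=400,
                n_finetune_classes=10, resnet_shortcut='A',
                sample_size=112, sample_duration=16, no_cuda=True,
                pretrain_path='', ft_begin_index=0, arch='resnet-34')
model, parameters = generate_model(opt)  # CPU model, all parameters trainable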