def get_network(args, model_args, use_gpu=True):
    """Build and return the ResNet variant named by ``args.net``.

    Args:
        args: namespace with a ``net`` attribute ('resnet18'..'resnet152').
        model_args: dict of keyword arguments forwarded to the constructor.
        use_gpu: when True, move the model to CUDA.

    Exits the process (original behavior) when the name is unsupported.
    """
    # Dispatch table replaces the original repetitive import-and-elif chain.
    from models import resnet
    factories = {
        'resnet18': resnet.resnet18,
        'resnet34': resnet.resnet34,
        'resnet50': resnet.resnet50,
        'resnet101': resnet.resnet101,
        'resnet152': resnet.resnet152,
    }
    try:
        net = factories[args.net](**model_args)
    except KeyError:
        print('the network name you have entered is not supported yet')
        sys.exit()
    if use_gpu:
        net = net.cuda()
    return net
def create_model(num_classes, args):
    """Create a classifier selected by the numeric code ``args.network``.

    Codes: 100 -> ResNet-18, 101 -> ResNet-50, 102 -> BiT (resnetv2, loaded
    from ``args.bit_model``), 103 -> ResNet-101, 104 -> Microsoft Vision
    ResNet-50. Any other code falls back to ResNet-50 with a plain linear
    head.

    Args:
        num_classes: number of output classes for the replacement head.
        args: namespace providing ``network``, ``pretrain`` and
            ``bit_model``.
    """
    if args.network == 100:
        model = resnet.resnet18(pretrained=args.pretrain)
        num_ftrs = model.fc.in_features
        model.fc = nn.Sequential(nn.Dropout(0.5), nn.Linear(num_ftrs, num_classes))
    elif args.network == 101:
        model = resnet.resnet50(pretrained=args.pretrain)
        num_ftrs = model.fc.in_features
        model.fc = nn.Sequential(nn.Dropout(0.5), nn.Linear(num_ftrs, num_classes))
    elif args.network == 102:
        # BiT checkpoint: the architecture name is encoded in the filename.
        architecture = os.path.basename(args.bit_model)
        model = resnetv2.KNOWN_MODELS[architecture.split('.')[0]](
            head_size=num_classes, zero_head=True)
        model.load_from(np.load(args.bit_model))
        print(f'Load pre-trained model {args.bit_model}')
    elif args.network == 103:
        model = resnet.resnet101(pretrained=args.pretrain)
        num_ftrs = model.fc.in_features
        model.fc = nn.Sequential(nn.Dropout(0.5), nn.Linear(num_ftrs, num_classes))
    elif args.network == 104:
        model = microsoftvision.resnet50(pretrained=True)
        # BUG FIX: was a duplicated `model.fc = model.fc = ...` assignment.
        model.fc = nn.Sequential(nn.Dropout(0.5), nn.Linear(2048, num_classes))
    else:
        print('model not available! Using PyTorch ResNet50 as default')
        model = resnet.resnet50(pretrained=args.pretrain)
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)
    return model
def __init__(self, num_classes, layer, fpn_dim=512, pretrained=False, pool_sizes=(1, 2, 3, 6)):
    """UperNet on a ResNet-50/101 backbone with a PPM + FPN head.

    Args:
        num_classes: number of segmentation classes.
        layer: backbone depth, 50 or 101.
        fpn_dim: channel width of the FPN module.
        pretrained: load ImageNet weights for the backbone.
        pool_sizes: pooling grid sizes for the pyramid pooling module.
    """
    super(UperNet, self).__init__()
    if layer not in [50, 101]:
        raise ValueError(
            'Resnet-{} is not supported \n'
            'Currently supported models are Resnet 50, 101'.format(layer))
    print('Model: UperNet_Resnet{}, Pretrained: {}'.format(
        layer, pretrained))
    backbones = {50: resnet50, 101: resnet101}
    self.resnet = backbones[layer](pretrained=pretrained)
    # Bottleneck expansion (4) scales the nominal per-stage widths.
    width_mult = self.resnet.layer1[0].expansion
    stage_dims = [base * width_mult for base in (64, 128, 256, 512)]
    self.pool_sizes = pool_sizes
    self.ppm = PyramidPoolingModule(stage_dims[-1], self.pool_sizes)
    # The PPM concatenates pooled context onto its input, doubling the
    # channel count of the deepest stage seen by the FPN.
    stage_dims[-1] = stage_dims[-1] * 2
    self.fpn_module = FPNModule(num_classes, fpn_dim, in_dims=stage_dims)
def __init__(self, num_classes=400, num_frames=64, num_keyframe=8, dropout_keep_prob=0.5):
    """Image branch: ResNet-101 with its classifier resized to
    ``num_classes`` and initialised from a pre-trained checkpoint.

    Args:
        num_classes: output class count (classifier is resized to this).
        num_frames: clip length in frames.
        num_keyframe: number of key frames sampled from the clip.
        dropout_keep_prob: keep probability used by dropout layers.
    """
    super(FGS3DIMG, self).__init__()
    self.num_frames = num_frames
    self.num_keyframe = num_keyframe
    self.num_classes = num_classes
    self.dropout_keep_prob = dropout_keep_prob
    ##############################################
    # Load resnet model
    ##############################################
    self.resnet_feature = resnet101(pretrained=False)
    num_ftrs = self.resnet_feature.fc.in_features
    self.resnet_feature.fc = nn.Linear(num_ftrs, num_classes)
    ResNet_state_dict = torch.load(
        '/data/Kinetics400/result/ResNetImg_lr0.00025/F90epochs/save_145.pth'
    )
    ResNet_state_dict = ResNet_state_dict['state_dict']
    new_state_dict = OrderedDict()
    for k, v in ResNet_state_dict.items():
        # Strip the checkpoint's 22-character key prefix (presumably
        # 'module.resnet_feature.' from DataParallel wrapping — confirm
        # against the checkpoint) so keys match this bare ResNet.
        name = k[22:]
        new_state_dict[name] = v
    self.resnet_feature.load_state_dict(new_state_dict)
    # BUG FIX: removed leftover debugging statement print('debug').
def get_model(train_model):
    """Return a freshly constructed network for the given model name.

    Args:
        train_model: architecture name, e.g. 'resnet50', 'resnet50_copy',
            'vgg16', 'nin', 'googlenet'.

    Returns:
        The constructed model, or None for an unknown name (preserves the
        original implicit fall-through behavior).
    """
    factories = {
        'resnet18': resnet.resnet18,
        'resnet34': resnet.resnet34,
        'resnet50': resnet.resnet50,
        'resnet101': resnet.resnet101,
        'resnet152': resnet.resnet152,
        'resnet18_copy': resnet_copy.resnet18,
        'resnet34_copy': resnet_copy.resnet34,
        'resnet50_copy': resnet_copy.resnet50,
        'resnet101_copy': resnet_copy.resnet101,
        # BUG FIX: this key was 'resnet152' (a duplicate of the branch
        # above), so resnet_copy.resnet152 was unreachable.
        'resnet152_copy': resnet_copy.resnet152,
        'vgg11': vgg11,
        'vgg13': vgg13,
        'vgg16': vgg16,
        'vgg19': vgg19,
        'nin': nin,
        'googlenet': googlenet,
    }
    factory = factories.get(train_model)
    return factory() if factory is not None else None
def __init__(self, num_classes=2, resnet_arch='resnet50', output_stride=8, layer_num=2):
    """Segmentation network built on a dilated ResNet encoder.

    Args:
        num_classes: number of output classes.
        resnet_arch: 'resnet50' or 'resnet101'.
        output_stride: encoder output stride passed to the ResNet factory.
        layer_num: how many ResNet stages (layer1..layerN) to keep.

    Raises:
        ValueError: if ``resnet_arch`` is not supported.
    """
    super(Resnet_seg, self).__init__()
    self.output_stride = output_stride
    self.layer_num = layer_num
    # Mind whether ResNet-50 or ResNet-101 is being used here.
    if resnet_arch == 'resnet50':
        encoder = resnet.resnet50(True, output_stride=self.output_stride)
    elif resnet_arch == 'resnet101':
        encoder = resnet.resnet101(True, output_stride=self.output_stride)
    else:
        # BUG FIX: previously fell through and crashed later with a
        # confusing NameError on `encoder`.
        raise ValueError('unsupported resnet_arch: %s' % resnet_arch)
    encoder = encoder._modules  # Convert class instance into orderdict
    # Encoder: stem (conv1 + bn + relu), then max-pool -> s/4, 64 channels.
    self.conv1 = nn.Sequential(encoder['conv1'], encoder['bn1'],
                               encoder['relu'])
    self.pool1 = encoder['maxpool']  # s/4 - 64dim
    self.layers = nn.Sequential()
    for i in range(layer_num):
        self.layers.add_module('layer%d' % (i + 1),
                               encoder['layer%d' % (i + 1)])
    layers_dim = [256, 512, 1024, 2048, 2048, 1024, 512]
    # Decoder
    # NOTE(review): the full `layers_dim` list is passed as the first
    # argument here, while sibling networks pass a single element
    # (layers_dim[layer_num - 1]) — confirm against _make_layer.
    self.decoder_conv1 = self._make_layer(layers_dim, 256, 3, padding=1)  # siis_size
    self.decoder_conv2 = self._make_layer(256, 256, 3, padding=1)  # s/4
    self.out_conv = nn.Conv2d(256, num_classes, 1, 1)  # s/1 - output
    self.model_name = resnet_arch + '_seg'
def __init__(self, model='resnet18'):
    """Feature extractor wrapping an ImageNet-pretrained ResNet trunk.

    Args:
        model: one of 'resnet18', 'resnet50', 'resnet101'.

    Raises:
        NotImplementedError: for any other backbone name.
    """
    super(VOSNet, self).__init__()
    self.model = model
    constructors = {'resnet18': resnet18,
                    'resnet50': resnet50,
                    'resnet101': resnet101}
    if model not in constructors:
        raise NotImplementedError
    net = constructors[model](pretrained=True)
    # Keep children 0..7 (stem through layer4); drop avgpool and fc.
    self.backbone = nn.Sequential(*list(net.children())[0:8])
    if model in ('resnet50', 'resnet101'):
        # Deeper backbones get a 1x1 projection from 1024 to 256 channels
        # plus a matching batch norm.
        self.adjust_dim = nn.Conv2d(1024, 256, kernel_size=1, stride=1,
                                    padding=0, bias=False)
        self.bn256 = nn.BatchNorm2d(256)
def generate_model(opt):
    """Build an MFNet / ResNeXt / ResNet model and its parameter iterator.

    Returns:
        (model, param) where ``param`` is ``model.student_model.parameters()``
        when ``opt.mult_loss`` is set, else ``model.parameters()``.
    """
    assert opt.model in ['mfnet', 'resnext', 'resnet']
    # Forced off regardless of caller-provided options.
    opt.cbam = 0
    if opt.model == 'mfnet':
        if opt.mult_loss:
            model = MFNET_3D_T(opt)
        elif opt.time_focus:
            model = MFNET_3D_C(opt)
        #elif opt.mv:
        #    model = MFNET_MV(opt)
        elif opt.small:
            # Small variant is initialised from a compressed pretrained
            # checkpoint; note it is moved to CUDA and wrapped in
            # DataParallel here, unlike the other branches.
            model = MFNET_3D_S(opt)
            pretrain = torch.load(opt.pretrain_path)
            compressed_dict = compress_dict(pretrain, opt)
            model.cuda()
            model = nn.DataParallel(model)
            model.load_state_dict(compressed_dict, strict=True)
        else:
            model = MFNET_3D(opt.n_classes)
    elif opt.model == 'resnext':
        model = resnext.resnet101(opt)
    elif opt.model == 'resnet':
        model = resnet.resnet101(opt)
    if opt.mult_loss:
        # Multi-loss training optimises only the student sub-model.
        param = model.student_model.parameters()
    else:
        param = model.parameters()
    return model, param
def get_model(model_name, channels_num, classes_num=2, image_size=64):
    """Build a classifier by fuzzy (substring) match on ``model_name``.

    Matching uses ``in``, so any name *containing* e.g. 'resnet_50'
    selects ResNet-50; unrecognised names fall back to EfficientNet-B0.

    Args:
        model_name: name (or substring) selecting the architecture.
        channels_num: number of input channels.
        classes_num: number of output classes.
        image_size: input resolution forwarded to the EfficientNet builders.
    """
    # resnext mode
    if 'resnext_50' in model_name:
        model = resnet.resnext50_32x4d(in_chs=channels_num, classes_num=classes_num)
    elif 'resnext_101' in model_name:
        model = resnet.resnext101_32x8d(in_chs=channels_num, classes_num=classes_num)
    # resnet mode
    elif 'resnet_34' in model_name:
        model = resnet.resnet34(in_chs=channels_num, classes_num=classes_num)
    elif 'resnet_50' in model_name:
        model = resnet.resnet50(in_chs=channels_num, classes_num=classes_num)
    elif 'resnet_101' in model_name:
        model = resnet.resnet101(in_chs=channels_num, classes_num=classes_num)
    elif 'wide_resnet50_2' in model_name:
        model = resnet.wide_resnet50_2(in_chs=channels_num, classes_num=classes_num)
    # efficientnet mode
    elif 'efficientnet_b4' in model_name:
        model = ef_net.from_name('efficientnet-b4', channels_num,
                                 image_size=image_size, num_classes=classes_num)
    elif 'efficientnet_b5' in model_name:
        model = ef_net.from_name('efficientnet-b5', channels_num,
                                 image_size=image_size, num_classes=classes_num)
    elif 'efficientnet_b7' in model_name:
        model = ef_net.from_name('efficientnet-b7', channels_num,
                                 image_size=image_size, num_classes=classes_num)
    else:
        # Default fallback: EfficientNet-B0.
        # _, global_params = efficientnet(image_size=image_size, num_classes=classes_num)
        model = ef_net.from_name('efficientnet-b0', channels_num,
                                 image_size=image_size, num_classes=classes_num)
    return model
def select_model(model_def):
    """Instantiate the model named by ``model_def`` (case-insensitive).

    Raises:
        NameError: if the name is not recognised.
    """
    # Registry of (constructor, load message) pairs; ResNets predict
    # 29 * 2 outputs.
    registry = {
        'hopenet': (lambda: HopeNet(), 'HopeNet is loaded'),
        'resnet10': (lambda: resnet10(pretrained=False, num_classes=29 * 2),
                     'ResNet10 is loaded'),
        'resnet18': (lambda: resnet18(pretrained=False, num_classes=29 * 2),
                     'ResNet18 is loaded'),
        'resnet50': (lambda: resnet50(pretrained=False, num_classes=29 * 2),
                     'ResNet50 is loaded'),
        'resnet101': (lambda: resnet101(pretrained=False, num_classes=29 * 2),
                      'ResNet101 is loaded'),
        'graphunet': (lambda: GraphUNet(in_features=2, out_features=3),
                      'GraphUNet is loaded'),
        'graphnet': (lambda: GraphNet(in_features=2, out_features=3),
                     'GraphNet is loaded'),
    }
    key = model_def.lower()
    if key not in registry:
        raise NameError('Undefined model')
    build, message = registry[key]
    model = build()
    print(message)
    return model
def generate_model(opt):
    """Build a 3D ResNet whose head depends on ``opt.mode``.

    'score' keeps the final FC layer; 'feature' drops it so the network
    exposes features. When CUDA is enabled the model is moved to GPU and
    wrapped in DataParallel.
    """
    assert opt.model_name in ['resnet']
    assert opt.mode in ['score', 'feature']
    assert opt.model_depth in [18, 34, 50, 101]
    # 'score' -> keep classification layer, 'feature' -> expose features.
    last_fc = opt.mode == 'score'
    constructors = {
        18: resnet.resnet18,
        34: resnet.resnet34,
        50: resnet.resnet50,
        101: resnet.resnet101,
    }
    model = constructors[opt.model_depth](num_classes=opt.n_classes,
                                          shortcut_type=opt.resnet_shortcut,
                                          last_fc=last_fc)
    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)
    return model
def Resnet(opt):
    """Build a ResNet of the depth requested in ``opt.model_depth``.

    NOTE(review): the ``pool`` option is only forwarded for depths
    18/34/50 — depths 10/101/152/200 ignore it (presumably those
    constructors do not accept it; confirm against the resnet module).
    """
    assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
    if opt.model_depth == 10:
        model = resnet.resnet10(
            num_classes=opt.n_classes)
    elif opt.model_depth == 18:
        model = resnet.resnet18(
            num_classes=opt.n_classes, pool=opt.pool)
    elif opt.model_depth == 34:
        model = resnet.resnet34(
            num_classes=opt.n_classes, pool=opt.pool)
    elif opt.model_depth == 50:
        model = resnet.resnet50(
            num_classes=opt.n_classes, pool=opt.pool)
    elif opt.model_depth == 101:
        model = resnet.resnet101(
            num_classes=opt.n_classes)
    elif opt.model_depth == 152:
        model = resnet.resnet152(
            num_classes=opt.n_classes)
    elif opt.model_depth == 200:
        model = resnet.resnet200(
            num_classes=opt.n_classes)
    return model
def model_selection(self):
    """Instantiate the network named by ``self.config.model_name``.

    The model is built with ``int(self.input_shape[0])`` input channels
    and ``self.label_num`` output classes.

    Raises:
        ValueError: if the configured model name is unknown (previously
            this fell through and returned None, deferring the failure
            to the caller).
    """
    in_channels = int(self.input_shape[0])
    builders = {
        "resnet18": resnet18,
        "resnet34": resnet34,
        "resnet50": resnet50,
        "resnet101": resnet101,
        "resnet152": resnet152,
        "convnet": convnet,
        # BUG FIX: these two entries previously re-tested "resnet50" /
        # "resnet101" and were therefore unreachable; the wide variants
        # now get their own names (presumed intent — confirm with callers).
        "wide_resnet50_2": wide_resnet50_2,
        "wide_resnet101_2": wide_resnet101_2,
    }
    if self.config.model_name not in builders:
        raise ValueError("unknown model name: %r" % self.config.model_name)
    return builders[self.config.model_name](in_channels=in_channels,
                                            num_classes=self.label_num)
def select_model(model_def):
    """Instantiate the requested hand-pose model (name is case-insensitive).

    Raises:
        NameError: if the name is not recognised.
    """
    # Registry of (constructor, load message) pairs; ResNets predict
    # 21 * 2 outputs (21 keypoints x 2 coordinates — previously 29 * 2,
    # kept as comments in the original).
    registry = {
        'hourglass': (lambda: Net_HM_HG(21), 'HourGlass Net is loaded'),
        'posehg': (lambda: Net_Pose_HG(21), 'PoseHG Net is loaded'),
        'graphuhand': (lambda: GraphUHandNet(), 'GraphUHand Net is loaded'),
        'resnet10': (lambda: resnet10(pretrained=False, num_classes=21 * 2),
                     'ResNet10 is loaded'),
        'resnet18': (lambda: resnet18(pretrained=False, num_classes=21 * 2),
                     'ResNet18 is loaded'),
        'resnet50': (lambda: resnet50(pretrained=False, num_classes=21 * 2),
                     'ResNet50 is loaded'),
        'resnet101': (lambda: resnet101(pretrained=False, num_classes=21 * 2),
                      'ResNet101 is loaded'),
        'graphunet': (lambda: GraphUNet(in_features=2, out_features=3),
                      'GraphUNet is loaded'),
        'graphnet': (lambda: GraphNet(in_features=2, out_features=3),
                     'GraphNet is loaded'),
    }
    key = model_def.lower()
    if key not in registry:
        raise NameError('Undefined model')
    build, message = registry[key]
    model = build()
    print(message)
    return model
def generate_model(config):
    """Build a ResNet-101 from an INI-style ``config`` ('Network' section).

    Returns:
        (model, parameters): when a pretrain path is configured, the model
        is initialised from the checkpoint, its FC layer is replaced for
        fine-tuning, and layered parameter groups are returned; otherwise
        all parameters are returned.
    """
    model = resnet.resnet101(
        num_classes=config.getint('Network', 'classes'),
        shortcut_type=config.get('Network', 'resnet_shortcut'),
        cardinality=config.getint('Network', 'resnet_cardinality'),
        sample_size=config.getint('Network', 'sample_size'),
        sample_duration=config.getint('Network', 'sample_duration'),
        input_channels=config.getint('Network', 'input_channels'),
    )
    if config.getboolean('Network', 'use_cuda'):
        model = model.cuda()
    else:
        # Hide all GPUs from CUDA so downstream code stays on CPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    # Wrapped in DataParallel in both cases, so checkpoint keys carry the
    # 'module.' prefix and fine-tuning goes through model.module below.
    model = nn.DataParallel(model)
    pretrain_path = config.get('Network', 'pretrain_path')
    if pretrain_path:
        print('loading pretrained model {}'.format(pretrain_path))
        pretrain = torch.load(pretrain_path)
        # Guard against loading a checkpoint from a different architecture.
        assert config.get('Network', 'arch') == pretrain['arch']
        model.load_state_dict(pretrain['state_dict'])
        # Replace the classifier for the fine-tuning class count.
        model.module.fc = nn.Linear(
            model.module.fc.in_features,
            config.getint('Network', 'n_finetune_classes'))
        model.module.fc = model.module.fc.cuda()
        parameters = get_fine_tuning_parameters(
            model, config.getint('Network', 'ft_begin_index'))
        return model, parameters
    return model, model.parameters()
def __init__(self, embed_size):
    """3D ResNet-101 video encoder producing ``embed_size``-dim embeddings.

    Loads pretrained weights from WEIGHT_RESNET3D_PATH, re-exposes the
    convolutional trunk layer by layer, and adds an fc6/fc7/fc8
    projection head.

    Args:
        embed_size: dimensionality of the output embedding.
    """
    super(EncoderResnet3D, self).__init__()
    model_data = torch.load(WEIGHT_RESNET3D_PATH)
    model_state_dict = model_data['state_dict']
    base_model = resnet101(num_classes=400, shortcut_type='B', sample_size=112, sample_duration=16)
    # removeModule presumably strips DataParallel's 'module.' key prefix
    # — confirm against its definition.
    base_model.load_state_dict(removeModule(model_state_dict))
    # Re-expose the trunk so forward() can tap intermediate layers.
    self.conv1 = base_model.conv1
    self.bn1 = base_model.bn1
    self.maxpool = base_model.maxpool
    self.layer1 = base_model.layer1
    self.layer2 = base_model.layer2
    self.layer3 = base_model.layer3
    self.layer4 = base_model.layer4
    self.avgpool = base_model.avgpool
    # Projection head: 2048 -> 2048 -> 2048 -> embed_size.
    self.fc6 = nn.Linear(2048, 2048)
    self.fc7 = nn.Linear(2048, 2048)
    self.fc8 = nn.Linear(2048, embed_size)
    self.relu = nn.ReLU()
    self.dropout = nn.Dropout(0.5)
    self.bn = nn.BatchNorm1d(embed_size)
    # in case of CK+, useful
    self.classification = nn.Linear(2048, 7)
def refinenet_resnet101(weights_dir=None, **kwargs):
    """Build a RefineNet with a ResNet-101 backbone.

    Args:
        weights_dir: directory containing 'resnet101.pth',
            'refinenets.pth' and 'clf.pth'; when falsy, an unweighted
            model is returned.
        **kwargs: accepted for interface compatibility (unused).
    """
    if not weights_dir:
        return RefineNet_ResNet()
    backbone = resnet101(os.path.join(weights_dir, 'resnet101.pth'))
    refine_blocks = refinenets(os.path.join(weights_dir, 'refinenets.pth'))
    classifier = clf(os.path.join(weights_dir, 'clf.pth'))
    return RefineNet_ResNet(backbone, refine_blocks, classifier)
def __init__(self, num_classes=2, siis_size=[32, 32], width=1, kw=9, dim=128, arch=1, resnet_arch='resnet50', output_stride=8, layer_num=2):
    """DeepLab-style segmentation network with a SIIS module.

    Args:
        num_classes: number of output classes.
        siis_size: spatial size the SIIS module operates at.
        width, kw, dim, arch: SIIS hyper-parameters (forwarded as-is).
        resnet_arch: 'resnet50' or 'resnet101' encoder.
        output_stride: encoder output stride.
        layer_num: how many ResNet stages (layer1..layerN) to keep.

    NOTE(review): mutable default ``siis_size=[32, 32]`` is shared across
    calls; safe only if never mutated. If ``resnet_arch`` matches neither
    supported value, ``encoder`` is unbound and a NameError follows.
    """
    super(Deeplab_SIIS, self).__init__()
    self.siis_size = siis_size
    self.output_stride = output_stride
    self.layer_num = layer_num
    aspp_depth = 128
    if resnet_arch == 'resnet50':
        encoder = resnet.resnet50(True, output_stride=self.output_stride)
    elif resnet_arch == 'resnet101':
        encoder = resnet.resnet101(True, output_stride=self.output_stride)
    encoder = encoder._modules  # Covert class instance into orderdict
    # Encoder
    self.conv1 = nn.Sequential(encoder['conv1'], encoder['bn1'],
                               encoder['relu'])
    self.pool1 = encoder['maxpool']  # s/4 - 64dim
    self.layers = nn.Sequential()
    for i in range(layer_num):
        self.layers.add_module('layer%d' % (i + 1),
                               encoder['layer%d' % (i + 1)])
    layers_dim = [256, 512, 1024, 2048, 2048, 1024, 512]
    # layer_outSize = [s/4, s/output_stride, s/output_stride, ...]
    self.conv2 = self._make_layer(64, 48, 1)  # in: pool1(out)
    # ASPP
    self.aspp = ASPP(
        in_channel=layers_dim[layer_num - 1],
        depth=aspp_depth)  # ASPP: in: layers(out), fix_size=s/8
    # in: concat[conv2(out), Up(aspp(out))], out: siis(in)
    self.conv3 = self._make_layer(aspp_depth + 48, dim, 1)  # s/4
    # SIIS
    self.siis = SIIS(siis_size, width, kw, dim, arch)  # size=siis_size, dim=dim
    # Decoder
    # in: siis(out),
    self.decoder_conv1 = self._make_layer(dim, dim, 3, padding=1)  # s/siis_size
    self.decoder_conv2 = self._make_layer(dim, dim, 3, padding=1)  # s/4
    self.out_conv = nn.Conv2d(dim, num_classes, 1, 1)  # s/1 - output
    self.model_name = 'deeplab2_' + self.siis.name
def __init__(self, config, **kwargs):
    """Segmentation network: dilated ResNet trunk + side outputs + ASPP.

    Args:
        config: object exposing MODEL.LAYERS (50/101/other -> 152),
            MODEL.PRETRAINED, MODEL.classes and MODEL.atrous_rates.
        **kwargs: accepted for interface compatibility (unused).
    """
    super(Net, self).__init__()
    layers_ = config.MODEL.LAYERS
    pretrained = config.MODEL.PRETRAINED
    classes = config.MODEL.classes
    atrous_rates = config.MODEL.atrous_rates
    if layers_ == 50:
        resnet = model.resnet50(pretrained=pretrained)
    elif layers_ == 101:
        resnet = model.resnet101(pretrained=pretrained)
    else:
        resnet = model.resnet152(pretrained=pretrained)
    # Deep stem: the backbone exposes conv1..conv3 (3x conv+bn+relu)
    # before the max-pool.
    self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                resnet.conv2, resnet.bn2, resnet.relu,
                                resnet.conv3, resnet.bn3, resnet.relu)
    self.max_pool = resnet.maxpool
    self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
    # del resnet
    # Per-stage side outputs (layer3's is disabled below).
    self.side_output0 = SideOutput(128, num_class=classes)
    self.side_output1 = SideOutput(256, num_class=classes)
    self.side_output2 = SideOutput(512, num_class=classes)
    # self.side_output3 = SideOutput(1024, 1, kernel_sz=15, stride=8, padding=7)
    self.side_output4 = SideOutput(2048, num_class=classes)
    self.sigmoid = nn.Sigmoid()
    self.fuse = nn.Conv2d(4, 1, kernel_size=1, bias=False)
    self.relu = nn.ReLU()
    self.confidence = Confidence(classes, 1)
    self.cw = nn.Conv2d(2, 1, kernel_size=1, padding=0, bias=False)
    # Convert layer3/layer4 to dilated convolutions (stride 1) so the
    # output stride stays at 8 while the receptive field grows.
    for n, m in self.layer3.named_modules():
        if 'conv2' in n:
            m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
        elif 'downsample.0' in n:
            m.stride = (1, 1)
    for n, m in self.layer4.named_modules():
        if 'conv2' in n:
            m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
        elif 'downsample.0' in n:
            m.stride = (1, 1)
    # ASPP consumes layer4 features concatenated with 256 extra channels.
    self.aspp = ASPP(in_channels=2048 + 256, atrous_rates=atrous_rates)
    # 1x1 reduction of the low-level (256-ch) features to 48 channels.
    self.bot_fine = nn.Conv2d(256, 48, kernel_size=1, bias=False)
    self.cls = nn.Sequential(
        nn.Conv2d(256 + 48, 256, kernel_size=3, padding=1, bias=False),
        nn.BatchNorm2d(256), nn.ReLU(inplace=True),
        nn.Conv2d(256, classes, kernel_size=1))
    initialize_weights(self.confidence)
    initialize_weights(self.cls)
def initialize_model(model_name, num_dense_layers=2, dropout=0):
    '''
    Initialise a model with a custom head to predict both sequence length
    and digits.

    Parameters
    ----------
    model_name : str
        Model Name can be either: ResNet VGG ConvNet BaselineCNN_dropout
    num_dense_layers : int
        Number of dense layers for the ConvNet head.
    dropout : float
        Dropout probability for ConvNet / BaselineCNNdropout.

    Returns
    -------
    model : object
        The model to be initialize

    Raises
    ------
    ValueError
        If the model name is unknown (previously fell through and crashed
        with UnboundLocalError at the return).
    '''
    if model_name[:3] == "VGG":
        model = VGG(model_name, num_classes=7)
        model.classifier = CustomHead(512)
    elif model_name[:6] == "ResNet":
        if model_name == "ResNet18":
            model = resnet18(num_classes=7)
            model.linear = CustomHead(512)
        elif model_name == "ResNet34":
            # BUG FIX: this branch previously constructed resnet18
            # (copy-paste error); assumes resnet34 is importable alongside
            # the other variants.
            model = resnet34(num_classes=7)
            model.linear = CustomHead(512)
        elif model_name == "ResNet50":
            # Bottleneck ResNets have a 4x expansion, hence 512 * 4.
            model = resnet50(num_classes=7)
            model.linear = CustomHead(512 * 4)
        elif model_name == "ResNet101":
            model = resnet101(num_classes=7)
            model.linear = CustomHead(512 * 4)
        elif model_name == "ResNet152":
            model = resnet152(num_classes=7)
            model.linear = CustomHead(512 * 4)
        else:
            raise ValueError("Unknown ResNet variant: %s" % model_name)
    elif model_name == "ConvNet":
        model = ConvModel(num_dense_layers=num_dense_layers, dropout=dropout)
    elif model_name == "BaselineCNNdropout":
        model = BaselineCNNdropout(num_classes=7, p=dropout)
        model.fc2 = CustomHead(4096)
    else:
        raise ValueError("Unknown model name: %s" % model_name)
    return model
def generate_model(opt):
    """Build a (SE-)ResNet with ImageNet-style pretraining and return it
    with its fine-tuning parameter groups.

    NOTE(review): 'densenet' passes the assert but has no construction
    branch, so it would crash with UnboundLocalError on ``model`` below.
    """
    assert opt.model in ['resnet', 'densenet', 'se_resnet']
    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
        from models.resnet import get_fine_tuning_parameters
        if opt.model_depth == 10:
            model = resnet.resnet10(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 18:
            model = resnet.resnet18(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 34:
            model = resnet.resnet34(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 50:
            model = resnet.resnet50(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 101:
            model = resnet.resnet101(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 152:
            model = resnet.resnet152(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 200:
            model = resnet.resnet200(pretrained=True, num_classes=opt.n_classes)
    elif opt.model == 'se_resnet':
        assert opt.model_depth in [18, 34, 50, 101, 152]
        # Shadows the resnet import above with the SE-ResNet version.
        from models.se_resnet import get_fine_tuning_parameters
        if opt.model_depth == 18:
            model = se_resnet.resnet18(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 34:
            model = se_resnet.resnet34(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 50:
            model = se_resnet.resnet50(pretrained=True, num_classes=opt.n_classes)
        elif opt.model_depth == 101:
            model = se_resnet.resnet101(pretrained=True, num_classes=opt.n_classes)
    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)
        parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
        return model, parameters
    else:
        parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
        return model, parameters
    # NOTE(review): unreachable — both branches above return.
    return model, model.parameters()
def __init__(self, nclass, backbone, aux, se_loss, dilated=True, norm_layer=None, **kwargs):
    """Shared base for segmentation networks.

    Only ``nclass`` is stored here and a ResNet-101 trunk is always built;
    ``backbone``, ``aux``, ``se_loss``, ``dilated``, ``norm_layer`` and
    ``**kwargs`` are accepted but unused in this constructor (presumably
    kept for subclass / caller compatibility).
    """
    super(BaseNet, self).__init__()
    self.nclass = nclass
    # Always a ResNet-101 trunk, regardless of the `backbone` argument.
    self.pretrained = resnet101()
def CreatNet(opt):
    """Factory: build the network named by ``opt.model_name``.

    1-D models take their input channel count from ``opt.input_nc``; the
    2-D ResNets additionally get conv1 rebuilt for ``opt.input_nc`` input
    channels. Returns None for an unknown name (implicit fall-through).
    """
    name = opt.model_name
    label_num = opt.label
    if name == 'lstm':
        net = lstm.lstm(opt.input_size, opt.time_step, input_nc=opt.input_nc, num_classes=label_num)
    elif name == 'cnn_1d':
        net = cnn_1d.cnn(opt.input_nc, num_classes=label_num)
    elif name == 'resnet18_1d':
        net = resnet_1d.resnet18()
        # Rebuild stem and head for the configured channels / classes.
        net.conv1 = nn.Conv1d(opt.input_nc, 64, 7, 2, 3, bias=False)
        net.fc = nn.Linear(512, label_num)
    elif name == 'resnet34_1d':
        net = resnet_1d.resnet34()
        net.conv1 = nn.Conv1d(opt.input_nc, 64, 7, 2, 3, bias=False)
        net.fc = nn.Linear(512, label_num)
    elif name == 'multi_scale_resnet_1d':
        net = multi_scale_resnet_1d.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=label_num)
    elif name == 'micro_multi_scale_resnet_1d':
        net = micro_multi_scale_resnet_1d.Multi_Scale_ResNet(
            inchannel=opt.input_nc, num_classes=label_num)
    elif name == 'multi_scale_resnet':
        net = multi_scale_resnet.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=label_num)
    elif name == 'dfcnn':
        net = dfcnn.dfcnn(num_classes=label_num)
    elif name in ['resnet101', 'resnet50', 'resnet18']:
        if name == 'resnet101':
            net = resnet.resnet101(pretrained=False)
            net.fc = nn.Linear(2048, label_num)
        elif name == 'resnet50':
            net = resnet.resnet50(pretrained=False)
            net.fc = nn.Linear(2048, label_num)
        elif name == 'resnet18':
            net = resnet.resnet18(pretrained=False)
            net.fc = nn.Linear(512, label_num)
        # Shared by all 2-D ResNets: conv1 rebuilt for opt.input_nc channels.
        net.conv1 = nn.Conv2d(opt.input_nc, 64, 7, 2, 3, bias=False)
    elif 'densenet' in name:
        if name == 'densenet121':
            net = densenet.densenet121(pretrained=False, num_classes=label_num)
        elif name == 'densenet201':
            net = densenet.densenet201(pretrained=False, num_classes=label_num)
    elif name == 'squeezenet':
        net = squeezenet.squeezenet1_1(pretrained=False, num_classes=label_num, inchannel=1)
    return net
def get_model_param(args):
    """Build a ResNet and return it with its fine-tuning parameter groups.

    When ``args.finetune`` is set, loads pretrained weights from
    ``args.root_path/pretrained_models``, replaces the final FC layer for
    ``args.n_finetune_classes`` and returns layered-LR parameter groups;
    otherwise returns all model parameters.

    NOTE(review): if ``args.model != 'resnet'``, ``model`` (and
    ``get_fine_tuning_parameters``) are never bound and this raises
    UnboundLocalError / NameError below.
    """
    # assert args.model in ['resnet', 'vgg']
    if args.model == 'resnet':
        assert args.model_depth in [18, 34, 50, 101, 152]
        from models.resnet import get_fine_tuning_parameters
        if args.model_depth == 18:
            model = resnet.resnet18(pretrained=False, input_size=args.input_size, num_classes=args.n_classes)
        elif args.model_depth == 34:
            model = resnet.resnet34(pretrained=False, input_size=args.input_size, num_classes=args.n_classes)
        elif args.model_depth == 50:
            model = resnet.resnet50(pretrained=False, input_size=args.input_size, num_classes=args.n_classes)
        elif args.model_depth == 101:
            model = resnet.resnet101(pretrained=False, input_size=args.input_size, num_classes=args.n_classes)
        elif args.model_depth == 152:
            model = resnet.resnet152(pretrained=False, input_size=args.input_size, num_classes=args.n_classes)
    # elif args.model == 'vgg':
    #     pass
    # Load pretrained model here
    if args.finetune:
        pretrained_model = model_path[args.arch]
        args.pretrain_path = os.path.join(args.root_path, 'pretrained_models', pretrained_model)
        print("=> loading pretrained model '{}'...".format(pretrained_model))
        model.load_state_dict(torch.load(args.pretrain_path))
        # Only modify the last layer
        if args.model == 'resnet':
            model.fc = nn.Linear(model.fc.in_features, args.n_finetune_classes)
        # elif args.model == 'vgg':
        #     pass
        parameters = get_fine_tuning_parameters(model, args.ft_begin_index, args.lr_mult1, args.lr_mult2)
        return model, parameters
    return model, model.parameters()
def __init__(self, num_classes=2, resnet_arch='resnet50', output_stride=8, layer_num=2, aspp_rate=0):
    """DeepLab v3+ head on a dilated ResNet encoder.

    Args:
        num_classes: number of output classes.
        resnet_arch: 'resnet50' or 'resnet101' encoder.
        output_stride: encoder output stride.
        layer_num: how many ResNet stages (layer1..layerN) to keep.
        aspp_rate: 0 -> standard ASPP; otherwise index into the rate
            table for the single-rate ASPP_test variant.

    NOTE(review): an unsupported ``resnet_arch`` leaves ``encoder``
    unbound and raises NameError on the next line.
    """
    super().__init__()
    self.model_name = 'deeplabv3plus'
    self.layer_num = layer_num
    self.output_stride = output_stride
    aspp_depth = 256
    if resnet_arch == 'resnet50':
        encoder = resnet.resnet50(True, output_stride=self.output_stride)
    elif resnet_arch == 'resnet101':
        encoder = resnet.resnet101(True, output_stride=self.output_stride)
    encoder = encoder._modules  # Covert class instance into orderdict
    # decay=0.9997, epsilon=1e-5, scale=True
    self.conv1 = nn.Sequential(encoder['conv1'], encoder['bn1'],
                               encoder['relu'])
    self.pool1 = encoder['maxpool']  # s/4 - 64dim
    self.layers = nn.Sequential()
    for i in range(layer_num):
        self.layers.add_module('layer%d' % (i + 1),
                               encoder['layer%d' % (i + 1)])
    layers_dim = [256, 512, 1024, 2048, 2048, 1024, 512]
    # layer_outSize = [s/4, s/output_stride, s/output_stride, ...]
    self.conv2 = self._make_layer(64, 48, 1)  # in: pool1(out)
    rate_tabel = [1, 6, 12, 18, 24, 1, 3]
    if aspp_rate == 0:
        self.aspp = ASPP(
            in_channel=layers_dim[layer_num - 1],
            depth=aspp_depth)  # ASPP: in: layers(out), fix_size=s/8
    else:
        self.aspp = ASPP_test(layers_dim[layer_num - 1], aspp_depth,
                              rate_tabel[aspp_rate])
    # Decoder
    self.decoder_conv1 = self._make_layer(
        aspp_depth + 48, aspp_depth, 3,
        padding=1)  # in: concat[conv2(out), Up(aspp(out))]
    self.decoder_conv2 = self._make_layer(aspp_depth, aspp_depth, 3,
                                          padding=1)  # s/4
    self.out_conv = nn.Conv2d(aspp_depth, num_classes, 1, 1)  # s/1 - output
def get_network(args):
    """Build the network named by ``args.net``, optionally for ONNX export.

    VGG/AlexNet/MobileNet get their class count at construction; the
    remaining (ImageNet-pretrained) models have their classifier replaced
    below. Returns None (after printing) for an unsupported name.
    """
    if args.net == 'vgg16':
        from models.vgg import vgg16
        model_ft = vgg16(args.num_classes, export_onnx=args.export_onnx)
    elif args.net == 'alexnet':
        from models.alexnet import alexnet
        model_ft = alexnet(num_classes=args.num_classes, export_onnx=args.export_onnx)
    elif args.net == 'mobilenet':
        from models.mobilenet import mobilenet_v2
        model_ft = mobilenet_v2(pretrained=True, export_onnx=args.export_onnx)
    elif args.net == 'vgg19':
        from models.vgg import vgg19
        model_ft = vgg19(args.num_classes, export_onnx=args.export_onnx)
    else:
        if args.net == 'googlenet':
            from models.googlenet import googlenet
            model_ft = googlenet(pretrained=True)
        elif args.net == 'inception':
            from models.inception import inception_v3
            model_ft = inception_v3(args, pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet18':
            from models.resnet import resnet18
            model_ft = resnet18(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet34':
            from models.resnet import resnet34
            model_ft = resnet34(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet101':
            from models.resnet import resnet101
            model_ft = resnet101(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet50':
            from models.resnet import resnet50
            model_ft = resnet50(pretrained=True, export_onnx=args.export_onnx)
        elif args.net == 'resnet152':
            from models.resnet import resnet152
            model_ft = resnet152(pretrained=True, export_onnx=args.export_onnx)
        else:
            print("The %s is not supported..." % (args.net))
            return
    # Replace the classification head for args.num_classes.
    # NOTE(review): this also runs for the vgg/alexnet branches, which
    # were already built with num_classes — presumably their project-local
    # implementations expose a compatible `.fc`; confirm.
    if args.net == 'mobilenet':
        num_ftrs = model_ft.classifier[1].in_features
        model_ft.classifier[1] = nn.Linear(num_ftrs * 4, args.num_classes)
    else:
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, args.num_classes)
    net = model_ft
    return net
def __init__(self, layers=50, bins=(1, 2, 3, 6), dropout=0.1, classes=21, use_ppm=True, pretrained=True):
    """PSPNet: dilated ResNet trunk + pyramid pooling + conv classifier.

    Args:
        layers: backbone depth (50, 101 or 152).
        bins: pyramid pooling grid sizes.
        dropout: dropout rate in the classifier heads.
        classes: number of segmentation classes (must be > 1).
        use_ppm: include the pyramid pooling module.
        pretrained: load pretrained backbone weights.
    """
    super(PSPNet, self).__init__()
    assert layers in [50, 101, 152]
    assert 2048 % len(bins) == 0
    assert classes > 1
    self.use_ppm = use_ppm
    if layers == 50:
        resnet = models.resnet50(pretrained=pretrained)
    elif layers == 101:
        resnet = models.resnet101(pretrained=pretrained)
    else:
        resnet = models.resnet152(pretrained=pretrained)
    # Deep stem: this ResNet variant exposes conv1..conv3 before max-pool.
    self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                resnet.conv2, resnet.bn2, resnet.relu,
                                resnet.conv3, resnet.bn3, resnet.relu,
                                resnet.maxpool)
    self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
    # Convert layer3/layer4 to dilated convolutions (stride 1) so the
    # spatial resolution is preserved while the receptive field grows.
    for n, m in self.layer3.named_modules():
        if 'conv2' in n:
            m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
        elif 'downsample.0' in n:
            m.stride = (1, 1)
    for n, m in self.layer4.named_modules():
        if 'conv2' in n:
            m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
        elif 'downsample.0' in n:
            m.stride = (1, 1)
    fea_dim = 2048
    if use_ppm:
        # PPM concatenates its pooled branches onto the input features,
        # doubling the channel count.
        self.ppm = PPM(fea_dim, int(fea_dim / len(bins)), bins)
        fea_dim *= 2
    self.cls = nn.Sequential(
        nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
        nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
        nn.Conv2d(512, classes, kernel_size=1))
    # NOTE(review): nn.Module.training defaults to True at construction,
    # so this auxiliary head (fed from the 1024-ch layer3 features) is
    # effectively always created here.
    if self.training:
        self.aux = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256), nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout), nn.Conv2d(256, classes, kernel_size=1))
def __init__(self, num_classes=400, num_frames=64, num_keyframe=8, dropout_keep_prob=0.5):
    """FGS3D model: ResNet-101 feature branch + dilated conv + joint
    image/video logits, initialised from a previously trained checkpoint.

    Args:
        num_classes: output class count.
        num_frames: clip length in frames.
        num_keyframe: number of key frames sampled from the clip.
        dropout_keep_prob: keep probability used by dropout layers.
    """
    super(FGS3D, self).__init__()
    self.num_frames = num_frames
    self.num_keyframe = num_keyframe
    self.num_classes = num_classes
    self.dropout_keep_prob = dropout_keep_prob
    ##############################################
    # Load resnet model
    ##############################################
    self.resnet_feature = resnet101(pretrained=False)
    num_ftrs = self.resnet_feature.fc.in_features
    self.resnet_feature.fc = nn.Linear(num_ftrs, num_classes)
    # Disabled alternative: initialise only the ResNet branch from the
    # image-model checkpoint (superseded by the whole-model load below).
    """
    ResNet_state_dict = torch.load('/data/Kinetics400/result/ResNetImg_lr0.00025/F90epochs/save_145.pth')
    ResNet_state_dict = ResNet_state_dict['state_dict']
    new_state_dict = OrderedDict()
    for k, v in ResNet_state_dict.items():
        name = k[22:] # remove `module.`
        new_state_dict[name] = v
    self.resnet_feature.load_state_dict(new_state_dict)
    set_parameter_requires_grad(self.resnet_feature)
    """
    # Dilated 3x3 conv over the 1024-ch intermediate feature map.
    self.feat_conv_3x3 = nn.Conv2d(in_channels=1024, out_channels=1024,
                                   kernel_size=3, padding=6, dilation=6)
    torch.nn.init.normal_(self.feat_conv_3x3.weight, mean=0., std=0.01)
    torch.nn.init.constant_(self.feat_conv_3x3.bias, 0.0)
    self.feat_conv_3x3_relu = nn.ReLU(inplace=True)
    # Fuses 8 keyframe score vectors (8 * 400) into final class logits.
    self.logits_img_vid = nn.Linear(8 * 400, self.num_classes)
    torch.nn.init.normal_(self.logits_img_vid.weight, mean=0.0, std=0.01)
    torch.nn.init.constant_(self.logits_img_vid.bias, 0.0)
    # Load the full model checkpoint, stripping DataParallel's 7-char
    # 'module.' key prefix, then freeze the ResNet branch.
    state_dict = torch.load('/data/Kinetics400/result/img_vid/save_20.pth')
    state_dict = state_dict['state_dict']
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    self.load_state_dict(new_state_dict)
    set_parameter_requires_grad(self.resnet_feature)
def __init__(self, num_classes, depth, data_size, emb_name=None, pretrain_weight=None):
    """3D ResNet wrapped in a FeatureExtractor with a custom MLP head.

    Args:
        num_classes: number of output classes.
        depth: backbone depth (34/50/101; anything else -> 18).
        data_size: dict with 'width' (sample size) and 'depth' (duration).
        emb_name: names of embeddings to extract (defaults to []).
            BUG FIX: was a mutable default argument ``emb_name=[]``.
        pretrain_weight: optional checkpoint path; loading is best-effort.
    """
    super(ResNet, self).__init__()
    if emb_name is None:
        emb_name = []
    sample_size = data_size['width']
    sample_duration = data_size['depth']
    if depth == 34:
        pretrained_net = resnet.resnet34(sample_size=sample_size, sample_duration=sample_duration)
    elif depth == 50:
        pretrained_net = resnet.resnet50(sample_size=sample_size, sample_duration=sample_duration)
    elif depth == 101:
        pretrained_net = resnet.resnet101(sample_size=sample_size, sample_duration=sample_duration)
    else:
        pretrained_net = resnet.resnet18(sample_size=sample_size, sample_duration=sample_duration)
    # 9 spatial positions' worth of features feed the first FC layer
    # (presumably a 3x3 pooled grid — confirm against FeatureExtractor).
    num_ftrs = 9 * pretrained_net.fc.in_features
    if not pretrain_weight is None:
        try:
            state_dict = torch.load(pretrain_weight)['state_dict']
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                name = k[7:]  # remove 'module.' of dataparallel
                new_state_dict[name] = v
            pretrained_net.load_state_dict(new_state_dict)
        except Exception:
            # BUG FIX: was a bare `except:` (also swallowed
            # KeyboardInterrupt/SystemExit). Loading stays best-effort:
            # an incompatible checkpoint leaves random initialisation.
            pass
    modules = nn.Sequential()
    modules.add_module('Flatten', Flatten())
    modules.add_module('pr0', nn.ReLU())
    modules.add_module('fc1', nn.Linear(num_ftrs, 1024, bias=True))
    modules.add_module('pr1', nn.ReLU())
    #modules.add_module('dp1', nn.Dropout())
    #modules.add_module('fc2', NormalizedLinear(1024, num_classes))
    modules.add_module('fc2', nn.Linear(1024, num_classes, bias=True))
    #modules.add_module('dp2', nn.Dropout())
    # init by xavier
    modules.apply(weights_init)
    pretrained_net.fc = modules
    self.net = FeatureExtractor(pretrained_net, emb_name)
def __init__(self, num_classes=400, num_frames=64, num_keyframe=8, dropout_keep_prob=0.5):
    """Build the FGS3DM model.

    Args:
        num_classes: number of target classes (default 400).
        num_frames: frames sampled per clip.
        num_keyframe: number of keyframes used by the model.
        dropout_keep_prob: keep probability stored for later use.
    """
    super(FGS3DM, self).__init__()
    self.num_classes = num_classes
    self.num_frames = num_frames
    self.num_keyframe = num_keyframe
    self.dropout_keep_prob = dropout_keep_prob

    # Pretrained ResNet-101 backbone; swap its final fc layer so the
    # classifier matches our class count.
    backbone = resnet101(pretrained=True)
    in_features = backbone.fc.in_features
    backbone.fc = nn.Linear(in_features, num_classes)
    self.resnet_feature = backbone
def generate_model(opt):
    """Instantiate the 3D network selected by ``opt`` and return it together
    with the parameter group to optimize.

    Returns ``(model, parameters)``: when ``opt.pretrain_path`` is set the
    checkpoint is loaded, the classifier is re-sized to
    ``opt.n_finetune_classes`` and only the fine-tuning parameters (from
    ``opt.ft_begin_index`` on) are returned; otherwise all parameters are.
    """
    assert opt.model in [
        'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet'
    ]

    # Each branch validates the depth, imports the matching
    # get_fine_tuning_parameters, then resolves the constructor by name —
    # the assert guarantees the attribute exists.
    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]
        from models.resnet import get_fine_tuning_parameters
        build = getattr(resnet, 'resnet{}'.format(opt.model_depth))
        model = build(
            num_classes=opt.n_classes,
            shortcut_type=opt.resnet_shortcut,
            sample_size=opt.sample_size,
            sample_duration=opt.sample_duration)
    elif opt.model == 'wideresnet':
        assert opt.model_depth in [50]
        from models.wide_resnet import get_fine_tuning_parameters
        model = wide_resnet.resnet50(
            num_classes=opt.n_classes,
            shortcut_type=opt.resnet_shortcut,
            k=opt.wide_resnet_k,
            sample_size=opt.sample_size,
            sample_duration=opt.sample_duration)
    elif opt.model == 'resnext':
        assert opt.model_depth in [50, 101, 152]
        from models.resnext import get_fine_tuning_parameters
        build = getattr(resnext, 'resnet{}'.format(opt.model_depth))
        model = build(
            num_classes=opt.n_classes,
            shortcut_type=opt.resnet_shortcut,
            cardinality=opt.resnext_cardinality,
            sample_size=opt.sample_size,
            sample_duration=opt.sample_duration)
    elif opt.model == 'preresnet':
        assert opt.model_depth in [18, 34, 50, 101, 152, 200]
        from models.pre_act_resnet import get_fine_tuning_parameters
        build = getattr(pre_act_resnet, 'resnet{}'.format(opt.model_depth))
        model = build(
            num_classes=opt.n_classes,
            shortcut_type=opt.resnet_shortcut,
            sample_size=opt.sample_size,
            sample_duration=opt.sample_duration)
    elif opt.model == 'densenet':
        assert opt.model_depth in [121, 169, 201, 264]
        from models.densenet import get_fine_tuning_parameters
        build = getattr(densenet, 'densenet{}'.format(opt.model_depth))
        model = build(
            num_classes=opt.n_classes,
            sample_size=opt.sample_size,
            sample_duration=opt.sample_duration)

    if not opt.no_cuda:
        # GPU path: move to CUDA and wrap in DataParallel before any
        # checkpoint load, so state-dict keys carry the 'module.' prefix.
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)

        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])

            # Replace the classifier head for the fine-tune class count
            # (DenseNet names it 'classifier', the ResNet variants 'fc').
            if opt.model == 'densenet':
                model.module.classifier = nn.Linear(
                    model.module.classifier.in_features,
                    opt.n_finetune_classes)
                model.module.classifier = model.module.classifier.cuda()
            else:
                model.module.fc = nn.Linear(model.module.fc.in_features,
                                            opt.n_finetune_classes)
                model.module.fc = model.module.fc.cuda()

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters
    else:
        # CPU path: same fine-tune surgery, without DataParallel/.cuda().
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']
            model.load_state_dict(pretrain['state_dict'])

            if opt.model == 'densenet':
                model.classifier = nn.Linear(model.classifier.in_features,
                                             opt.n_finetune_classes)
            else:
                model.fc = nn.Linear(model.fc.in_features,
                                     opt.n_finetune_classes)

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters

    return model, model.parameters()