def __init__(self, image_feature_dim, output_dim, time_step, adjacency_matrix,
             word_features, num_classes=80, word_feature_dim=300):
    super(SSGRL, self).__init__()
    self.resnet_101 = resnet101()
    self.num_classes = num_classes
    self.word_feature_dim = word_feature_dim
    self.image_feature_dim = image_feature_dim
    self.word_semantic = semantic(num_classes=self.num_classes,
                                  image_feature_dim=self.image_feature_dim,
                                  word_feature_dim=self.word_feature_dim)
    self.word_features = word_features
    self._word_features = self.load_features()
    self.adjacency_matrix = adjacency_matrix
    self._in_matrix, self._out_matrix = self.load_matrix()
    self.time_step = time_step
    self.graph_net = GGNN(input_dim=self.image_feature_dim,
                          time_step=self.time_step,
                          in_matrix=self._in_matrix,
                          out_matrix=self._out_matrix)
    self.output_dim = output_dim
    self.fc_output = nn.Linear(2 * self.image_feature_dim, self.output_dim)
    self.classifiers = Element_Wise_Layer(self.num_classes, self.output_dim)
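# Hedged usage sketch (not in the original snippet): how SSGRL might be
# instantiated, assuming the full class above and its dependencies (GGNN,
# semantic, Element_Wise_Layer, load_features, load_matrix) are in scope and the
# .npy paths below exist; all names and values here are illustrative assumptions.
word_features_path = 'data/glove_word_features.npy'   # assumed path
adjacency_matrix_path = 'data/adjacency_matrix.npy'   # assumed path
ssgrl = SSGRL(image_feature_dim=2048, output_dim=2048, time_step=3,
              adjacency_matrix=adjacency_matrix_path,
              word_features=word_features_path,
              num_classes=80, word_feature_dim=300)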
def main():
    global args
    args = parser.parse_args()
    print(args)
    best_acc = 0  # kept for the commented-out validation loop below

    # Create dataloader
    print('====> Creating dataloader...')
    data_dir = args.data
    train_list = args.trainlist
    dataset_name = args.dataset
    train_loader = get_dataset(dataset_name, data_dir, train_list)

    # load network
    if args.backbone == 'resnet50':
        model = resnet50(num_classes=args.num_classes)
    elif args.backbone == 'resnet101':
        model = resnet101(num_classes=args.num_classes)
    else:
        raise ValueError('unsupported backbone: {}'.format(args.backbone))

    if args.weights != '':
        try:
            # Load before wrapping in DataParallel; the checkpoint was saved from
            # model.module.state_dict(), so its keys carry no 'module.' prefix.
            ckpt = torch.load(args.weights)
            model.load_state_dict(ckpt['state_dict'])
            print('!!!load weights success !!! path is ', args.weights)
        except Exception:
            model_init(args.weights, model)
    model = torch.nn.DataParallel(model)
    model.cuda()

    mkdir_if_missing(args.save_dir)
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=10e-3)
    criterion = nn.CrossEntropyLoss().cuda()
    cudnn.benchmark = True

    for epoch in range(args.start_epoch, args.epochs + 1):
        # try not adjusting the learning rate
        # adjust_lr(optimizer, epoch)
        train(train_loader, model, criterion, optimizer, epoch)
        if epoch % args.val_step == 0:
            save_checkpoint(model, epoch, optimizer)
        '''
        if epoch % args.val_step == 0:
            acc = validate(test_loader, model, criterion)
            is_best = acc > best_acc
            best_acc = max(acc, best_acc)
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch,
            }, is_best=is_best, train_batch=60000, save_dir=args.save_dir,
               filename='checkpoint_ep' + str(epoch) + '.pth.tar')
        '''
    return
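# Hedged invocation sketch (not in the original snippet): the flag names mirror
# the args attributes used above, but the actual argparse definitions, the
# script name, and every value below are assumptions.
#   python train.py --dataset <name> --data /path/to/images \
#       --trainlist /path/to/train_list.txt --backbone resnet101 \
#       --num_classes 751 --epochs 60 --start_epoch 1 --val_step 5 \
#       --save_dir ./checkpoints --weights ''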
def __init__(self, net_type, image_size=32, args=None):
    super(StrongDisc, self).__init__()
    self.net_type = net_type
    if net_type == 'inception_v3':
        self.net = inception_v3.inception_v3(pretrained=True, image_size=image_size)
    elif net_type == 'resnet18':
        self.net = resnet.resnet18(pretrained=True)
    elif net_type == 'resnet34':
        self.net = resnet.resnet34(pretrained=True)
    elif net_type == 'resnet50':
        self.net = resnet.resnet50(pretrained=True)
    elif net_type == 'resnet101':
        self.net = resnet.resnet101(pretrained=True)
    elif net_type == 'darts':
        self.net = darts.AugmentCNNOneOutput(model_path=args.darts_model)
    else:
        raise ValueError('unsupported net_type: {}'.format(net_type))
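# Hedged usage sketch (not in the original snippet): the ResNet branches need
# neither image_size nor the args object, so a discriminator could be built as
# below; the variable name is an illustrative assumption, and pretrained=True is
# assumed to pull ImageNet weights via the local resnet module.
disc = StrongDisc('resnet101')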
def main():
    global args
    args = parser.parse_args()
    print(args)

    # Create dataloader
    print('====> Creating dataloader...')
    query_dir = args.querypath
    query_list = args.querylist
    gallery_dir = args.gallerypath
    gallery_list = args.gallerylist
    dataset_name = args.dataset
    query_loader, gallery_loader = get_dataset(dataset_name, query_dir, query_list,
                                               gallery_dir, gallery_list)

    # load network
    if args.backbone == 'resnet50':
        model = resnet50(num_classes=args.num_classes)
    elif args.backbone == 'resnet101':
        model = resnet101(num_classes=args.num_classes)

    print(args.weights)
    if args.weights != '':
        try:
            model = torch.nn.DataParallel(model)
            ckpt = torch.load(args.weights)
            model.load_state_dict(ckpt['state_dict'])
            print('!!!load weights success !!! path is ', args.weights)
        except Exception:
            print('!!!load weights failed !!! path is ', args.weights)
            return
    else:
        print('!!!Load Weights PATH ERROR!!!')
        return
    model.cuda()

    mkdir_if_missing(args.save_dir)
    cudnn.benchmark = True
    evaluate(query_loader, gallery_loader, model)
    return
def define_model(model_type, pretrained_path='', neighbour_slice=args.neighbour_slice,
                 input_type=args.input_type, output_type=args.output_type):
    if input_type == 'diff_img':
        input_channel = neighbour_slice - 1
    else:
        input_channel = neighbour_slice

    if model_type == 'prevost':
        model_ft = generators.PrevostNet()
    elif model_type == 'resnext50':
        model_ft = resnext.resnet50(sample_size=2, sample_duration=16, cardinality=32)
        model_ft.conv1 = nn.Conv3d(in_channels=1, out_channels=64, kernel_size=(3, 7, 7),
                                   stride=(1, 2, 2), padding=(1, 3, 3), bias=False)
    elif model_type == 'resnext101':
        model_ft = resnext.resnet101(sample_size=2, sample_duration=16, cardinality=32)
        model_ft.conv1 = nn.Conv3d(in_channels=1, out_channels=64, kernel_size=(3, 7, 7),
                                   stride=(1, 2, 2), padding=(1, 3, 3), bias=False)
        # model_ft.conv1 = nn.Conv3d(neighbour_slice, 64, kernel_size=7, stride=(1, 2, 2),
        #                            padding=(3, 3, 3), bias=False)
    elif model_type == 'resnet152':
        model_ft = resnet.resnet152(pretrained=True)
        model_ft.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False)
    elif model_type == 'resnet101':
        model_ft = resnet.resnet101(pretrained=True)
        model_ft.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False)
    elif model_type == 'resnet50':
        model_ft = resnet.resnet50(pretrained=True)
        model_ft.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False)
    elif model_type == 'resnet34':
        model_ft = resnet.resnet34(pretrained=False)
        model_ft.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False)
    elif model_type == 'resnet18':
        model_ft = resnet.resnet18(pretrained=True)
        model_ft.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False)
    elif model_type == 'mynet':
        model_ft = mynet.resnet50(sample_size=2, sample_duration=16, cardinality=32)
        model_ft.conv1 = nn.Conv3d(in_channels=1, out_channels=64, kernel_size=(3, 7, 7),
                                   stride=(1, 2, 2), padding=(0, 3, 3), bias=False)
    elif model_type == 'mynet2':
        model_ft = generators.My3DNet()
    elif model_type == 'p3d':
        model_ft = p3d.P3D63()
        model_ft.conv1_custom = nn.Conv3d(1, 64, kernel_size=(1, 7, 7), stride=(1, 2, 2),
                                          padding=(0, 3, 3), bias=False)
    elif model_type == 'densenet121':
        model_ft = densenet.densenet121()
    else:
        print('network type of <{}> is not supported, use original instead'.format(model_type))
        model_ft = generators.PrevostNet()

    num_ftrs = model_ft.fc.in_features
    if model_type == 'mynet':
        num_ftrs = 384
    elif model_type == 'prevost':
        num_ftrs = 576

    if output_type == 'average_dof' or output_type == 'sum_dof':
        # model_ft.fc = nn.Linear(128, 6)
        model_ft.fc = nn.Linear(num_ftrs, 6)
    else:
        # model_ft.fc = nn.Linear(128, (neighbour_slice - 1) * 6)
        model_ft.fc = nn.Linear(num_ftrs, (neighbour_slice - 1) * 6)

    # if args.training_mode == 'finetune':
    #     model_path = path.join(results_dir, args.model_filename)
    #     if path.isfile(model_path):
    #         print('Loading model from <{}>...'.format(model_path))
    #         model_ft.load_state_dict(torch.load(model_path))
    #         print('Done')
    #     else:
    #         print('<{}> not exists! Training from scratch...'.format(model_path))

    if pretrained_path:
        if path.isfile(pretrained_path):
            print('Loading model from <{}>...'.format(pretrained_path))
            model_ft.load_state_dict(torch.load(pretrained_path, map_location='cuda:0'))
            # model_ft.load_state_dict(torch.load(pretrained_path))
            print('Done')
        else:
            print('<{}> not exists! Training from scratch...'.format(pretrained_path))
    else:
        print('Train this model from scratch!')

    model_ft.cuda()
    model_ft = model_ft.to(device)
    print('define model device {}'.format(device))
    return model_ft
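# Hedged usage sketch (not in the original snippet): building the ResNet-101
# variant and running a dummy forward pass. The slice count, output_type, and
# image size below are illustrative assumptions; any input_type other than
# 'diff_img' keeps input_channel equal to neighbour_slice.
model_ft = define_model('resnet101', pretrained_path='',
                        neighbour_slice=5, input_type='org_img',
                        output_type='average_dof')
dummy = torch.zeros(1, 5, 224, 224).to(device)   # (batch, slices-as-channels, H, W)
print(model_ft(dummy).shape)                      # expected: torch.Size([1, 6])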
import torch
import torch.nn as nn

from networks import resnet

net = resnet.resnet101(pretrained=False)
print(net)

# children(): the immediate sub-modules, i.e. the same top-level hierarchy as net
for i, layer in enumerate(net.children()):
    print(i, layer)

# modules(): the fully expanded hierarchy of net, including nested modules
for i, layer in enumerate(net.modules()):
    print(i, layer)

# count layers sequentially, skipping nn.Sequential and Bottleneck containers so
# that no layer is counted twice
layer_cnt = 0
for i, layer in enumerate(net.modules()):
    if not isinstance(layer, nn.Sequential) and not isinstance(layer, resnet.Bottleneck):
        layer_cnt = layer_cnt + 1
        print(i, layer)
print('total layer {}'.format(layer_cnt))
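# A related hedged sketch (assumes the same `net` as above): named_modules()
# pairs each sub-module with its dotted path, which is usually more convenient
# than a bare index when picking layers to freeze or to attach hooks to.
for name, layer in net.named_modules():
    if isinstance(layer, nn.Conv2d):
        print(name, layer.in_channels, layer.out_channels, layer.kernel_size)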
def __init__(self, layers=50, bins=(1, 2, 3, 6), dropout=0.1, classes=2, zoom_factor=8,
             use_ppm=True, criterion=nn.CrossEntropyLoss(ignore_index=255),
             BatchNorm=nn.BatchNorm2d, pretrained=True):
    super(PSPNet, self).__init__()
    assert layers in [18, 50, 101, 152]
    assert 2048 % len(bins) == 0
    assert classes > 1
    assert zoom_factor in [1, 2, 4, 8]
    self.zoom_factor = zoom_factor
    self.use_ppm = use_ppm
    self.criterion = criterion
    models.BatchNorm = BatchNorm

    if layers == 50:
        resnet = models.resnet50(pretrained=pretrained)
    elif layers == 101:
        resnet = models.resnet101(pretrained=pretrained)
    elif layers == 18:
        resnet = models.resnet18(pretrained=pretrained)
    else:
        resnet = models.resnet152(pretrained=pretrained)

    if layers == 18:
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = \
            resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
        for n, m in self.layer3.named_modules():
            if 'conv1' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv1' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        fea_dim = 512
    else:
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                    resnet.conv2, resnet.bn2, resnet.relu,
                                    resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = \
            resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        fea_dim = 2048

    if use_ppm:
        self.ppm = PPM(fea_dim, int(fea_dim / len(bins)), bins, BatchNorm)
        fea_dim *= 2
    self.cls = nn.Sequential(
        nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
        BatchNorm(512),
        nn.ReLU(inplace=True),
        nn.Dropout2d(p=dropout),
        nn.Conv2d(512, classes, kernel_size=1) if classes > 2 else nn.Conv2d(512, 1, kernel_size=1))
    if self.training:
        self.aux = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False) if layers != 18
            else nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            BatchNorm(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
            nn.Conv2d(256, classes, kernel_size=1) if classes > 2 else nn.Conv2d(256, 1, kernel_size=1))
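# Hedged usage sketch (not in the original snippet): constructing a PSPNet-101
# for binary segmentation; with classes=2 the classifier head above emits a
# single-channel logit map. The values below are illustrative assumptions, and
# pretrained=False avoids downloading backbone weights.
pspnet = PSPNet(layers=101, classes=2, zoom_factor=8, use_ppm=True,
                dropout=0.1, pretrained=False)
print(sum(p.numel() for p in pspnet.parameters()))   # rough parameter-count check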