def _get_resnet(self):
    """Build a resnet101 backbone, load its weights from ``self._resnet_model_path``,
    wrap it in ``myResnet``, and return the wrapper in eval mode."""
    backbone = getattr(resnet, 'resnet101')()
    backbone.load_state_dict(torch.load(self._resnet_model_path))
    wrapped = myResnet(backbone)
    wrapped.eval()
    return wrapped
def main(params):
    """Extract fc and attention features for every image in the input json.

    params keys used: 'model', 'model_root', 'input_json', 'output_dir',
    'att_size'.  Writes <output_dir>_fc/<imgid>.npy (fc feature) and
    <output_dir>_att/<imgid>.npz with key 'feat' (attention feature map).
    Relies on module-level globals: device, resnet, myResnet, preprocess.
    """
    print(device)

    # Build the CNN and load its ImageNet weights from model_root.
    net = getattr(resnet, params['model'])()
    net.load_state_dict(
        torch.load(os.path.join(params['model_root'], params['model'] + '.pth')))
    my_resnet = myResnet(net)
    my_resnet.to(device)
    my_resnet.eval()

    # FIX: close the json file deterministically (original leaked the handle).
    with open(params['input_json'], 'r') as f:
        imgs = json.load(f)
    imgs = imgs['images']
    N = len(imgs)

    seed(123)  # make reproducible

    dir_fc = params['output_dir'] + '_fc'
    dir_att = params['output_dir'] + '_att'
    # makedirs(exist_ok=True) avoids the isdir/mkdir check-then-act race.
    os.makedirs(dir_fc, exist_ok=True)
    os.makedirs(dir_att, exist_ok=True)

    for i, img in enumerate(imgs):
        print('The image path is', img['file_path'])
        I = skimage.io.imread(img['file_path'])

        # FIX: expand grayscale images BEFORE slicing to 3 channels.  The
        # original did I[:, :, 0:3] first, which raises IndexError ("too many
        # indices") on a 2-D array, so its grayscale branch was unreachable.
        if len(I.shape) == 2:
            I = I[:, :, np.newaxis]
            I = np.concatenate((I, I, I), axis=2)
        I = I[:, :, 0:3]  # drop a possible alpha channel

        I = I.astype('float32') / 255.0
        I = torch.from_numpy(I.transpose([2, 0, 1])).to(device)
        I = preprocess(I)
        with torch.no_grad():
            tmp_fc, tmp_att = my_resnet(I, params['att_size'])

        # Persist features keyed by the dataset image id.
        np.save(os.path.join(dir_fc, str(img['imgid'])),
                tmp_fc.data.cpu().float().numpy())
        np.savez_compressed(os.path.join(dir_att, str(img['imgid'])),
                            feat=tmp_att.data.cpu().float().numpy())

        if i % 10 == 0:
            print('processing %d/%d (%.2f%% done)' % (i, N, i * 100.0 / N))
    print('wrote ', params['output_dir'])
def main(params):
    """Extract fc and attention features for a COCO-style image list.

    params keys used: 'model', 'model_root', 'input_json', 'images_root',
    'output_dir', 'att_size'.  Writes <output_dir>_fc/<cocoid>.npy and
    <output_dir>_att/<cocoid>.npz with key 'feat' for every image.
    Relies on module-level globals: resnet, myResnet, preprocess.
    """
    net = getattr(resnet, params['model'])()
    net.load_state_dict(
        torch.load(os.path.join(params['model_root'], params['model'] + '.pth')))
    my_resnet = myResnet(net)
    my_resnet.cuda()
    my_resnet.eval()

    # FIX: close the json file deterministically (original leaked the handle).
    with open(params['input_json'], 'r') as f:
        imgs = json.load(f)
    imgs = imgs['images']
    N = len(imgs)

    seed(123)  # make reproducible

    dir_fc = params['output_dir'] + '_fc'
    dir_att = params['output_dir'] + '_att'
    # makedirs(exist_ok=True) avoids the isdir/mkdir check-then-act race.
    os.makedirs(dir_fc, exist_ok=True)
    os.makedirs(dir_att, exist_ok=True)

    for i, img in enumerate(imgs):
        # load the image
        I = skimage.io.imread(
            os.path.join(params['images_root'], img['filepath'], img['filename']))
        # handle grayscale input images
        if len(I.shape) == 2:
            I = I[:, :, np.newaxis]
            I = np.concatenate((I, I, I), axis=2)

        I = I.astype('float32') / 255.0
        I = torch.from_numpy(I.transpose([2, 0, 1])).cuda()
        I = preprocess(I)
        with torch.no_grad():
            tmp_fc, tmp_att = my_resnet(I, params['att_size'])

        # Persist features keyed by the COCO image id.
        np.save(os.path.join(dir_fc, str(img['cocoid'])),
                tmp_fc.data.cpu().float().numpy())
        np.savez_compressed(os.path.join(dir_att, str(img['cocoid'])),
                            feat=tmp_att.data.cpu().float().numpy())

        if i % 1000 == 0:
            # FIX: '{:.2f%}' is an invalid format specifier and raised
            # ValueError at the first progress print; '%' belongs outside
            # the replacement field.
            print(
                'Grid Feature Preprocess:',
                'processing {}/{} images ({:.2f}% done)'.format(
                    i, N, i * 100.0 / N))
    print('wrote ', params['output_dir'])
# Measure FLOPs and parameter count of the captioning CNN (a resnet101
# backbone wrapped in myResnet) at a 640x427 input with ptflops'
# get_model_complexity_info.
cnn_model = 'resnet101'
my_resnet = getattr(resnet, cnn_model)()
my_resnet.load_state_dict(
    torch.load('data/imagenet_weights/' + cnn_model + '.pth'))
net = myResnet(my_resnet)

flops, params = get_model_complexity_info(
    net, (3, 640, 427), as_strings=True, print_per_layer_stat=True)
print('{:<30} {:<8}'.format('Computational complexity: ', flops))
print('{:<30} {:<8}'.format('Number of parameters: ', params))
# Common video resolutions, for picking the input size above:
# 144P(256×144) 240p(426×240) 360P(640×360) 480P(854×480)
# Profile MACs and parameter count of the captioning CNN with thop.profile.

# Input arguments and options
parser = argparse.ArgumentParser()
# Input paths
parser.add_argument('--cnn_model', type=str, default='resnet101',
                    help='resnet101, resnet152')
opts.add_eval_options(parser)
opts.add_diversity_opts(parser)
opt = parser.parse_args()

cnn_model = 'resnet101'
my_resnet = getattr(resnet, cnn_model)()
my_resnet.load_state_dict(
    torch.load('data/imagenet_weights/' + cnn_model + '.pth'))
model = myResnet(my_resnet)

# FIX: renamed 'input' -> 'dummy_input'; the original shadowed the builtin.
# NOTE(review): this tensor has no batch dimension (3, 640, 480); most conv
# backbones expect (1, 3, H, W) — confirm myResnet accepts unbatched input.
dummy_input = torch.randn(3, 640, 480)
model.eval()  # train(False) followed by eval() was redundant; one call suffices
macs, params = profile(model, inputs=(dummy_input, ))
print('{:<30} {:<8}'.format('Computational complexity: ', macs / pow(10, 9)))  # GMACs
print('{:<30} {:<8}'.format('Number of parameters: ', params / pow(10, 6)))  # M
from PIL import Image

# Configuration for the retina feature-extraction / resize experiment.
params = {
    'input_json': 'data/dataset_retina_resize.json',
    'output_json': 'data/retina_resize',
    'images_root': '',
    'att_size': 14,
    'model': 'resnet101',
    'model_root': 'data/imagenet_weights',
}
newsize = (256, 256)

# Prefer GPU when available; everything below targets this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Build the CNN, load its ImageNet weights, and wrap it for feature extraction.
net = getattr(resnet, params['model'])()
net.load_state_dict(
    torch.load(os.path.join(params['model_root'], params['model'] + '.pth')))
my_resnet = myResnet(net)
my_resnet.to(device)
my_resnet.eval()

file_path = '/media/hdd/data/imcaption/retina_dataset_resize/resize/1_2_826_0_1_3680043_9_5115_636252259520332334/1_3_6_1_4_1_33437_10_4_11578754_13134123662_18471_4_1_0_0.png'

# Resample one image from the original dataset to `newsize`.
original_image_path = '/media/hdd/data/imcaption/retina_dataset_resize/out/1_2_826_0_1_3680043_9_5115_636252259520332334/1_3_6_1_4_1_33437_10_4_11578754_13134123662_18471_4_1_0_0.png'
image = Image.open(original_image_path)
reim = image.resize(newsize)

# NOTE(review): imsavepath is assigned but the resized image is never saved
# (the save call was commented out in the original) — confirm intent.
imsavepath = '/media/hdd/data/imcaption/retina_dataset_resize/resize/1_2_826_0_1_3680043_9_5115_636252259520332334/1_3_6_1_4_1_33437_10_4_11578754_13134123662_18471_4_1_0_0.png'