class Pytorch2Caffe:
    """Convert a PyTorch model to Caffe via the MMdnn intermediate
    representation (IR): PyTorch -> IR (.pb/.npy) -> Caffe code -> model."""

    def __init__(self, model, save_root, save_name, input_shape):
        """
        model:       serialized PyTorch model (path or object), handed
                     straight to PytorchParser.
        save_root:   directory that receives every intermediate/output file.
        save_name:   base file name (no extension) shared by all artifacts.
        input_shape: [C, H, W] shape the parser traces the model with.
        """
        self.parse = PytorchParser(model, input_shape)
        self.save_root = save_root
        self.save_name = save_name
        # self.save_root = os.path.dirname(os.path.realpath(__file__)) + '/save/mmdnn/'
        # BUG FIX: the original used os.path.join(root, name, '.ext'), which
        # yields "<root>/<name>/.ext" (extension as a path component) instead
        # of "<root>/<name>.ext".
        base = os.path.join(self.save_root, save_name)
        self.save = {
            'structurejson': base + '.json',
            'structurepb': base + '.pb',
            'weights': base + '.npy',
            'caffenetwork': base + '.py',
            'caffeweights': base + '.cnpy',
            'caffemodel': base,
            'caffeproto': base,
        }

    def start(self):
        """Run the full conversion pipeline, printing progress per stage."""
        print("start to do pytorch to IR")
        # BUG FIX: run() needs the per-model prefix, not the bare directory,
        # so the emitted IR files line up with the paths in self.save.
        self.parse.run(os.path.join(self.save_root, self.save_name))
        print("done! then to do IR to caffe code")
        emitter = CaffeEmitter((self.save['structurepb'], self.save['weights']))
        emitter.run(self.save['caffenetwork'], self.save['caffeweights'], 'test')
        print("done! then to do ccode to model")
        MainModel = imp.load_source('MainModel', self.save['caffenetwork'])
        # BUG FIX: the original had misplaced brackets —
        # self.save['caffeweights', self.save['caffemodel'], ...] — a
        # tuple-keyed dict lookup that raises KeyError at runtime. Pass each
        # path as its own positional argument (network, weights, dump path),
        # matching MMdnn's save_model signature.
        save_model(MainModel, self.save['caffenetwork'],
                   self.save['caffeweights'], self.save['caffemodel'])
        print("done!!!!!!")
class Pytorch2Caffe:
    """Convert a PyTorch model to Caffe through the MMdnn IR, then rewrite
    the input blobs of the emitted .prototxt via rename_input()."""

    # NOTE(review): the mutable default is kept as a list on purpose — it is
    # never mutated here, and the downstream parser may rely on list-only
    # operations (e.g. list concatenation) on the shape.
    def __init__(self, model, save_root, save_name, input_shape=[3, 256, 512]):
        """
        model:       serialized PyTorch model (path or object) for PytorchParser.
        save_root:   directory that receives every intermediate/output file.
        save_name:   base file name (no extension) shared by all artifacts.
        input_shape: [C, H, W] trace shape; also used when renaming inputs.
        """
        self.save_root = save_root
        self.save_name = save_name
        self.parse = PytorchParser(model, input_shape)
        self.input_shape = input_shape
        base = os.path.join(self.save_root, save_name)
        self.save = {
            'structurejson': base + '.json',
            'structurepb': base + '.pb',
            'weights': base + '.npy',
            'caffenetwork': base + '.py',
            'caffeweights': base + '.cnpy',
            'caffemodel': base,
        }

    def start(self):
        """Run the full pipeline and rename the prototxt inputs in place."""
        print("start to do pytorch to IR")
        # BUG FIX: plain string concatenation (save_root + save_name) drops
        # the path separator whenever save_root has no trailing '/', so the
        # IR files would not match the os.path.join-built paths in self.save.
        self.parse.run(os.path.join(self.save_root, self.save_name))
        print("done! then to do IR to caffe code")
        emitter = CaffeEmitter((self.save['structurepb'], self.save['weights']))
        emitter.run(self.save['caffenetwork'], self.save['caffeweights'], 'test')
        print("done! then to do ccode to model")
        MainModel = imp.load_source('MainModel', self.save['caffenetwork'])
        save_model(MainModel, self.save['caffenetwork'],
                   self.save['caffeweights'], self.save['caffemodel'])
        print('start to rename inputs')
        proto_path = self.save['caffemodel'] + '.prototxt'
        # BUG FIX: the original leaked both file handles (open(...).readlines()
        # and an fp that was never closed); `with` flushes and closes
        # deterministically before the rewritten file is needed downstream.
        with open(proto_path, 'r') as fp:
            lines = fp.readlines()
        new_lines = rename_input(lines, self.input_shape)
        with open(proto_path, 'w') as fp:
            fp.writelines(new_lines)
        print("^~^^~^^~^^~^")
def PytorchParse(architecture_name, image_path):
    """Download a pretrained PyTorch model, record its prediction on
    image_path, convert it to MMdnn IR files, and return that prediction.

    architecture_name: model key understood by pytorch_extractor / TestKit.
    image_path:        image used both for the reference inference and to
                       size the conversion trace.
    Returns the original (pre-conversion) model prediction, to be compared
    against the converted model's output.
    Raises ValueError if the expected input size cannot be recovered from
    the registered preprocessing function.
    """
    from mmdnn.conversion.examples.pytorch.extractor import pytorch_extractor
    from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser

    # Download the model weights into the shared test cache.
    architecture_file = pytorch_extractor.download(architecture_name,
                                                   TestModels.cachedir)

    # Ground-truth prediction of the unconverted model.
    original_predict = pytorch_extractor.inference(architecture_name,
                                                   architecture_file,
                                                   image_path)
    del pytorch_extractor

    # Recover the expected input size by scraping the source text of the
    # registered preprocessing function: its argument right after 'path,' is
    # the size.  NOTE(review): this is inherently fragile — it depends on
    # the exact formatting of TestKit.preprocess_func['pytorch'][...].
    func = TestKit.preprocess_func['pytorch'][architecture_name]
    import inspect
    funcstr = inspect.getsource(func)
    arity = len(funcstr.split(','))
    if arity == 3:
        # Size is the last argument: "...path, SIZE)"
        size = int(funcstr.split('path,')[1].split(')')[0])
    elif arity in (4, 11):
        # Size is followed by more arguments: "...path, SIZE, ..."
        # (the original had two identical branches for 4 and 11).
        size = int(funcstr.split('path,')[1].split(',')[0])
    else:
        # BUG FIX: the original fell through with `size` unbound, producing
        # a confusing NameError below; fail loudly instead.
        raise ValueError(
            'cannot determine input size for %s (unexpected preprocess '
            'signature)' % architecture_name)

    # Convert the original model to IR (.json/.pb/.npy under tmpdir).
    IR_file = TestModels.tmpdir + 'pytorch_' + architecture_name + "_converted"
    parser = PytorchParser(architecture_file, [3, size, size])
    parser.run(IR_file)
    del parser
    del PytorchParser
    return original_predict
from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser

# Convert the serialized PyTorch model below into MMdnn IR files
# (out/sen.json / out/sen.pb / out/sen.npy).
MODEL_PATH = '/media/slomkarafa/HDD0/Projects/android/py_to_tf/model.pth'
INPUT_SHAPE = [3, 224, 224]
IR_PREFIX = 'out/sen'

ir_parser = PytorchParser(MODEL_PATH, INPUT_SHAPE)
# ir_parser = PytorchParser(dens, (3, 224, 224))
ir_parser.run(IR_PREFIX)
# NOTE(review): incomplete fragment — the `try:` matching the `except:`
# below (plus the sys/torch imports and the HardNet class) lies above this
# excerpt; indentation is reconstructed, confirm against the full file.
    DO_CUDA = sys.argv[3] != 'cpu'
# NOTE(review): bare except hides every error (including typos), not just
# bad CLI usage — should catch IndexError specifically.
except:
    print(
        "Wrong input format. Try ./extract_hardnet_desc_from_hpatches_file.py imgs/ref.png out.txt gpu"
    )
    sys.exit(1)

#model_weights = '../pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth'
# Hard-coded path to a HardNet training checkpoint (dict with 'state_dict').
model_weights = '/home/kangrong/HardNet/hardnet/code/data/models/withoutpool/liberty_train_withoutpool/_liberty_min_as_fliprot/checkpoint_8.pth'
model = HardNet()
checkpoint = torch.load(model_weights)
model.load_state_dict(checkpoint['state_dict'])

# Convert the restored model to MMdnn IR; HardNet takes 1-channel 32x32 patches.
from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser
size = 32
pytorchparser = PytorchParser(model, [1, size, size])
IR_file = 'TFeat_withoutpool'
pytorchparser.run(IR_file)

# NOTE(review): everything below is dead code commented out with a
# triple-quoted string; the closing ''' is beyond this excerpt.
'''
model.eval()
if DO_CUDA:
    model.cuda()
    print('Extracting on GPU')
else:
    print('Extracting on CPU')
    model = model.cpu()
image = cv2.imread(input_img_fname, 0)
h, w = image.shape
print(h, w)
n_patches = int(h / w)
print('Amount of patches: {}'.format(n_patches))
def main():
    """Build the requested network, restore its weights, trace it once,
    convert it to MMdnn IR files under args.outpath, then hand off to
    _convert() for the final framework export (CoreML by default)."""
    parser = _get_parser()
    args = parser.parse_args()

    # Map CLI model names to constructors.
    builders = {
        "VGG19": lambda: models.vgg19(pretrained=True),
        # BUG FIX: the original branch read `args = Inceptionv3(...)`,
        # clobbering the parsed CLI arguments and leaving `model` unbound.
        "Inception3": lambda: Inceptionv3(aux_logits=False),
        "ResNet": Resnet,
        "MobileNet": MobileNet,
        "MobileNet_": MobileNet_,
        "MobileNet_2": MobileNet_2,
        "MobileNet_3": MobileNet_3,
        "MobileNetV2": MobileNetV2,
    }
    if args.NN in builders:
        model = builders[args.NN]()
    else:
        # SECURITY NOTE(review): eval() of a CLI value executes arbitrary
        # code — keep only if this script never sees untrusted input.
        model = eval(args.NN)()

    # Fill in defaults for any unspecified paths (derived from the model name).
    if args.framework is None:
        args.framework = "coreml"
    if args.inputNetwork is None:
        args.inputNetwork = os.path.join(args.outpath, args.NN + ".pb")
    if args.inputWeight is None:
        args.inputWeight = os.path.join(args.outpath, args.NN + ".npy")
    if args.output is None:
        args.output = os.path.join(args.outpath, args.NN + ".mlmodel")
    IR_file = args.NN

    # Restore weights either from a full training checkpoint (state_dict
    # nested under 'state_dict') or from a plain state-dict file.
    if args.is_checkpoint == 1:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
    else:
        model.load_state_dict(torch.load(args.resume))

    # Inference mode on CPU in float32 — the parser traces on CPU.
    model.eval()
    model = model.cpu().float()

    # Trace once with a dummy batch so the parser sees concrete shapes.
    dummy_input = Variable(torch.randn(1, 3, args.input_size, args.input_size))
    model(dummy_input)

    pytorchparser = PytorchParser(model, [3, args.input_size, args.input_size])
    pytorchparser.run(os.path.join(args.outpath, IR_file))
    _convert(args)
# NOTE(review): incomplete fragment of a training script — `eval_loss`,
# `train_loss`, `train_acc`, `train_data`, `test_data`, `model`,
# `criterion`, `device` and `e` are all defined above this excerpt
# (the first part presumably sits inside a `for e in ...` epoch loop);
# indentation is reconstructed, confirm against the full file.

# Evaluation pass: accumulate loss and accuracy over the test set.
eval_acc = 0
model.eval()
for img, label in test_data:
    img, label = img.to(device), label.to(device)
    output = model(img)
    loss = criterion(output, label)
    eval_loss += loss.item()
    _, pred = output.max(1)
    num_correct = (pred == label).sum().item()
    acc = num_correct / img.shape[0]  # per-batch accuracy
    eval_acc += acc

# Per-epoch averages (train_* accumulated during the training pass above).
etrain_loss = train_loss / len(train_data)
etrain_acc = train_acc / len(train_data)
eeval_loss = eval_loss / len(test_data)
eeval_acc = eval_acc / len(test_data)
print(
    'epoch:{}, Train Loss:{:.6f}, Train Acc:{:.6f}, Eval Loss: {:.6f}, Eval Acc:{:.6f}'
    .format(e, etrain_loss, etrain_acc, eeval_loss, eeval_acc))

# Reload the best weights, then save both a full-model pickle and a
# state-dict copy.
model.load_state_dict(torch.load('vgg13.pth'))
print('loading vgg13.pth')
torch.save(model, 'cpuvgg13.pth')
print('Finish')
torch.save(model.state_dict(), 'vgg13.pth')

# Convert to MMdnn IR; the model takes 1-channel 48x48 inputs.
# NOTE(review): loads 'model_vgg13.pth', not the 'vgg13.pth' saved above —
# verify this is intentional.
from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser
parser = PytorchParser('model_vgg13.pth', [1, 48, 48])
IR_file = 'vgg13bn'
parser.run(IR_file)