class Pytorch2Caffe:
    """Convert a PyTorch model to a Caffe model via the MMdnn IR pipeline.

    Pipeline: PyTorch checkpoint -> MMdnn IR (.pb/.npy) -> generated Caffe
    network code (.py) + weights (.cnpy) -> final caffemodel/prototxt.
    """

    def __init__(self, model, save_root, save_name, input_shape=None):
        """Prepare the parser and the artifact paths.

        :param model: PyTorch model (path or module) handed to PytorchParser.
        :param save_root: directory where all artifacts are written.
        :param save_name: base filename (without extension) for artifacts.
        :param input_shape: CHW input shape; defaults to [3, 256, 512].
        """
        # BUG FIX: original used a mutable default argument ([3, 256, 512]);
        # keep the same effective default without the shared-list pitfall.
        if input_shape is None:
            input_shape = [3, 256, 512]
        self.save_root = save_root
        self.save_name = save_name
        self.parse = PytorchParser(model, input_shape)
        self.input_shape = input_shape
        # Paths of every intermediate and final artifact, keyed by stage.
        self.save = {
            'structurejson': os.path.join(self.save_root, save_name + '.json'),
            'structurepb': os.path.join(self.save_root, save_name + '.pb'),
            'weights': os.path.join(self.save_root, save_name + '.npy'),
            'caffenetwork': os.path.join(self.save_root, save_name + '.py'),
            'caffeweights': os.path.join(self.save_root, save_name + '.cnpy'),
            'caffemodel': os.path.join(self.save_root, save_name),
        }

    def start(self):
        """Run the full conversion and rewrite the prototxt input names."""
        print("start to do pytorch to IR")
        # NOTE(review): plain string concat, not os.path.join — save_root is
        # presumably expected to end with a separator; preserved as-is.
        self.parse.run(self.save_root + self.save_name)
        print("done! then to do IR to caffe code")
        emitter = CaffeEmitter((self.save['structurepb'], self.save['weights']))
        emitter.run(self.save['caffenetwork'], self.save['caffeweights'], 'test')
        print("done! then to do ccode to model")
        MainModel = imp.load_source('MainModel', self.save['caffenetwork'])
        save_model(MainModel, self.save['caffenetwork'],
                   self.save['caffeweights'], self.save['caffemodel'])
        print('start to rename inputs')
        # BUG FIX: both file handles were leaked (the writer was never closed,
        # so the rewritten prototxt might not be flushed); use `with`.
        with open(self.save['caffemodel'] + '.prototxt', 'r') as fp:
            lines = fp.readlines()
        new_lines = rename_input(lines, self.input_shape)
        with open(self.save['caffemodel'] + '.prototxt', 'w') as fp:
            fp.writelines(new_lines)
        print("^~^^~^^~^^~^")
class Pytorch2Caffe:
    """Convert a PyTorch model to a Caffe model via the MMdnn IR pipeline.

    Variant that additionally passes a separate 'caffeproto' path to
    save_model.
    """

    def __init__(self, model, save_root, save_name, input_shape):
        """Prepare the parser and the artifact paths.

        :param model: PyTorch model (path or module) handed to PytorchParser.
        :param save_root: directory where all artifacts are written.
        :param save_name: base filename (without extension) for artifacts.
        :param input_shape: CHW input shape list, e.g. [3, 224, 224].
        """
        self.parse = PytorchParser(model, input_shape)
        self.save_root = save_root
        # BUG FIX: the original passed each extension as a *third*
        # os.path.join component, yielding paths like 'root/name/.json'
        # instead of 'root/name.json'; append the extension instead.
        self.save = {
            'structurejson': os.path.join(self.save_root, save_name + '.json'),
            'structurepb': os.path.join(self.save_root, save_name + '.pb'),
            'weights': os.path.join(self.save_root, save_name + '.npy'),
            'caffenetwork': os.path.join(self.save_root, save_name + '.py'),
            'caffeweights': os.path.join(self.save_root, save_name + '.cnpy'),
            'caffemodel': os.path.join(self.save_root, save_name),
            # NOTE(review): same base path as 'caffemodel' in the original;
            # presumably save_model appends its own extensions — confirm.
            'caffeproto': os.path.join(self.save_root, save_name),
        }

    def start(self):
        """Run the full conversion pipeline."""
        print("start to do pytorch to IR")
        self.parse.run(self.save_root)
        print("done! then to do IR to caffe code")
        emitter = CaffeEmitter((self.save['structurepb'], self.save['weights']))
        emitter.run(self.save['caffenetwork'], self.save['caffeweights'], 'test')
        print("done! then to do ccode to model")
        MainModel = imp.load_source('MainModel', self.save['caffenetwork'])
        # BUG FIX: the original had mismatched brackets —
        # self.save['caffeweights', self.save['caffemodel'],
        # self.save['caffeproto']] indexes the dict with a tuple key and
        # raises KeyError at runtime. Pass the five arguments separately.
        save_model(MainModel, self.save['caffenetwork'],
                   self.save['caffeweights'], self.save['caffemodel'],
                   self.save['caffeproto'])
        print("done!!!!!!")
def __init__(self, model, save_root, save_name, input_shape):
    """Prepare the PyTorch->IR parser and the artifact path table.

    :param model: PyTorch model (path or module) handed to PytorchParser.
    :param save_root: directory where all artifacts are written.
    :param save_name: base filename (without extension) for artifacts.
    :param input_shape: CHW input shape list, e.g. [3, 224, 224].
    """
    self.parse = PytorchParser(model, input_shape)
    self.save_root = save_root
    # BUG FIX: the original passed each extension as a *third* os.path.join
    # component, producing 'root/name/.json' instead of 'root/name.json';
    # the extension must be appended to the filename.
    self.save = {
        'structurejson': os.path.join(self.save_root, save_name + '.json'),
        'structurepb': os.path.join(self.save_root, save_name + '.pb'),
        'weights': os.path.join(self.save_root, save_name + '.npy'),
        'caffenetwork': os.path.join(self.save_root, save_name + '.py'),
        'caffeweights': os.path.join(self.save_root, save_name + '.cnpy'),
        'caffemodel': os.path.join(self.save_root, save_name),
        # NOTE(review): same base path as 'caffemodel' — presumably the
        # saver appends its own extension; confirm against save_model.
        'caffeproto': os.path.join(self.save_root, save_name),
    }
def __init__(self, model, save_root, save_name, input_shape=[3, 256, 512]):
    """Set up the PyTorch->IR parser and the table of artifact paths.

    :param model: PyTorch model (path or module) handed to PytorchParser.
    :param save_root: directory where all artifacts are written.
    :param save_name: base filename (without extension) for artifacts.
    :param input_shape: CHW input shape, default [3, 256, 512].
    """
    self.save_root = save_root
    self.save_name = save_name
    self.parse = PytorchParser(model, input_shape)
    self.input_shape = input_shape
    # One artifact per pipeline stage: <save_root>/<save_name><suffix>.
    stage_suffixes = {
        'structurejson': '.json',
        'structurepb': '.pb',
        'weights': '.npy',
        'caffenetwork': '.py',
        'caffeweights': '.cnpy',
        'caffemodel': '',
    }
    base_path = os.path.join(self.save_root, save_name)
    self.save = {stage: base_path + suffix
                 for stage, suffix in stage_suffixes.items()}
def PytorchParse(architecture_name, image_path):
    """Download a PyTorch model, run inference, and convert it to MMdnn IR.

    :param architecture_name: model name known to the pytorch extractor.
    :param image_path: image used for the reference prediction.
    :returns: the original framework's prediction, for later comparison
        against the converted model's output.
    :raises ValueError: if the preprocess helper's input size cannot be
        inferred from its source text.
    """
    from mmdnn.conversion.examples.pytorch.extractor import pytorch_extractor
    from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser

    # Download the model and capture the original framework's prediction.
    architecture_file = pytorch_extractor.download(architecture_name,
                                                   TestModels.cachedir)
    original_predict = pytorch_extractor.inference(architecture_name,
                                                   architecture_file,
                                                   image_path)
    del pytorch_extractor

    # Recover the preprocessing input size by scraping the source text of
    # the registered preprocess helper (brittle, but mirrors upstream
    # MMdnn test code, which encodes the size as a literal argument).
    import inspect
    func = TestKit.preprocess_func['pytorch'][architecture_name]
    funcstr = inspect.getsource(func)
    arg_count = len(funcstr.split(','))
    if arg_count == 3:
        size = int(funcstr.split('path,')[1].split(')')[0])
    elif arg_count in (4, 11):
        # Both variants carry the size as the first value after 'path,'.
        size = int(funcstr.split('path,')[1].split(',')[0])
    else:
        # BUG FIX: the original if/elif chain silently fell through for any
        # other arity, leaving `size` unbound and raising NameError below.
        raise ValueError(
            "Cannot infer input size for '{}'".format(architecture_name))

    # Original framework -> IR.
    IR_file = TestModels.tmpdir + 'pytorch_' + architecture_name + "_converted"
    parser = PytorchParser(architecture_file, [3, size, size])
    parser.run(IR_file)
    del parser
    del PytorchParser

    return original_predict
# Standalone driver: convert a saved PyTorch checkpoint to MMdnn IR.
from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser

# Checkpoint to convert and the CHW input shape its graph is traced with.
checkpoint_path = '/media/slomkarafa/HDD0/Projects/android/py_to_tf/model.pth'
input_shape = [3, 224, 224]

ir_parser = PytorchParser(checkpoint_path, input_shape)
ir_parser.run('out/sen')
def _convert(args): if args.inputShape != None: inputshape = [] for x in args.inputShape: shape = x.split(',') inputshape.append([int(x) for x in shape]) else: inputshape = [None] if args.srcFramework == 'caffe': from mmdnn.conversion.caffe.transformer import CaffeTransformer transformer = CaffeTransformer(args.network, args.weights, "tensorflow", inputshape[0], phase=args.caffePhase) graph = transformer.transform_graph() data = transformer.transform_data() from mmdnn.conversion.caffe.writer import JsonFormatter, ModelSaver, PyWriter JsonFormatter(graph).dump(args.dstPath + ".json") print("IR network structure is saved as [{}.json].".format( args.dstPath)) prototxt = graph.as_graph_def().SerializeToString() with open(args.dstPath + ".pb", 'wb') as of: of.write(prototxt) print("IR network structure is saved as [{}.pb].".format(args.dstPath)) import numpy as np with open(args.dstPath + ".npy", 'wb') as of: np.save(of, data) print("IR weights are saved as [{}.npy].".format(args.dstPath)) return 0 elif args.srcFramework == 'caffe2': raise NotImplementedError("Caffe2 is not supported yet.") elif args.srcFramework == 'keras': if args.network != None: model = (args.network, args.weights) else: model = args.weights from mmdnn.conversion.keras.keras2_parser import Keras2Parser parser = Keras2Parser(model) elif args.srcFramework == 'tensorflow' or args.srcFramework == 'tf': if args.dstNodeName is None: raise ValueError( "Need to provide the output node of Tensorflow model.") assert args.network or args.weights if not args.network: if args.inNodeName is None: raise ValueError( "Need to provide the input node of Tensorflow model.") if inputshape is None: raise ValueError( "Need to provide the input node shape of Tensorflow model." 
) assert len(args.inNodeName) == len(inputshape) from mmdnn.conversion.tensorflow.tensorflow_frozenparser import TensorflowParser2 parser = TensorflowParser2(args.weights, inputshape, args.inNodeName, args.dstNodeName) else: from mmdnn.conversion.tensorflow.tensorflow_parser import TensorflowParser if args.inNodeName and inputshape[0]: parser = TensorflowParser(args.network, args.weights, args.dstNodeName, inputshape[0], args.inNodeName) else: parser = TensorflowParser(args.network, args.weights, args.dstNodeName) elif args.srcFramework == 'mxnet': assert inputshape != None if args.weights == None: model = (args.network, inputshape[0]) else: import re if re.search('.', args.weights): args.weights = args.weights[:-7] prefix, epoch = args.weights.rsplit('-', 1) model = (args.network, prefix, epoch, inputshape[0]) from mmdnn.conversion.mxnet.mxnet_parser import MXNetParser parser = MXNetParser(model) elif args.srcFramework == 'cntk': from mmdnn.conversion.cntk.cntk_parser import CntkParser model = args.network or args.weights parser = CntkParser(model) elif args.srcFramework == 'pytorch': assert inputshape != None from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser model = args.network or args.weights assert model != None parser = PytorchParser(model, inputshape[0]) elif args.srcFramework == 'torch' or args.srcFramework == 'torch7': from mmdnn.conversion.torch.torch_parser import TorchParser model = args.network or args.weights assert model != None parser = TorchParser(model, inputshape[0]) elif args.srcFramework == 'onnx': from mmdnn.conversion.onnx.onnx_parser import ONNXParser parser = ONNXParser(args.network) elif args.srcFramework == 'darknet': from mmdnn.conversion.darknet.darknet_parser import DarknetParser parser = DarknetParser(args.network, args.weights, args.darknetStart) elif args.srcFramework == 'coreml': from mmdnn.conversion.coreml.coreml_parser import CoremlParser parser = CoremlParser(args.network) else: raise ValueError("Unknown 
framework [{}].".format(args.srcFramework)) parser.run(args.dstPath) return 0
output_fname = sys.argv[2] if len(sys.argv) > 3: DO_CUDA = sys.argv[3] != 'cpu' except: print( "Wrong input format. Try ./extract_hardnet_desc_from_hpatches_file.py imgs/ref.png out.txt gpu" ) sys.exit(1) #model_weights = '../pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth' model_weights = '/home/kangrong/HardNet/hardnet/code/data/models/withoutpool/liberty_train_withoutpool/_liberty_min_as_fliprot/checkpoint_8.pth' model = HardNet() checkpoint = torch.load(model_weights) model.load_state_dict(checkpoint['state_dict']) from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser size = 32 pytorchparser = PytorchParser(model, [1, size, size]) IR_file = 'TFeat_withoutpool' pytorchparser.run(IR_file) ''' model.eval() if DO_CUDA: model.cuda() print('Extracting on GPU') else: print('Extracting on CPU') model = model.cpu() image = cv2.imread(input_img_fname,0) h,w = image.shape print(h,w) n_patches = int(h/w)
def main():
    """CLI entry point: build the requested model, load its weights, trace
    it once, convert it to MMdnn IR, then hand off to _convert().

    Reads everything from the argparse namespace returned by _get_parser().
    """
    parser = _get_parser()
    args = parser.parse_args()

    # Select the model architecture by name.
    if args.NN == "VGG19":
        model = models.vgg19(pretrained=True)
    elif args.NN == "Inception3":
        # BUG FIX: the original did `args = Inceptionv3(aux_logits=False)`,
        # clobbering the parsed CLI arguments and never binding `model`
        # (NameError at model.load_state_dict below).
        model = Inceptionv3(aux_logits=False)
    elif args.NN == "ResNet":
        model = Resnet()
    elif args.NN == "MobileNet":
        model = MobileNet()
    elif args.NN == "MobileNet_":
        model = MobileNet_()
    elif args.NN == "MobileNet_2":
        model = MobileNet_2()
    elif args.NN == "MobileNet_3":
        model = MobileNet_3()
    elif args.NN == "MobileNetV2":
        model = MobileNetV2()
    else:
        # NOTE(review): eval() of a CLI string — acceptable for a trusted
        # local tool, but never expose this path to untrusted input.
        model = eval(args.NN)()

    # Fill in defaults for any conversion paths the user did not supply.
    if args.framework is None:
        args.framework = "coreml"
    if args.inputNetwork is None:
        args.inputNetwork = os.path.join(args.outpath, args.NN + ".pb")
    if args.inputWeight is None:
        args.inputWeight = os.path.join(args.outpath, args.NN + ".npy")
    if args.output is None:
        args.output = os.path.join(args.outpath, args.NN + ".mlmodel")
    IR_file = args.NN

    # Load weights either from a full training checkpoint or a bare
    # state-dict file.
    if args.is_checkpoint == 1:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
    else:
        model.load_state_dict(torch.load(args.resume))

    # Conversion runs on CPU in eval mode with float32 weights.
    model.eval()
    model = model.cpu().float()

    # One warm-up forward pass so the graph can be traced.
    dummy_input = Variable(torch.randn(1, 3, args.input_size, args.input_size))
    model(dummy_input)

    pytorchparser = PytorchParser(model,
                                  [3, args.input_size, args.input_size])
    pytorchparser.run(os.path.join(args.outpath, IR_file))
    _convert(args)
eval_acc = 0 model.eval() for img, label in test_data: img, label = img.to(device), label.to(device) output = model(img) loss = criterion(output, label) eval_loss += loss.item() _, pred = output.max(1) num_correct = (pred == label).sum().item() acc = num_correct / img.shape[0] eval_acc += acc etrain_loss = train_loss / len(train_data) etrain_acc = train_acc / len(train_data) eeval_loss = eval_loss / len(test_data) eeval_acc = eval_acc / len(test_data) print( 'epoch:{}, Train Loss:{:.6f}, Train Acc:{:.6f}, Eval Loss: {:.6f}, Eval Acc:{:.6f}' .format(e, etrain_loss, etrain_acc, eeval_loss, eeval_acc)) model.load_state_dict(torch.load('vgg13.pth')) print('loading vgg13.pth') torch.save(model, 'cpuvgg13.pth') print('Finish') torch.save(model.state_dict(), 'vgg13.pth') from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser parser = PytorchParser('model_vgg13.pth', [1, 48, 48]) IR_file = 'vgg13bn' parser.run(IR_file)