def inference():
    """Convert a Keras segmentation model to Caffe and cross-check outputs.

    Builds the Keras model by name, loads its weights, converts it with
    keras2caffe, then feeds the same random input through both frameworks
    and prints the resulting feature maps side by side for comparison.
    """
    model_name = 'AtrousFCN_DeResnet50_asUnet'
    weight_file = 'checkpoint_weights_3.hdf5'
    model_input_size = (720, 720)
    batch_shape = (1, ) + model_input_size + (3, )
    model_weight_path = os.path.join('kerasModel', weight_file)
    # The model constructor is looked up by name in module globals.
    keras_model = globals()[model_name](batch_shape=batch_shape,
                                        input_shape=(model_input_size[0],
                                                     model_input_size[1], 3))
    keras_model.load_weights(model_weight_path, by_name=True)
    # NOTE(review): keras_model.input.shape is a TensorShape; presumably all
    # dims are concrete because batch_shape was fixed above — confirm, else
    # use batch_shape directly.
    input_data = np.random.random(keras_model.input.shape)
    caffe_model_dir = 'caffeModel'
    caffe_net_file = os.path.join(caffe_model_dir, 'caffeModel_deploy_3.prototxt')
    caffe_params_file = os.path.join(caffe_model_dir, 'caffeModel_3.caffemodel')
    keras2caffe.convert(keras_model, caffe_net_file, caffe_params_file)
    caffe_model = caffe.Net(caffe_net_file, caffe_params_file, caffe.TEST)
    # Load the input into the 'data' blob; Caffe expects NCHW while the
    # Keras tensor is NHWC, hence the transpose.
    caffe_model.blobs['data'].data[...] = np.transpose(input_data, (0, 3, 1, 2))
    caffe_out = caffe_model.forward()
    print(caffe_out)
    output = caffe_out['batch_normalization_6']
    output = np.transpose(output, (0, 2, 3, 1))
    print('caffe_out ' + str(output.shape))
    print(output[0, :, :, 0])
    keras_out = keras_model.predict(input_data, batch_size=1)
    print(
        'keras_out' + str(keras_out.shape) +
        '========================================================================================================================'
    )
    print(keras_out[0, :, :, 0])
    # BUG FIX: dict.iteritems() only exists on Python 2; items() works on
    # both Python 2 and 3.
    for layer_name, blob in caffe_model.blobs.items():
        print(layer_name + '\t' + str(blob.data.shape))
# NOTE(review): mangled fragment — it begins with a dangling `else:` (the
# matching `if` is outside this view) and ends mid-call inside
# flow_from_directory(...); left byte-identical because the missing context
# makes any reformatting a guess. Logically it: loads the Keras model,
# optionally converts it to Caffe (OPTIONAL_MODE == "caffemodel"), and
# optionally sets up an ImageDataGenerator over the imdb validation set for
# benchmarking (OPTIONAL_MODE == "benchmark").
keras_model = load_model(MODEL_HDF5) else: keras_model = load_model(MODEL_HDF5) keras_model.summary() # ---------------------------------------------- # convert to caffe model # ---------------------------------------------- if OPTIONAL_MODE == "caffemodel": os.environ["GLOG_minloglevel"] = "2" import caffe import keras2caffe prototxt = DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + DATASET_NAME + '.prototxt' caffemodel = DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + DATASET_NAME + '.caffemodel' keras2caffe.convert(keras_model, prototxt, caffemodel) # ---------------------------------------------- # Benchmark # ---------------------------------------------- if OPTIONAL_MODE == "benchmark": BENCHMARK_DATASET_NAME = "imdb" BENCHMARK_DATASET_TARGET = "validation" BATCH_SIZE = 64 shape = keras_model.layers[0].get_output_at(0).get_shape().as_list() disp_generator = ImageDataGenerator(rescale=1.0 / 255).flow_from_directory( DATASET_ROOT_PATH + 'dataset/agegender_' + BENCHMARK_DATASET_NAME + '/annotations/' + ANNOTATIONS + '/' + BENCHMARK_DATASET_TARGET,
# Convert ImageNet-pretrained MobileNet from Keras to Caffe, then load the
# result into a Caffe net and prepare one preprocessed test image.

# converting
keras_model = MobileNet(input_shape=(224, 224, 3),
                        alpha=1.0,
                        depth_multiplier=1,
                        dropout=1e-3,
                        include_top=True,
                        weights='imagenet',
                        input_tensor=None,
                        pooling=None,
                        classes=1000)

caffe_proto = 'MobileNet.prototxt'
caffe_weights = 'MobileNet.caffemodel'
keras2caffe.convert(keras_model, caffe_proto, caffe_weights)

# testing the model
net = caffe.Net(caffe_proto, caffe_weights, caffe.TEST)

img = cv2.imread(DATA_DIR + 'bear.jpg')
img = cv2.resize(img, (224, 224))
img = img[..., ::-1]  # RGB 2 BGR

# NCHW float tensor scaled to [-1, 1) as MobileNet expects.
data = np.array(img, dtype=np.float32)
data = data.transpose((2, 0, 1))
data.shape = (1, ) + data.shape
data /= 128
data -= 1.0
# Cap TensorFlow GPU memory, then convert InceptionV4 to Caffe and prepare
# one test image for the converted network.
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))

import keras2caffe

import sys
sys.path.append('/home/data/keras-models/keras-inceptionV4')
import inception_v4
#import evalute_image

# converting
keras_model = inception_v4.create_model(weights='imagenet',
                                        include_top=True,
                                        dropout_prob=0.8)
keras2caffe.convert(keras_model, 'InceptionV4.prototxt', 'InceptionV4.caffemodel')

# testing the model
caffe.set_mode_gpu()
net = caffe.Net('InceptionV4.prototxt', 'InceptionV4.caffemodel', caffe.TEST)

img = cv2.imread('bear.jpg')
#img = evaluate_image.central_crop(im, 0.875)
img = cv2.resize(img, (299, 299))
img = img[..., ::-1]  # RGB 2 BGR

# HWC -> NCHW float tensor.
data = np.array(img, dtype=np.float32)
data = data.transpose((2, 0, 1))
data.shape = (1,) + data.shape
# NOTE(review): in the full file these first three assignments may sit inside
# an annotation-selection `if` (the header is outside this view) — confirm
# indentation against the original before merging.
MODEL_HDF5 = 'pretrain/gender_mini_XCEPTION.21-0.95.hdf5'
ANNOTATION_WORDS = 'words/agegender_gender_words.txt'
IMAGE_SIZE = 32

# Input resolution depends on the chosen architecture.
if (MODELS == 'simple_cnn'):
    IMAGE_SIZE = 48
if (MODELS == 'miniXception'):
    IMAGE_SIZE = 64
if (MODELS == 'vgg16'):
    IMAGE_SIZE = 224

keras_model = load_model(MODEL_HDF5)
keras_model.summary()

# Convert to Caffe and reload the converted network for verification.
keras2caffe.convert(
    keras_model,
    'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.prototxt',
    'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.caffemodel')
net = caffe.Net(
    'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.prototxt',
    'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.caffemodel',
    caffe.TEST)

# ----------------------------------------------
# data
# ----------------------------------------------
#img = cv2.imread('dataset/agegender/annotations/agegender/validation/0_0-2_m/landmark_aligned_face.84.8277643357_43f107482d_o.jpg')
img = cv2.imread(
    'dataset/agegender/annotations/agegender/validation/11_15-20_f/landmark_aligned_face.290.11594063605_713764ddeb_o.jpg'
)
from keras.applications.inception_v3 import InceptionV3

# TensorFlow backend uses all GPU memory by default, so we need limit
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))

# converting
keras_model = InceptionV3(input_shape=(299, 299, 3),
                          weights='imagenet',
                          include_top=True)
keras2caffe.convert(keras_model, 'deploy.prototxt', 'InceptionV3.caffemodel')

# testing the model
caffe.set_mode_gpu()
net = caffe.Net('deploy.prototxt', 'InceptionV3.caffemodel', caffe.TEST)

img = cv2.imread(DATA_DIR + 'bear.jpg')
img = cv2.resize(img, (299, 299))
img = img[..., ::-1]  # RGB 2 BGR

# HWC -> NCHW float tensor, rescaled (first half of the [-1, 1) mapping).
data = np.array(img, dtype=np.float32)
data = data.transpose((2, 0, 1))
data.shape = (1, ) + data.shape
data /= 128
# Command-line converter: build a segmentation model, load its Keras weights,
# and emit a Caffe prototxt/caffemodel pair.
sys.path.insert(0, parentdir)
import Models
from Models import build_model

parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="unet")
parser.add_argument("--n_classes", type=int, default=2)
parser.add_argument("--input_height", type=int, default=224)
parser.add_argument("--input_width", type=int, default=224)
parser.add_argument("--input_model", type=str, default="../weights/xxx.hd55")
parser.add_argument("--output_model", type=str, default="./unet.prototxt")
parser.add_argument("--output_weight", type=str, default="./unet.caffemodel")
args = parser.parse_args()

n_classes = args.n_classes
input_height = args.input_height
input_width = args.input_width
model_name = args.model_name
input_model_path = args.input_model
output_model = args.output_model
output_weight = args.output_weight

model = build_model(model_name,
                    n_classes,
                    input_height=input_height,
                    input_width=input_width)
model.load_weights(input_model_path)

keras2caffe.convert(model, output_model, output_weight)
# Minimal example: load a saved Keras model and convert it to Caffe.
import sys
sys.path.append('../../')

import keras2caffe
from keras.models import load_model

model = load_model('simple_CNN.81-0.96.hdf5')
keras2caffe.convert(model, 'deploy.prototxt', 'weights.caffemodel')
# Emit C array initializers for the 3d-pose-baseline normalization stats,
# then convert the Keras model to Caffe and compare predictions on a random
# 32-dim input.
for i in range(48):
    mean_and_std = mean_and_std + str(data_mean_3d[i]) + ","
mean_and_std = mean_and_std + "};\n"
# BUG FIX: the loop below emits 48 initializers, but the array was declared
# as float[32]; a 32-element C array with 48 initializers will not compile.
mean_and_std = mean_and_std + "float data_std_3d[48]={"
for i in range(48):
    mean_and_std = mean_and_std + str(data_std_3d[i]) + ","
mean_and_std = mean_and_std + "};\n"
print(mean_and_std)

#converting
keras_model = load_model('3d-pose-baseline.hdf5')
keras_model.summary()
keras2caffe.convert(keras_model, '3d-pose-baseline.prototxt',
                    '3d-pose-baseline.caffemodel')

net = caffe.Net('3d-pose-baseline.prototxt', '3d-pose-baseline.caffemodel',
                caffe.TEST)

# Model input is a flat 32-vector (batch of 1).
data = np.random.rand(32)
data = np.reshape(np.array(data), (1, 32))

#verify
pred = keras_model.predict(data)[0]
print(pred)

out = net.forward_all(data=data)
pred = out['dense_6']
print(pred)
from keras.preprocessing import image

import keras2caffe

# TensorFlow backend uses all GPU memory by default, so we need limit
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))

# converting
keras_model = VGG16(input_shape=(224, 224, 3),
                    weights='imagenet',
                    include_top=True)
keras2caffe.convert(keras_model, 'VGG16.prototxt', 'VGG16.caffemodel')

# testing the model
#caffe.set_mode_gpu()
net = caffe.Net('VGG16.prototxt', 'VGG16.caffemodel', caffe.TEST)

img = cv2.imread('bear.jpg')
img = cv2.resize(img, (224, 224))
img = img[..., ::-1]  # RGB 2 BGR

# HWC -> NCHW float tensor, mean-shifted.
data = np.array(img, dtype=np.float32)
data = data.transpose((2, 0, 1))
data.shape = (1,) + data.shape
data -= 128
import sys
sys.path.append('/media/toshiba_ml/models/keras-models/keras-squeezenet')
from keras_squeezenet import SqueezeNet

# TensorFlow backend uses all GPU memory by default, so we need limit
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))

# converting
keras_model = SqueezeNet()
keras2caffe.convert(keras_model, 'deploy.prototxt', 'SqueezeNet.caffemodel')

# testing the model
caffe.set_mode_gpu()
net = caffe.Net('deploy.prototxt', 'SqueezeNet.caffemodel', caffe.TEST)

img = cv2.imread(DATA_DIR + 'bear.jpg')
img = cv2.resize(img, (227, 227))
img = img[..., ::-1]  # RGB 2 BGR

# HWC -> NCHW float tensor.
data = np.array(img, dtype=np.float32)
data = data.transpose((2, 0, 1))
data.shape = (1,) + data.shape
# NOTE(review): in the full file these first three assignments may sit inside
# an annotation-selection `if` (the header is outside this view) — confirm
# indentation against the original before merging.
MODEL_HDF5 = DATASET_ROOT_PATH + 'pretrain/gender_mini_XCEPTION.21-0.95.hdf5'
ANNOTATION_WORDS = 'words/agegender_gender_words.txt'
IMAGE_SIZE = 32

# Input resolution depends on the chosen architecture.
if (MODELS == 'simple_cnn'):
    IMAGE_SIZE = 48
if (MODELS == 'miniXception'):
    IMAGE_SIZE = 64
if (MODELS == 'vgg16'):
    IMAGE_SIZE = 224

keras_model = load_model(MODEL_HDF5)
keras_model.summary()

# Convert to Caffe and reload the converted network for verification.
keras2caffe.convert(
    keras_model,
    DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.prototxt',
    DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.caffemodel')
net = caffe.Net(
    DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.prototxt',
    DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.caffemodel',
    caffe.TEST)

# ----------------------------------------------
# data
# ----------------------------------------------
#img = cv2.imread('dataset/agegender/annotations/agegender/validation/0_0-2_m/landmark_aligned_face.84.8277643357_43f107482d_o.jpg')
img = cv2.imread(
    'dataset/agegender/annotations/agegender/validation/11_15-20_f/landmark_aligned_face.290.11594063605_713764ddeb_o.jpg'
)
from datetime import datetime

import keras2caffe
from keras.models import load_model, model_from_json

if __name__ == '__main__':
    # Usage: convert.py <model.(json|hdf5)> <weights>
    if len(sys.argv) < 2:
        raise Exception('ERROR: No input model.')
    model_file = sys.argv[1]
    if not os.path.isfile(model_file):
        raise Exception('ERROR: model file is not exist!')

    if len(sys.argv) < 3:
        raise Exception('ERROR: No input weights.')
    w_file = sys.argv[2]
    # BUG FIX: the original re-checked model_file here, so a missing weights
    # file was never detected; validate w_file instead.
    if not os.path.isfile(w_file):
        raise Exception('ERROR: w file is not exist!')

    # Timestamped output directory for the converted artifacts.
    OUTPUT_DIR = datetime.now().strftime("output_%y%m%d_%H%M%S/")
    if not os.path.isdir(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)

    # A .json file carries only the architecture; anything else is assumed
    # to be a full saved model.
    model = model_from_json(
        open(sys.argv[1], 'r').read()) if sys.argv[1][-5:] == '.json' else load_model(
            sys.argv[1])
    model.load_weights(sys.argv[2])

    keras2caffe.convert(model, OUTPUT_DIR + 'deploy.prototxt',
                        OUTPUT_DIR + 'model.caffemodel')
# Convert an SSR-Net age estimator to Caffe; the smoke-test section below is
# kept commented out, as in the original.
sys.path.append('/home/lc/keras2caffe/ssrnet/')
import keras_ssrnet
#import evalute_image

# converting
keras_model = keras_ssrnet.create_model(3, 3, 3, 0.5, 0.5,
                                        weights='imdb',
                                        include_top=True)
keras2caffe.convert(keras_model, 'ssrnet.prototxt', 'ssrnet.caffemodel')

# testing the model
#caffe.set_mode_cpu()
#net = caffe.Net('ssrnet.prototxt', 'ssrnet.caffemodel', caffe.TEST)
#img = cv2.imread('man.JPG')
#img = evaluate_image.central_crop(im, 0.875)
#img = cv2.resize(img, (64, 64))
#img = img[...,::-1] #RGB 2 BGR
#data = np.array(img, dtype=np.float32)
#data = data.transpose((2, 0, 1))
#data.shape = (1,) + data.shape
# Command-line converter: build a segmentation model, load its Keras weights,
# and emit a Caffe prototxt/caffemodel pair.
import Models
from Models import build_model

parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="unet")
parser.add_argument("--n_classes", type=int, default=2)
parser.add_argument("--input_height", type=int, default=224)
parser.add_argument("--input_width", type=int, default=224)
parser.add_argument("--input_model", type=str, default="../weights/xxx.hd55")
parser.add_argument("--output_model", type=str, default="./unet.prototxt")
parser.add_argument("--output_weight", type=str, default="./unet.caffemodel")
args = parser.parse_args()

n_classes = args.n_classes
input_height = args.input_height
input_width = args.input_width
model_name = args.model_name
input_model_path = args.input_model
# BUG FIX: was `args.output_weight`, which silently ignored --output_model.
output_model = args.output_model
output_weight = args.output_weight

model = build_model(model_name,
                    n_classes,
                    input_height=input_height,
                    input_width=input_width)
model.load_weights(input_model_path)

# BUG FIX: the original passed output_model twice, so the caffemodel weights
# overwrote the prototxt file; pass the weight path as the third argument.
keras2caffe.convert(model, output_model, output_weight)
# Pick the pretrained gender model for the 'gender_octavio' annotation set.
if (ANNOTATIONS == "gender_octavio"):
    MODEL_HDF5 = DATASET_ROOT_PATH + 'pretrain/gender_mini_XCEPTION.21-0.95.hdf5'
    ANNOTATION_WORDS = 'words/agegender_gender_words.txt'
    IMAGE_SIZE = 32

# Input resolution depends on the chosen architecture.
if (MODELS == 'simple_cnn'):
    IMAGE_SIZE = 48
if (MODELS == 'miniXception'):
    IMAGE_SIZE = 64
if (MODELS == 'vgg16'):
    IMAGE_SIZE = 224

keras_model = load_model(MODEL_HDF5)
keras_model.summary()

# Convert to Caffe and reload the converted network for verification.
keras2caffe.convert(
    keras_model,
    DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.prototxt',
    DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.caffemodel')
net = caffe.Net(
    DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.prototxt',
    DATASET_ROOT_PATH + 'pretrain/agegender_' + ANNOTATIONS + '_' + MODELS + '.caffemodel',
    caffe.TEST)

# ----------------------------------------------
# data
# ----------------------------------------------
#img = cv2.imread('dataset/agegender/annotations/agegender/validation/0_0-2_m/landmark_aligned_face.84.8277643357_43f107482d_o.jpg')
img = cv2.imread('dataset/agegender/annotations/agegender/validation/11_15-20_f/landmark_aligned_face.290.11594063605_713764ddeb_o.jpg')
#img = cv2.imread('dataset/agegender/annotations/agegender/validation/3_15-20_m/landmark_aligned_face.291.11593859573_1419d380b4_o.jpg')
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))

# Grayscale single-channel input for the emotion / octavio gender models.
if (ANNOTATIONS == 'emotion' or ANNOTATIONS == 'gender_octavio'):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = np.expand_dims(img, axis=2)