def load(path, opts, vars):
    print('load model from ' + path)

    # parse input/output dimensions
    trainerPath = path
    trainerPath = trainerPath.replace("trainer.PythonModel.model", "trainer")
    print('tp: {}'.format(trainerPath))
    xmldoc = minidom.parse(trainerPath)
    streams = xmldoc.getElementsByTagName('streams')
    streamItem = streams[0].getElementsByTagName('item')
    n_input = streamItem[0].attributes['dim'].value
    print("#input: " + str(n_input))
    classes = xmldoc.getElementsByTagName('classes')
    n_output = len(classes[0].getElementsByTagName('item'))
    print("#output: " + str(n_output))

    # copy unique network file
    network_tmp = os.path.dirname(path) + '\\' + opts['network'] + '.py'
    shutil.copy(path + '.' + opts['network'] + '.py', network_tmp)

    # reload sys path and import network file
    importlib.reload(site)

    print('Modelpath {}'.format(path + '_keras.h5'))

    # load model
    vars['model'] = load_model(
        path + '_keras.h5',
        custom_objects={'correlation_coefficient_loss': correlation_coefficient_loss})
def check_model(model, model_name, x, y):
    model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
    model.fit(x, y, batch_size=100, epochs=1, validation_split=0.5)
    print(model_name + " test train valid pass!")
    model.save_weights(model_name + '_weights.h5')
    model.load_weights(model_name + '_weights.h5')
    print(model_name + " test save load weight pass!")
    save_model(model, model_name + '.h5')
    model = load_model(model_name + '.h5', custom_objects)
    print(model_name + " test save load model pass!")
    print(model_name + " test pass!")
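# A minimal sketch of how check_model() could be driven, assuming a toy binary
# classification setup. The small Sequential model, the random data, and the
# empty custom_objects dict are illustrative assumptions, not part of the
# original snippet; check_model itself expects save_model/load_model and
# custom_objects to be in scope.
import numpy as np
from tensorflow.python.keras.models import Sequential, save_model, load_model
from tensorflow.python.keras.layers import Dense

custom_objects = {}  # assumed empty; fill with any custom layers/losses the model uses
x_demo = np.random.rand(200, 16)
y_demo = np.random.randint(0, 2, size=(200, 1))
demo_model = Sequential([Dense(8, activation='relu', input_shape=(16,)),
                         Dense(1, activation='sigmoid')])
check_model(demo_model, "demo_model", x_demo, y_demo)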
def test_DIN_model_io():
    model_name = "DIN_att"
    _, _, feature_dim_dict, behavior_feature_list = get_xy_fd()
    model = DIN(feature_dim_dict, behavior_feature_list, hist_len_max=4,
                embedding_size=8, att_activation=Dice, use_din=True,
                hidden_size=[4, 4, 4], keep_prob=0.6,)
    model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
    # model.fit(x, y, verbose=1, validation_split=0.5)
    save_model(model, model_name + '.h5')
    model = load_model(model_name + '.h5', custom_objects)
    print(model_name + " test save load model pass!")
def convert(in_path, out_path):
    """Convert any Keras model to the frugally-deep model format."""

    assert K.backend() == "tensorflow"
    assert K.floatx() == "float32"
    assert K.image_data_format() == 'channels_last'

    print('loading {}'.format(in_path))
    model = load_model(in_path)

    # Force creation of underlying functional model.
    # see: https://github.com/fchollet/keras/issues/8136
    # Loss and optimizer type do not matter, since we don't train the model.
    model.compile(loss='mse', optimizer='sgd')

    model = convert_sequential_to_model(model)
    test_data = gen_test_data(model)

    json_output = {}
    json_output['architecture'] = json.loads(model.to_json())
    json_output['image_data_format'] = K.image_data_format()
    for depth in range(1, 3, 1):
        json_output['conv2d_valid_offset_depth_' + str(depth)] = \
            check_operation_offset(depth, offset_conv2d_eval, 'valid')
        json_output['conv2d_same_offset_depth_' + str(depth)] = \
            check_operation_offset(depth, offset_conv2d_eval, 'same')
        json_output['separable_conv2d_valid_offset_depth_' + str(depth)] = \
            check_operation_offset(depth, offset_sep_conv2d_eval, 'valid')
        json_output['separable_conv2d_same_offset_depth_' + str(depth)] = \
            check_operation_offset(depth, offset_sep_conv2d_eval, 'same')
    json_output['max_pooling_2d_valid_offset'] = \
        check_operation_offset(1, conv2d_offset_max_pool_eval, 'valid')
    json_output['max_pooling_2d_same_offset'] = \
        check_operation_offset(1, conv2d_offset_max_pool_eval, 'same')
    json_output['average_pooling_2d_valid_offset'] = \
        check_operation_offset(1, conv2d_offset_average_pool_eval, 'valid')
    json_output['average_pooling_2d_same_offset'] = \
        check_operation_offset(1, conv2d_offset_average_pool_eval, 'same')
    json_output['input_shapes'] = get_shapes(test_data['inputs'])
    json_output['output_shapes'] = get_shapes(test_data['outputs'])
    json_output['tests'] = [test_data]
    json_output['trainable_params'] = get_all_weights(model)

    print('writing {}'.format(out_path))
    write_text_file(out_path, json.dumps(
        json_output, allow_nan=False, indent=2, sort_keys=True))
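# A hedged usage sketch for convert(): a small __main__ guard that takes the
# input .h5 path and output .json path from the command line. The argument
# order and the default file names are assumptions; the real frugally-deep
# converter script may parse its arguments differently.
if __name__ == '__main__':
    import sys
    in_path = sys.argv[1] if len(sys.argv) > 1 else 'model.h5'     # assumed default
    out_path = sys.argv[2] if len(sys.argv) > 2 else 'model.json'  # assumed default
    convert(in_path, out_path)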
def model_to_estimator(keras_model=None,
                       keras_model_path=None,
                       custom_objects=None,
                       model_dir=None,
                       config=None):
  """Constructs an `Estimator` instance from given keras model.

  For usage example, please see:
  [Creating estimators from Keras Models](https://tensorflow.org/guide/estimators#model_to_estimator).

  Args:
    keras_model: A compiled Keras model object. This argument is mutually
      exclusive with `keras_model_path`.
    keras_model_path: Path to a compiled Keras model saved on disk, in HDF5
      format, which can be generated with the `save()` method of a Keras model.
      This argument is mutually exclusive with `keras_model`.
    custom_objects: Dictionary for custom objects.
    model_dir: Directory to save `Estimator` model parameters, graph, summary
      files for TensorBoard, etc.
    config: `RunConfig` to config `Estimator`.

  Returns:
    An Estimator from given keras model.

  Raises:
    ValueError: if neither keras_model nor keras_model_path was given.
    ValueError: if both keras_model and keras_model_path were given.
    ValueError: if the keras_model_path is a GCS URI.
    ValueError: if keras_model has not been compiled.
  """
  if not (keras_model or keras_model_path):
    raise ValueError(
        'Either `keras_model` or `keras_model_path` needs to be provided.')
  if keras_model and keras_model_path:
    raise ValueError(
        'Please specify either `keras_model` or `keras_model_path`, '
        'but not both.')

  if not keras_model:
    if keras_model_path.startswith(
        'gs://') or 'storage.googleapis.com' in keras_model_path:
      raise ValueError(
          '%s is not a local path. Please copy the model locally first.' %
          keras_model_path)
    logging.info('Loading models from %s', keras_model_path)
    keras_model = models.load_model(keras_model_path)
  else:
    logging.info('Using the Keras model provided.')
    keras_model = keras_model

  if not hasattr(keras_model, 'optimizer') or not keras_model.optimizer:
    raise ValueError(
        'The given keras model has not been compiled yet. '
        'Please compile the model with `model.compile()` '
        'before calling `model_to_estimator()`.')

  config = estimator_lib.maybe_overwrite_model_dir_and_session_config(
      config, model_dir)

  keras_model_fn = _create_keras_model_fn(keras_model, custom_objects)
  if _any_weight_initialized(keras_model):
    # Warn if config passed to estimator tries to update GPUOptions. If a
    # session has already been created, the GPUOptions passed to the first
    # session sticks.
    if config.session_config.HasField('gpu_options'):
      logging.warning(
          'The Keras backend session has already been set. '
          'The _session_config passed to model_to_estimator will not be used.')
  else:
    # Pass the config into keras backend's default session.
    sess = session.Session(config=config.session_config)
    K.set_session(sess)

  warm_start_path = None
  if keras_model._is_graph_network:
    warm_start_path = _save_first_checkpoint(keras_model, custom_objects,
                                             config)
  elif keras_model.built:
    logging.warning('You are creating an Estimator from a Keras model manually '
                    'subclassed from `Model`, that was already called on some '
                    'inputs (and thus already had weights). We are currently '
                    'unable to preserve the model\'s state (its weights) as '
                    'part of the estimator in this case. Be warned that the '
                    'estimator has been created using a freshly initialized '
                    'version of your model.\n'
                    'Note that this doesn\'t affect the state of the model '
                    'instance you passed as `keras_model` argument.')

  estimator = estimator_lib.Estimator(keras_model_fn,
                                      config=config,
                                      warm_start_from=warm_start_path)

  return estimator
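# A minimal, hedged usage sketch for model_to_estimator(): compile a tiny Keras
# model, wrap it via the public tf.keras.estimator API, and train from an
# input_fn. The layer name, the derived feature key 'dense_input', the data
# shapes, and the step count are illustrative assumptions, not taken from the
# snippet above.
import numpy as np
import tensorflow as tf

demo_keras_model = tf.keras.Sequential([
    tf.keras.layers.Dense(1, input_shape=(4,), name='dense')
])
demo_keras_model.compile(optimizer='adam', loss='mse')
demo_estimator = tf.keras.estimator.model_to_estimator(keras_model=demo_keras_model)

def demo_input_fn():
    x = np.random.rand(32, 4).astype(np.float32)
    y = np.random.rand(32, 1).astype(np.float32)
    # The feature key must match the Keras input name (here assumed 'dense_input').
    return tf.data.Dataset.from_tensor_slices(({'dense_input': x}, y)).batch(8)

demo_estimator.train(demo_input_fn, steps=4)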
data_Validation_Input_mean = np.mean(data_Validation_Input, axis=1)
data_Validation_Input_std = np.std(data_Validation_Input, axis=1, ddof=1)

data_Test_Target_norm = stats.zscore(data_Test_Target, axis=1)
data_Test_Target_mean = np.mean(data_Test_Target, axis=1)
data_Test_Target_std = np.std(data_Test_Target, axis=1, ddof=1)

data_Test_Input_norm = stats.zscore(data_Test_Input, axis=1)
data_Test_Input_mean = np.mean(data_Test_Input, axis=1)
data_Test_Input_std = np.std(data_Test_Input, axis=1, ddof=1)

model_filename = "Model_DEMO_13ch.h5"
if os.path.isfile(model_filename):
    model = load_model(model_filename)
    print("Loaded model")
else:
    print('Create new model')
    model = Sequential()
    model.add(Input(shape=(13, )))
    model.add(Dense(20, activation='tanh', name="hidden_layer1"))
    model.add(Dense(20, activation='tanh', name="hidden_layer2"))
    # 'pure_linear' is not a built-in Keras activation; assuming the identity
    # activation was intended, which Keras names 'linear'.
    model.add(Dense(21, activation='linear', name="final_layer"))
    model.compile(loss='MeanSquaredError', optimizer=Adam(), metrics=['accuracy'])

model.summary()
    parse_args()
    table_name = args['Table']
    if not table_name:
        table_name = DEFAULT_TABLE
    top_k = args['Num']
    file = request.files.get('file', "")
    if not file:
        return "no file data", 400
    if not file.name:
        return "need file name", 400
    if file:
        filename = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)
        res_id, res_distance = do_search(table_name, file_path, top_k, model,
                                         graph, sess)
        if isinstance(res_id, str):
            return res_id
        res_img = [request.url_root + "data/" + x for x in res_id]
        res = dict(zip(res_img, res_distance))
        res = sorted(res.items(), key=lambda item: item[1])
        print(jsonify(res))
        return jsonify(res), 200
    return "not found", 400


if __name__ == "__main__":
    load_model()
    app.run(host="0.0.0.0")
def main():
    FLAGS, unparsed = parser.parse_known_args()
    model = load_model(FLAGS.model, FLAGS.backbone)
    img_lst, msk_lst = create_list(FLAGS.img_txt, FLAGS.msk_txt)
    inference(img_lst, msk_lst, model, FLAGS.output)
def train_and_predict(train_files, valid_files, test_files, feature_files,
                      job_dir, balance=False):
    if tf.test.gpu_device_name():
        print('Default GPU: {}'.format(tf.test.gpu_device_name()))
    else:
        print("Failed to find default GPU.")
        # sys.exit(1)

    print('Creating local copies, if necessary...')
    train_files = [
        create_local_copy(train_file) for train_file in train_files.split(',')
    ]
    valid_files = [
        create_local_copy(val_file) for val_file in valid_files.split(',')
    ]
    test_files = [
        create_local_copy(test_file) for test_file in test_files.split(',')
    ]
    feature_files = [
        create_local_copy(feature_file)
        for feature_file in feature_files.split(',')
    ]

    print('Loading data...')
    train_ground_truth = groundtruth.open_ground_truth(train_files)
    print('Loaded {} training annotations from {}.'.format(
        len(train_ground_truth.key_index_labels), train_files))

    train_valid_file = valid_files[0]
    valid_ground_truth = groundtruth.open_ground_truth(train_valid_file)
    print('Loaded {} validation annotations from {}.'.format(
        len(valid_ground_truth.key_index_labels), train_valid_file))

    # ensure the same indices are used in training and validation
    valid_ground_truth.use_indices_from(train_ground_truth)

    train_valid_features = {}
    for feature_file in feature_files:
        train_valid_features.update(joblib.load(feature_file))

    # create generator
    batch_size = 1000
    lr = 0.001
    epochs = 500
    dropout = 0.2
    filters = 64
    checkpoint_model_file = 'checkpoint_model.h5'
    model_file = join(job_dir, 'model.h5')
    input_shape = (40, 9)  # mel feature space

    classes_count = len(train_ground_truth.classes())
    print('Number of classes: {}'.format(classes_count))
    with FileIO(join(job_dir, 'classes.txt'), mode='w') as output_f:
        for i, name in enumerate(train_ground_truth.index_to_label):
            output_f.write("{}: {}\n".format(i, name).encode('utf-8'))

    print('Creating generators...')
    classes_to_balance = None
    if balance:
        # balance based on main genres
        classes_to_balance = train_ground_truth.main_classes()
    train_generator = DataGenerator(
        train_ground_truth.key_index_labels,
        classes_count,
        classes_to_balance,
        sample_loader=create_mel_sample_loader(train_valid_features),
        batch_size=batch_size,
        sample_shape=input_shape,
        shuffle=True)
    valid_generator = DataGenerator(
        valid_ground_truth.key_index_labels,
        classes_count,
        None,
        sample_loader=create_mel_sample_loader(train_valid_features),
        batch_size=batch_size,
        sample_shape=input_shape,
        shuffle=False)

    print('Creating model...')
    model = create_model(input_shape=input_shape,
                         output_dim=classes_count,
                         filters=filters,
                         dropout=dropout)
    model.compile(loss='binary_crossentropy',
                  optimizer=(Adam(lr=lr)),
                  metrics=['binary_accuracy'])
    print('Number of model parameters: {}'.format(model.count_params()))
    print(model.summary())

    print('Training...')
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=50, verbose=1),
        ModelCheckpoint(checkpoint_model_file, monitor='val_loss')
    ]
    history = model.fit_generator(train_generator,
                                  epochs=epochs,
                                  callbacks=callbacks,
                                  validation_data=valid_generator)
    print(model.summary())
    print('Setup: batch_size={}, epochs={}, dropout={}, filters={}'.format(
        batch_size, epochs, dropout, filters))
    print_history(history.history)

    print('Loading best model before early stopping...')
    model = load_model(checkpoint_model_file)
    # and save to job_dir
    save_model(model, model_file)

    for train_file, valid_file, test_file in zip(train_files, valid_files,
                                                 test_files):
        thresholds = predict_validation_labels(model, valid_file,
                                               train_ground_truth,
                                               train_valid_features, job_dir,
                                               input_shape)
        predict_test_labels(model, test_file, train_file, train_ground_truth,
                            thresholds, job_dir, input_shape)
plt.figure()
plt.imshow(img)
plt.imshow(res.reshape(img_rows, img_cols), cmap='gray', alpha=0.6)
# plt.imshow(get_image("masks_1class/" + image).astype(np.uint8)*255, cmap='gray', alpha=0.6)
plt.show()


if __name__ == '__main__':
    main()
    # rubbish collection
    tf.keras.backend.clear_session()


# manual -----------------------------
IMG_SIZE = 256
IMG = 'stack_palorinya_22Jan2018.tif_14_4.png'

model = load_model("models/model_spacenet_vam.h5",
                   custom_objects={
                       'dice_coef_loss': dice_coef_loss,
                       'dice_coef': dice_coef
                   })

img = load_images([IMG], "VAMdata/images/", scale=True)[0]
mask = load_images([IMG], "VAMdata/masks/", labels=True)[0]

model.evaluate(img.reshape(1, IMG_SIZE, IMG_SIZE, 3),
               mask.reshape(1, IMG_SIZE, IMG_SIZE, 1))
res = model.predict(img.reshape(1, IMG_SIZE, IMG_SIZE, 3)).reshape(IMG_SIZE, IMG_SIZE)
np.histogram(res)

plt.imshow((img * 255.).astype(int))
plt.imshow(res * 255, cmap='RdGy', alpha=0.6)
types = ('*.bmp', '*.jpg', '*.gif', '*.png', '*.tif', '*.jpeg')
pathlist = []
pathref = []
shutil.rmtree('outputDIPInceptionV3LAUG')
for files in types:
    pathlist.extend(glob2.glob(os.path.join(path_input, files)))
    pathref.extend(glob2.glob(os.path.join(path_ref, files)))


def distance(pointA, pointB):
    dist = np.linalg.norm(pointA - pointB)
    return dist


model_ori = load_model('2kInception.h5')
model_ori.summary()
model = Model(model_ori.input, model_ori.layers[-2].output)
model.summary()

features = []
feature_ref = []
path_num = len(pathlist) // 1
for file in range(path_num):
    fp = pathlist[file]
    img_pil = Image.open(fp).convert('RGB')
    img_pil = img_pil.resize((224, 224))
    img_cv = np.asarray(img_pil)
    input_data = img_cv / 127.5 - 1
    input_data = np.expand_dims(input_data, axis=0)
def predict_gen(model_path,
                start_path,
                pred_only_gt=False,
                maxmin_input=False,
                one_zero_scale=False,
                only=[],
                exclude=[],
                predict=True):
    model = load_model(model_path, compile=False)
    model_dim = len(
        model.get_config()['layers'][0]['config']['batch_input_shape'])
    model_config = model.get_config()['layers'][0]['config']['batch_input_shape']
    print('model:', model_path.split('/')[-1],
          ', model dimensions:', model_dim - 2)

    s = '/'
    dcm_file_path = []
    dcm_files = {}
    xml_files = {}
    print()
    for dir1 in os.listdir(start_path):
        if dir1.startswith('.DS'):
            continue
        path1 = start_path + s + dir1
        for dir2 in os.listdir(path1):
            if dir2.startswith('.DS'):
                continue
            pat_nr = int(dir2.split('-')[2])
            path2 = path1 + s + dir2
            if len(only) > 20:
                sys.stdout.write("\033[F")
                print('reading through files, at patient_nr:', pat_nr)
            if int(pat_nr) in exclude:
                continue
            if only:
                if int(pat_nr) not in only:
                    continue
            for dir3 in os.listdir(path2):
                if dir3.startswith('.DS'):
                    continue
                path3 = path2 + s + dir3
                for dir4 in os.listdir(path3):
                    if dir4.startswith('.DS'):
                        continue
                    path4 = path3 + s + dir4
                    for file in os.listdir(path4):
                        if file.endswith('dcm') and len(os.listdir(path4)) > 10:
                            dcm_file_path.append(path4 + s + file)
                        if file.endswith('.xml') and len(os.listdir(path4)) > 10:
                            xml_file_path = path4 + s + file
            dcm_files[pat_nr] = dcm_file_path
            xml_files[pat_nr] = xml_file_path
            dcm_file_path = []

    for pat in dcm_files.keys():
        orig = []
        for image in dcm_files[pat]:
            itkimage = sitk.ReadImage(image)
            origin = np.array(list(reversed(itkimage.GetOrigin())))
            orig.append([origin[0], image])

        # resolution
        z_res = np.abs(sorted(orig)[0][0] - sorted(orig)[1][0])

        tree = et.parse(xml_files[pat])
        root = tree.getroot()

        nodDicSmall = {}  # nodule <= 3mm dict
        nodDicLarge = {}  # large nodule dict
        nonNodDic = {}    # non-nodule dict
        nod_list = []
        non_nod_list = []
        nod_small_list = []

        # Read all unblinded read sessions
        readSes = root.findall('{http://www.nih.gov}readingSession')
        for doctor in range(len(readSes)):
            # get real nodules (tumors)
            nodule = readSes[doctor].findall(
                '{http://www.nih.gov}unblindedReadNodule')
            for i in range(len(nodule)):
                for roi in nodule[i].findall('{http://www.nih.gov}roi'):
                    # single pixel nodules
                    if len(roi) <= 5:
                        zValue = roi.find('{http://www.nih.gov}imageZposition')
                        for edgeMap in roi.findall('{http://www.nih.gov}edgeMap'):
                            xValue = edgeMap.find('{http://www.nih.gov}xCoord')
                            yValue = edgeMap.find('{http://www.nih.gov}yCoord')
                            nodDicSmall.setdefault(float(zValue.text), []).append(
                                [int(xValue.text), int(yValue.text)])
                    else:
                        zValue = roi.find('{http://www.nih.gov}imageZposition')
                        for edgeMap in roi.findall('{http://www.nih.gov}edgeMap'):
                            xValue = edgeMap.find('{http://www.nih.gov}xCoord')
                            yValue = edgeMap.find('{http://www.nih.gov}yCoord')
                            nodDicLarge.setdefault(float(zValue.text), []).append(
                                [int(xValue.text), int(yValue.text)])

            nod_list.append(nodDicLarge)
            nod_small_list.append(nodDicSmall)
            nodDicSmall = {}
            nodDicLarge = {}

            # get non-nodules
            nonNodule = readSes[doctor].findall('{http://www.nih.gov}nonNodule')
            for i in range(len(nonNodule)):
                zValue = nonNodule[i].find('{http://www.nih.gov}imageZposition')
                for locus in nonNodule[i].findall('{http://www.nih.gov}locus'):
                    xValue = locus.find('{http://www.nih.gov}xCoord')
                    yValue = locus.find('{http://www.nih.gov}yCoord')
                    nonNodDic.setdefault(float(zValue.text), []).append(
                        [int(xValue.text), int(yValue.text)])
            non_nod_list.append(nonNodDic)
            nonNodDic = {}

        output_im = np.zeros((4, len(dcm_files[pat]), 512, 512, 2), dtype=np.uint8)
        input_im = np.zeros((len(dcm_files[pat]), 512, 512, 1))

        cnt = 0
        for orig, path in sorted(orig):
            itkimage = sitk.ReadImage(path)
            ct_scan = sitk.GetArrayFromImage(itkimage)
            im = ct_scan[0, :, :]
            input_im[cnt, :, :, 0] = im

            # resolution
            spacing = np.array(list(reversed(itkimage.GetSpacing())))
            xy_res = spacing[1]

            # large nodules
            for list_ in range(len(nod_list)):
                if orig in nod_list[list_].keys():
                    for j in range(len(nod_list[list_][orig])):
                        output_im[list_, cnt,
                                  nod_list[list_][orig][j][1],
                                  nod_list[list_][orig][j][0], 1] = 255
                    output_im[list_, cnt, :, :, 1] = object_filler(
                        output_im[list_, cnt, :, :, 1], (0, 0))
                    # output_im[list_, cnt, :, :, 0] = cv2.medianBlur(np.uint8(output_im[list_, cnt, :, :, 0]), 7)
            cnt += 1

        out = np.zeros((len(dcm_files[pat]), 512, 512, 2))
        out[:, :, :, 1] = np.add(
            np.add(output_im[0, :, :, :, 1].astype(int),
                   output_im[1, :, :, :, 1].astype(int)),
            np.add(output_im[2, :, :, :, 1].astype(int),
                   output_im[3, :, :, :, 1].astype(int)))
        out[:, :, :, 1][out[:, :, :, 1] < 300] = 0
        out[:, :, :, 1][out[:, :, :, 1] >= 300] = 1

        # one-hot
        out[:, :, :, 0][out[:, :, :, 1] == 1] = 0
        out[:, :, :, 0][out[:, :, :, 1] == 0] = 1

        # resolution
        res = [z_res, xy_res]

        if predict:
            # ----- predicting 2d Unet ------
            if model_dim == 4:
                im = np.zeros((1, input_im.shape[1], input_im.shape[2],
                               input_im.shape[3]))
                pred_output = np.zeros(
                    (out.shape[0], out.shape[1], out.shape[2], out.shape[3]))
                for i in tqdm(range(input_im.shape[0])):
                    im[0, :, :, :] = input_im[i, :, :, :]
                    if pred_only_gt:
                        if np.count_nonzero(out[i, :, :, 1]) == 0:
                            continue
                    pred_output[i, :, :, :] = model.predict(im)

            # ----- predicting 3d Unet -----
            if model_dim == 5:
                inp = np.zeros((input_im.shape[0], 256, 256, 1))
                output = np.zeros((input_im.shape[0], 256, 256, 2))
                for i in range(input_im.shape[0]):
                    if maxmin_input:
                        inp[i, :, :, 0] = cv2.resize(
                            maxminscale(input_im[i, :, :, 0]), (256, 256))
                    else:
                        inp[i, :, :, 0] = cv2.resize(input_im[i, :, :, 0],
                                                     (256, 256))
                    output[i, :, :, 0] = cv2.resize(
                        out[i, :, :, 0], (256, 256),
                        interpolation=cv2.INTER_NEAREST)
                    output[i, :, :, 1] = cv2.resize(
                        out[i, :, :, 1], (256, 256),
                        interpolation=cv2.INTER_NEAREST)
                input_im = inp.copy()
                out = output.copy()

                chunks = int(np.ceil(input_im.shape[0] / model_config[1]))
                pred_output = np.zeros((model_config[1] * chunks,
                                        model_config[2], model_config[3], 2))
                pred_output_5d = np.zeros(
                    (chunks, model_config[1], model_config[2], model_config[3], 2))
                im = np.zeros((chunks, model_config[1], model_config[2],
                               model_config[3], model_config[4]))
                for i in range(chunks):
                    for j in range(model_config[1]):
                        if model_config[1] * i + j >= input_im.shape[0]:
                            continue
                        im[i, j, :, :, :] = input_im[model_config[1] * i + j, :, :, :]
                for i in tqdm(range(chunks)):
                    if pred_only_gt:
                        if np.count_nonzero(
                                out[model_config[1] * i:(i + 1) * model_config[1],
                                    :, :, 1]) == 0:
                            continue
                    pred_output_5d[i, :, :, :, :] = model.predict(
                        np.expand_dims(im[i, :, :, :, :], axis=0))
                for i in range(chunks):
                    for j in range(pred_output_5d.shape[1]):
                        pred_output[i * pred_output_5d.shape[1] + j, :, :, :] = \
                            pred_output_5d[i, j, :, :, :]

            yield input_im, out, pred_output, pat, res
        else:
            yield input_im, out, pat, res
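# A hedged usage sketch for predict_gen(): it is a generator that yields one
# patient at a time, so it is typically consumed in a loop. The model path, the
# LIDC-IDRI-style data root, and the Dice computation below are illustrative
# assumptions, not taken from the original code.
import numpy as np

for input_im, out, pred_output, pat, res in predict_gen(
        model_path='unet_2d.h5',        # assumed model file
        start_path='/data/LIDC-IDRI',   # assumed dataset root
        pred_only_gt=True,
        predict=True):
    pred_mask = (pred_output[..., 1] > 0.5).astype(np.uint8)
    gt_mask = out[..., 1].astype(np.uint8)
    # Dice overlap over the whole patient volume.
    dice = 2.0 * np.sum(pred_mask * gt_mask) / (
        np.sum(pred_mask) + np.sum(gt_mask) + 1e-7)
    print('patient {}: slices={}, [z_res, xy_res]={}, Dice={:.3f}'.format(
        pat, input_im.shape[0], res, dice))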
import pickle
from tensorflow.python.keras.models import load_model
import numpy as np

pickle_in = open(r"saved_data\X.pickle", "rb")
X = pickle.load(pickle_in)

pickle_in = open(r"saved_data\y.pickle", "rb")
y = pickle.load(pickle_in)

# The original file contained an unresolved merge conflict: one branch reshaped
# to 200x200, the other to 100x100. The 200x200 (HEAD) version is kept here;
# the reshape must match the input size the saved model was trained on.
X = X.reshape(-1, 200, 200, 1)
# X = X.reshape(-1, 100, 100, 1)

X = X / 255.0
y = np.array(y).astype(float)

model = load_model(r"saved_data\model.h5")
model.evaluate(X, y, verbose=2)
print("Loading Models") for model_name in ENSEMBLE_MODELS: path = MODEL_PATH + model_name + '.hdf5' print(path) modelx = models.load_model(path, custom_objects={ 'bce_jaccard_loss':bce_jaccard_loss, 'bce_dice_loss':bce_dice_loss, 'jaccard_loss':jaccard_loss, 'jaccard_index': jaccard_index, 'dice_loss' : dice_loss, 'dice_coeff' : dice_coeff, 'pixelwise_specificity' : pixelwise_specificity, 'pixelwise_sensitivity' : pixelwise_sensitivity, 'pixelwise_accuracy' : pixelwise_accuracy }) predictions += inv_sigmoid(modelx.predict(images)) #Logits predictions = predictions / len(ENSEMBLE_MODELS) predictions = sigmoid(predictions) predictions = np.squeeze(predictions) print("Post Processing")
import numpy as np
from PIL import Image
from tensorflow.python.keras.models import load_model, Model
import time
import random
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

TRAINING_DIR = 'C:/Users/raz/MyDatasets/data/'
model = None
model56 = load_model(TRAINING_DIR + "model/imitate_56_model.h5")
LETTERSTR = "0123456789ABCDEFGHJKLMNPQRSTUVWXYZ"

correct, wrong = 0, 0
for i in range(1, 13):
    t1 = time.time()
    img = Image.open('test/' + str(i) + '.jpg')
    captcha = img
    captcha.convert("RGB").save('captcha.jpg', 'JPEG')
    p56 = model56.predict(
        np.stack([np.array(Image.open('captcha.jpg')) / 255.0]))[0][0]
    print(p56)
    continue
def predict(picture_file, dims):
    """Return the predictions for pictures specified in a file."""
    param_names = [
        'vr', 'vt', 'vt_phi', 'size_ratio', 'mass_ratio', 'Rsep', 'lMW', 'bMW',
        'lM31', 'bM31', 'lR', 'bR'
    ]
    param_values = [
        [-130., -90., -50.],               # vr
        [10., 20., 30.],                   # vt
        [-45., 0., 45.],                   # vt_phi
        [0.25, 0.5, 0.75, 1., 1.25, 1.5],  # size_ratio
        [0.25, 0.5, 0.75, 1., 1.25, 1.5],  # mass_ratio
        [778., 788., 798.],                # Rsep
        [0., 90., 180.],                   # lMW
        [-90., 0., 90.],                   # bMW
        [200., 220., 240.],                # lM31
        [-90., -60., -30.],                # bM31
        [120., 121., 122.],                # lR
        [-25., -23., -21.]                 # bR
    ]

    picture_names, size_ratios, mass_ratios = [], [], []
    with open(picture_file, 'rt') as f:
        for line in f:
            tmp1, tmp2, tmp3 = line.split(" ")
            picture_names.append(tmp1)
            size_ratios.append(float(tmp2))
            mass_ratios.append(float(tmp3[:-1]))

    perfect_counter = 0
    half_perfect_counter = 0

    model = load_model(FLAGS.saved_model_name)
    print("Model being used:", FLAGS.saved_model_name)

    picture_array = np.zeros((len(picture_names), dims[0], dims[1], dims[2]),
                             dtype=np.float32)
    graph, nameholder, image_tensor = BPic().tf_decoder(dims)
    with tf.Session(graph=graph) as sess:
        init = tf.group(tf.global_variables_initializer(),
                        tf.local_variables_initializer())
        sess.run(init)
        for i, name in enumerate(picture_names):
            picture_array[i] = sess.run(image_tensor,
                                        feed_dict={nameholder: name})
            picture_array[i] = np.array(picture_array[i], dtype=np.float32)
            picture_array[i] /= 255
            if i % 500 == 0:
                print(i)

    print("Start")
    predictions = model.predict(picture_array, verbose=1)
    print("End")

    for i in range(len(picture_names)):
        print(i)
        max1 = np.amax(predictions[i][:6])
        max2 = np.amax(predictions[i][6:])
        idx1 = np.where(predictions[i] == max1)[0][0]
        idx2 = np.where(predictions[i] == max2)[0][0] - 6
        pred1 = param_values[param_names.index('size_ratio')][idx1]
        pred2 = param_values[param_names.index('mass_ratio')][idx2]
        print("Picture name:", picture_names[i])
        print("All values:", predictions[i])
        print("---|Predictions|--- Size ratio: %.2f Mass ratio: %.2f" %
              (pred1, pred2))
        if size_ratios[i] == pred1 and mass_ratios[i] == pred2:
            perfect_counter += 1
        elif size_ratios[i] == pred1 or mass_ratios[i] == pred2:
            half_perfect_counter += 1

    print("Perfect accuracy:",
          float(perfect_counter) / float(len(picture_names)))
    print("Half perfect accuracy:",
          float(half_perfect_counter) / float(len(picture_names)))
def load(self, model_path):
    self.model = load_model(model_path)
def load_model(self, file_path):
    self.__model = models.load_model(file_path)
def model_to_estimator(keras_model=None,
                       keras_model_path=None,
                       custom_objects=None,
                       model_dir=None,
                       config=None,
                       checkpoint_format=None,
                       use_v2_estimator=False):
  # LINT.ThenChange(//tensorflow/python/keras/estimator/__init__.py)
  """Constructs an `Estimator` instance from given keras model.

  If you use infrastructure or other tooling that relies on Estimators, you can
  still build a Keras model and use model_to_estimator to convert the Keras
  model to an Estimator for use with downstream systems.

  For usage example, please see:
  [Creating estimators from Keras Models](https://www.tensorflow.org/guide/estimators#creating_estimators_from_keras_models).

  Sample Weights:
  Estimators returned by `model_to_estimator` are configured so that they can
  handle sample weights (similar to `keras_model.fit(x, y, sample_weights)`).

  To pass sample weights when training or evaluating the Estimator, the first
  item returned by the input function should be a dictionary with keys
  `features` and `sample_weights`. Example below:

  ```python
  keras_model = tf.keras.Model(...)
  keras_model.compile(...)

  estimator = tf.keras.estimator.model_to_estimator(keras_model)

  def input_fn():
    return dataset_ops.Dataset.from_tensors(
        ({'features': features, 'sample_weights': sample_weights},
         targets))

  estimator.train(input_fn, steps=1)
  ```

  Args:
    keras_model: A compiled Keras model object. This argument is mutually
      exclusive with `keras_model_path`. Estimator's `model_fn` uses the
      structure of the model to clone the model. Defaults to `None`.
    keras_model_path: Path to a compiled Keras model saved on disk, in HDF5
      format, which can be generated with the `save()` method of a Keras model.
      This argument is mutually exclusive with `keras_model`.
      Defaults to `None`.
    custom_objects: Dictionary for cloning customized objects. This is
      used with classes that are not part of this pip package. For example, if
      a user maintains a `relu6` class that inherits from
      `tf.keras.layers.Layer`, then pass `custom_objects={'relu6': relu6}`.
      Defaults to `None`.
    model_dir: Directory to save `Estimator` model parameters, graph, summary
      files for TensorBoard, etc. If unset a directory will be created with
      `tempfile.mkdtemp`
    config: `RunConfig` to config `Estimator`. Allows setting up things in
      `model_fn` based on configuration such as `num_ps_replicas`, or
      `model_dir`. Defaults to `None`. If both `config.model_dir` and the
      `model_dir` argument (above) are specified the `model_dir` **argument**
      takes precedence.
    checkpoint_format: Sets the format of the checkpoint saved by the estimator
      when training. May be `saver` or `checkpoint`, depending on whether to
      save checkpoints from `tf.compat.v1.train.Saver` or
      `tf.train.Checkpoint`. The default is `checkpoint`. Estimators use
      name-based `tf.train.Saver` checkpoints, while Keras models use
      object-based checkpoints from `tf.train.Checkpoint`. Currently, saving
      object-based checkpoints from `model_to_estimator` is only supported by
      Functional and Sequential models.
    use_v2_estimator: Whether to convert the model to a V2 Estimator or V1
      Estimator. Defaults to `False`.

  Returns:
    An Estimator from given keras model.

  Raises:
    ValueError: If neither keras_model nor keras_model_path was given.
    ValueError: If both keras_model and keras_model_path were given.
    ValueError: If the keras_model_path is a GCS URI.
    ValueError: If keras_model has not been compiled.
    ValueError: If an invalid checkpoint_format was given.
  """
  if not (keras_model or keras_model_path):
    raise ValueError(
        'Either `keras_model` or `keras_model_path` needs to be provided.')
  if keras_model and keras_model_path:
    raise ValueError(
        'Please specify either `keras_model` or `keras_model_path`, '
        'but not both.')

  if keras_model:
    _assert_valid_model(keras_model, custom_objects)

  config = estimator_lib.maybe_overwrite_model_dir_and_session_config(
      config, model_dir)
  if not keras_model:
    if keras_model_path.startswith(
        'gs://') or 'storage.googleapis.com' in keras_model_path:
      keras_model_path = _get_file_from_google_storage(keras_model_path,
                                                       config.model_dir)
    tf.compat.v1.logging.info('Loading models from %s', keras_model_path)
    keras_model = models.load_model(keras_model_path)
  else:
    tf.compat.v1.logging.info('Using the Keras model provided.')
    keras_model = keras_model

  if checkpoint_format is None or checkpoint_format == 'checkpoint':
    if not (keras_model._is_graph_network or
            isinstance(keras_model, models.Sequential)):
      raise ValueError('Object-based checkpoints are currently not supported '
                       'with subclassed models.')
    save_object_ckpt = True
  elif checkpoint_format == 'saver':
    save_object_ckpt = False
  else:
    raise ValueError(
        'Checkpoint format must be one of "checkpoint" or "saver". Got {}'
        .format(checkpoint_format))

  if not hasattr(keras_model, 'optimizer') or not keras_model.optimizer:
    raise ValueError('The given keras model has not been compiled yet. '
                     'Please compile the model with `model.compile()` '
                     'before calling `model_to_estimator()`.')

  keras_model_fn = _create_keras_model_fn(keras_model, custom_objects,
                                          save_object_ckpt)
  if _any_weight_initialized(keras_model):
    # Warn if config passed to estimator tries to update GPUOptions. If a
    # session has already been created, the GPUOptions passed to the first
    # session sticks.
    if config.session_config.HasField('gpu_options'):
      tf.compat.v1.logging.warn(
          'The Keras backend session has already been set. '
          'The _session_config passed to model_to_estimator will not be used.')
  else:
    # Pass the config into keras backend's default session.
    sess = tf.compat.v1.Session(config=config.session_config)
    K.set_session(sess)

  warm_start_path = None
  if keras_model._is_graph_network:
    warm_start_path = _save_first_checkpoint(keras_model, custom_objects,
                                             config, save_object_ckpt)
  elif keras_model.built:
    tf.compat.v1.logging.warn(
        'You are creating an Estimator from a Keras model manually '
        'subclassed from `Model`, that was already called on some '
        'inputs (and thus already had weights). We are currently '
        'unable to preserve the model\'s state (its weights) as '
        'part of the estimator in this case. Be warned that the '
        'estimator has been created using a freshly initialized '
        'version of your model.\n'
        'Note that this doesn\'t affect the state of the model '
        'instance you passed as `keras_model` argument.')

  if use_v2_estimator:
    estimator_cls = estimator_lib.EstimatorV2
  else:
    estimator_cls = estimator_lib.Estimator

  estimator = estimator_cls(
      keras_model_fn, config=config, warm_start_from=warm_start_path)

  return estimator
    '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26',
    '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38',
    '39', '40', '41'
]

lbp_test = pd.read_csv('../LBP_feature_val.csv')
fd_test = pd.read_csv('../fd_feature_val.csv')
color_som_test = pd.read_csv('../color_som_val.csv')
test_label = pd.read_csv('../val_label.csv')

x_test = pd.concat([lbp_test, fd_test, color_som_test], axis=1).values
test_label_list = test_label['class_no'].tolist()

onehot_test = []
for value in test_label_list:
    class_id = [0 for _ in range(len(class_list))]
    class_id[value - 1] = 1
    onehot_test.append(class_id)
y_test = pd.DataFrame(onehot_test)

net = load_model('my_dnn.h5')
score = net.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

'''
pred = net.predict(x_test)[0]
top_inds = pred.argsort()[::-1][:5]
for i in top_inds:
    print('  {:.3f}  {}'.format(pred[i], cls_list[i]))
'''
    def _minkowski_classify(self):
        self.image_classifier.minkowski_classify()
        max_class = int(
            np.argmax(self.image_classifier.minkowski_majority_preds))

        if np.max(self.image_classifier.minkowski_majority_preds) < 0.8:
            self._add_fail_reason("Minkowski not confident enough")
        if np.any(np.std(self.image_classifier.minkowski_preds, axis=0) > 0.1):
            self._add_fail_reason("Minkowski distributions too broad")

        self.minkowski_classification = self.minkowski_cats[max_class]


if __name__ == '__main__':
    cat_model = load_model(
        "/home/mltest1/tmp/pycharm_project_883/Data/Trained_Networks/2020-06-15--12-18/model.h5"
    )
    denoise_model = load_model(
        "/home/mltest1/tmp/pycharm_project_883/Data/Trained_Networks/2020-05-29--14-07/model.h5"
    )
    sklearn_model = load_sklearn_model(
        "/home/mltest1/tmp/pycharm_project_883/Data/Trained_Networks/2020-07-27--16-20/model.p"
    )

    ims = [
        "/media/mltest1/Dat Storage/Manu AFM CD Box/DATA 3/A6-AFMdata4/070926 - wetting experiment - AFM - C10 - toluene + xs thiol - Si and SiO2 - ring 5mm (continue)/SiO2_t10th_ring5_05mgmL_0000.ibw",
        "Data/Images/Parsed Dewetting 2020 for ML/thres_img/tp/SiO2_d10th_ring5_05mgmL_0004.ibw",
    ]
    for im in ims:
        test_filter = FileFilter()
def model_to_estimator(keras_model=None,
                       keras_model_path=None,
                       custom_objects=None,
                       model_dir=None,
                       config=None):
  """Constructs an `Estimator` instance from given keras model.

  For usage example, please see
  @{$guide/estimators$creating_estimators_from_keras_models}.

  Args:
    keras_model: A compiled Keras model object. This argument is mutually
      exclusive with `keras_model_path`.
    keras_model_path: Path to a compiled Keras model saved on disk, in HDF5
      format, which can be generated with the `save()` method of a Keras model.
      This argument is mutually exclusive with `keras_model`.
    custom_objects: Dictionary for custom objects.
    model_dir: Directory to save `Estimator` model parameters, graph, summary
      files for TensorBoard, etc.
    config: `RunConfig` to config `Estimator`.

  Returns:
    An Estimator from given keras model.

  Raises:
    ValueError: if neither keras_model nor keras_model_path was given.
    ValueError: if both keras_model and keras_model_path were given.
    ValueError: if the keras_model_path is a GCS URI.
    ValueError: if keras_model has not been compiled.
  """
  if not (keras_model or keras_model_path):
    raise ValueError(
        'Either `keras_model` or `keras_model_path` needs to be provided.')
  if keras_model and keras_model_path:
    raise ValueError(
        'Please specify either `keras_model` or `keras_model_path`, '
        'but not both.')

  if not keras_model:
    if keras_model_path.startswith(
        'gs://') or 'storage.googleapis.com' in keras_model_path:
      raise ValueError(
          '%s is not a local path. Please copy the model locally first.' %
          keras_model_path)
    logging.info('Loading models from %s', keras_model_path)
    keras_model = models.load_model(keras_model_path)
  else:
    logging.info('Using the Keras model provided.')
    keras_model = keras_model

  if not hasattr(keras_model, 'optimizer') or not keras_model.optimizer:
    raise ValueError('The given keras model has not been compiled yet. '
                     'Please compile the model with `model.compile()` '
                     'before calling `model_to_estimator()`.')

  config = estimator_lib.maybe_overwrite_model_dir_and_session_config(
      config, model_dir)

  keras_model_fn = _create_keras_model_fn(keras_model, custom_objects)
  if _any_weight_initialized(keras_model):
    # Warn if config passed to estimator tries to update GPUOptions. If a
    # session has already been created, the GPUOptions passed to the first
    # session sticks.
    if config.session_config.HasField('gpu_options'):
      logging.warning(
          'The Keras backend session has already been set. '
          'The _session_config passed to model_to_estimator will not be used.')
  else:
    # Pass the config into keras backend's default session.
    sess = session.Session(config=config.session_config)
    K.set_session(sess)

  warm_start_path = None
  if keras_model._is_graph_network:
    warm_start_path = _save_first_checkpoint(keras_model, custom_objects,
                                             config)
  elif keras_model.built:
    logging.warning(
        'You are creating an Estimator from a Keras model manually '
        'subclassed from `Model`, that was already called on some '
        'inputs (and thus already had weights). We are currently '
        'unable to preserve the model\'s state (its weights) as '
        'part of the estimator in this case. Be warned that the '
        'estimator has been created using a freshly initialized '
        'version of your model.\n'
        'Note that this doesn\'t affect the state of the model '
        'instance you passed as `keras_model` argument.')

  estimator = estimator_lib.Estimator(keras_model_fn,
                                      config=config,
                                      warm_start_from=warm_start_path)

  return estimator
These partial-dependence plots are only approximations of the surrogate fitness
function, which is itself an approximation of the true fitness function in
fitness(). This can be a little hard to follow.

For example, the partial dependence for learning_rate is computed by fixing it
at one value and drawing a large number of random samples over the remaining
dimensions of the search space. The estimated fitness of all those points is
then averaged. The procedure is repeated for other values of learning_rate to
show how it affects the average fitness. A similar procedure is used for the
plots that show the partial dependence of two dimensions at once.
'''
fig, ax = plot_objective(result=search_result, dimension_names=dim_names)

'''
We can also show another kind of matrix plot. Here the diagonal shows
histograms of the sample distribution for each hyper-parameter during the
Bayesian optimization. The plots below the diagonal show the location of the
samples in the search space, colour-coded by the order in which they were
drawn. With a large number of samples you may find that they eventually
concentrate in one region of the search space.
'''
fig, ax = plot_evaluations(result=search_result, dimension_names=dim_names)

# Evaluate the best model on the test set
# We can now use the best model on the test set. Reloading the model with
# Keras is straightforward.
model = load_model(path_best_model)
result = model.evaluate(x=data.test.images, y=data.test.labels)
for name, value in zip(model.metrics_names, result):
    print(name, value)
'''
loss 0.0363312054525
acc 0.9888
'''

# Or we can print the classification accuracy on its own.
print("{0}: {1:.2%}".format(model.metrics_names[1], result[1]))
# acc: 98.88%

# Predict on new data
else:
    sig = sigFull

bkgIndx = np.random.choice(range(bkgSampleSize), size=sampleSize, replace=False)
bkg = bkgFull[bkgIndx]
print("Data Loaded!!!")

# Load the input data scaler
scaler = joblib.load("scaler.save")
print("Scaler Loaded!!!")

# Load the model
loaded_model = m.load_model("simplePer.h5")
loaded_model.summary()
print("Model Loaded!!!")

# Prep the data for model evaluation
sigT = scaler.transform(sig)
sigLabel = np.matrix([1] * sigT.shape[0])
sigLabel = np.transpose(sigLabel)

bkgT = scaler.transform(bkg)
bkgLabel = np.matrix([0] * bkgT.shape[0])
bkgLabel = np.transpose(bkgLabel)

# Predict on the data
sig_pred = loaded_model.predict(sigT)
bkg_pred = loaded_model.predict(bkgT)
def load_my_model(model_path):
    global model
    model = load_model(os.path.join(os.path.dirname(__file__), model_path))
    model._make_predict_function()
    print('Model loaded. Start serving...')
import warnings
warnings.filterwarnings('ignore')

from pickle import load
from pickle import dump
import random

import nltk
import numpy as np
from tensorflow.python.keras.models import load_model

model = load_model('model_word_final.h5')
tokenizer = load(open('Tokenizer_final.pkl', 'rb'))


def sample(preds, temperature=1.0):
    # Sample an index (pasted code).
    preds = np.asarray(preds).astype("float64")
    preds = preds / np.sum(preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)


def get_sent(model, tokenizer, start_word, n_tokens):
    generated = [start_word]
    sent = start_word
    for i in range(n_tokens):
        x_preds = tokenizer.texts_to_sequences([sent])[0]
        preds = model.predict(x_preds, verbose=0)
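# Note that sample() above accepts a temperature argument but never applies it.
# Below is a hedged sketch of the usual temperature-scaled variant (the common
# Keras text-generation recipe), included as an assumed alternative rather than
# the author's implementation; the 1e-12 epsilon is an assumption to avoid log(0).
def sample_with_temperature(preds, temperature=1.0):
    preds = np.asarray(preds).astype("float64")
    # Re-weight the distribution: lower temperature -> more greedy sampling.
    preds = np.log(preds + 1e-12) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)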
def train(cfg: Config, tub_paths: str, model: str = None,
          model_type: str = None, transfer: str = None, comment: str = None) \
        -> tf.keras.callbacks.History:
    """
    Train the model
    """
    database = PilotDatabase(cfg)
    if model_type is None:
        model_type = cfg.DEFAULT_MODEL_TYPE
    model_path, model_num = \
        get_model_train_details(database, model)

    base_path = os.path.splitext(model_path)[0]
    kl = get_model_by_type(model_type, cfg)
    if transfer:
        kl.load(transfer)
    if cfg.PRINT_MODEL_SUMMARY:
        print(kl.interpreter.model.summary())

    tubs = tub_paths.split(',')
    all_tub_paths = [os.path.expanduser(tub) for tub in tubs]
    dataset = TubDataset(config=cfg, tub_paths=all_tub_paths,
                         seq_size=kl.seq_size())
    training_records, validation_records \
        = train_test_split(dataset.get_records(), shuffle=True,
                           test_size=(1. - cfg.TRAIN_TEST_SPLIT))
    print(f'Records # Training {len(training_records)}')
    print(f'Records # Validation {len(validation_records)}')

    # We need augmentation in validation when using crop / trapeze
    training_pipe = BatchSequence(kl, cfg, training_records, is_train=True)
    validation_pipe = BatchSequence(kl, cfg, validation_records, is_train=False)
    tune = tf.data.experimental.AUTOTUNE
    dataset_train = training_pipe.create_tf_data().prefetch(tune)
    dataset_validate = validation_pipe.create_tf_data().prefetch(tune)
    train_size = len(training_pipe)
    val_size = len(validation_pipe)

    ### training/validation length limit. Large validation datasets cause
    ### memory leaks.
    train_limit = cfg.TRAIN_LIMIT
    train_len = len(training_records)
    if train_limit is not None and train_len > train_limit:
        train_decrease = train_limit / train_len
        _train_size = math.ceil(train_size * train_decrease)
        print(f'train steps decrease from {train_size} to {_train_size}')
        train_size = _train_size

    val_limit = cfg.VALIDATION_LIMIT
    val_len = len(validation_records)
    if val_limit is not None and val_len > val_limit:
        val_decrease = val_limit / val_len
        _val_size = math.ceil(val_size * val_decrease)
        print(f'val steps decrease from {val_size} to {_val_size}')
        val_size = _val_size

    assert val_size > 0, "Not enough validation data, decrease the batch " \
                         "size or add more data."

    history = kl.train(model_path=model_path,
                       train_data=dataset_train,
                       train_steps=train_size,
                       batch_size=cfg.BATCH_SIZE,
                       validation_data=dataset_validate,
                       validation_steps=val_size,
                       epochs=cfg.MAX_EPOCHS,
                       verbose=cfg.VERBOSE_TRAIN,
                       min_delta=cfg.MIN_DELTA,
                       use_early_stop=cfg.USE_EARLY_STOP,
                       patience=cfg.EARLY_STOP_PATIENCE,
                       show_plot=cfg.SHOW_PLOT)

    if getattr(cfg, 'CREATE_TF_LITE', True):
        tf_lite_model_path = f'{base_path}.tflite'
        keras_model_to_tflite(model_path, tf_lite_model_path)

    if getattr(cfg, 'CREATE_TENSOR_RT', False):
        # load h5 (i.e. keras) model
        model_rt = load_model(model_path)
        # save in tensorflow savedmodel format (i.e. directory)
        model_rt.save(f'{base_path}.savedmodel')
        # pass savedmodel to the rt converter
        saved_model_to_tensor_rt(f'{base_path}.savedmodel', f'{base_path}.trt')

    database_entry = {
        'Number': model_num,
        'Name': os.path.basename(base_path),
        'Type': str(kl),
        'Tubs': tub_paths,
        'Time': time(),
        'History': history.history,
        'Transfer': os.path.basename(transfer) if transfer else None,
        'Comment': comment,
        'Config': str(cfg)
    }
    database.add_entry(database_entry)
    database.write()

    return history
##model = Sequential()
##
##model.add(Dense(8, activation='relu', input_dim=13))
##model.add(Dropout(0.05))
##model.add(Dense(3, activation='softmax'))
##
##model.compile(loss='categorical_crossentropy', optimizer='adam',
##              metrics=['accuracy'])
##
##model.fit(train_x, train_y, epochs=20, validation_split=0.1,
##          batch_size=1, verbose=2)

model = load_model("Wine_100.model")

score = model.evaluate(test_x, test_y)
print('Accuracy: %0.2f%%' % (score[1] * 100))

result = model.predict(test_x)
for i in range(len(test_x)):
    if np.argmax(result[i]) == 0:
        print('Predicted to be of Class 1! Actual class:', test_y[i])
    elif np.argmax(result[i]) == 1:
        print('Predicted to be of Class 2! Actual class:', test_y[i])
    elif np.argmax(result[i]) == 2:
def __init__(self):
    self.HOPS = 5
    self.DATASET = 'twitter'  # 'restaurant', 'laptop'
    self.POLARITIES_DIM = 3
    self.EMBEDDING_DIM = 200
    self.LEARNING_RATE = 0.01
    self.LSTM_PARAMS = {
        'units': 200,
        'activation': 'tanh',
        'recurrent_activation': 'sigmoid',
        'kernel_initializer': initializers.RandomUniform(minval=-0.003, maxval=0.003),
        'recurrent_initializer': initializers.RandomUniform(minval=-0.003, maxval=0.003),
        'bias_initializer': initializers.RandomUniform(minval=-0.003, maxval=0.003),
        'kernel_regularizer': regularizers.l2(0.001),
        'recurrent_regularizer': regularizers.l2(0.001),
        'bias_regularizer': regularizers.l2(0.001),
        'dropout': 0,
        'recurrent_dropout': 0,
    }
    self.MAX_SEQUENCE_LENGTH = 40
    self.MAX_ASPECT_LENGTH = 2
    self.ITERATION = 500
    self.BATCH_SIZE = 200

    self.texts_raw_indices, self.texts_left_indices, self.aspects_indices, \
        self.texts_right_indices, \
        self.polarities_matrix, \
        self.embedding_matrix, \
        self.tokenizer = \
        read_dataset(type=self.DATASET,
                     mode='train',
                     embedding_dim=self.EMBEDDING_DIM,
                     max_seq_len=self.MAX_SEQUENCE_LENGTH,
                     max_aspect_len=self.MAX_ASPECT_LENGTH)

    if os.path.exists('ram_saved_model.h5'):
        print('loading saved model...')
        self.model = load_model('ram_saved_model.h5')
    else:
        print('Build model...')
        inputs_sentence = Input(
            shape=(self.MAX_SEQUENCE_LENGTH * 2 + self.MAX_ASPECT_LENGTH, ),
            name='inputs_sentence')
        inputs_aspect = Input(shape=(self.MAX_ASPECT_LENGTH, ),
                              name='inputs_aspect')
        sentence = Embedding(input_dim=len(self.tokenizer.word_index) + 1,
                             output_dim=self.EMBEDDING_DIM,
                             input_length=self.MAX_SEQUENCE_LENGTH * 2 + self.MAX_ASPECT_LENGTH,
                             weights=[self.embedding_matrix],
                             trainable=False,
                             name='sentence_embedding')(inputs_sentence)
        aspect = Embedding(input_dim=len(self.tokenizer.word_index) + 1,
                           output_dim=self.EMBEDDING_DIM,
                           input_length=self.MAX_ASPECT_LENGTH,
                           weights=[self.embedding_matrix],
                           trainable=False,
                           name='aspect_embedding')(inputs_aspect)
        memory = Bidirectional(LSTM(**self.LSTM_PARAMS, return_sequences=True),
                               name='memory')(sentence)
        aspect = Bidirectional(LSTM(**self.LSTM_PARAMS, return_sequences=True),
                               name='aspect')(aspect)
        x = Lambda(lambda xin: K.mean(xin, axis=1), name='aspect_mean')(aspect)
        SharedAttention = Attention(name='shared_attention')
        for i in range(self.HOPS):
            x = SharedAttention((memory, x))
        x = Dense(self.POLARITIES_DIM)(x)
        predictions = Activation('softmax')(x)
        model = Model(inputs=[inputs_sentence, inputs_aspect],
                      outputs=predictions)
        model.summary()
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.Adam(lr=self.LEARNING_RATE),
                      metrics=['acc'])
        # plot_model(model, to_file='model.png')
        self.model = model
def load_checkpoint(self, folder=None, filename='checkpoint.h5'):
    self.checkpoint_name = 'model ' + filename
    folder_path = self.get_path(folder)
    file_path = folder_path / filename
    self.model = load_model(file_path)
# print(key)

# After you are done
# f.close()

filename = '/home/giriraj/CNNClassifier/Example/ld-model-cnn-classifier-example/model/to-deploy/trained_model_2019-10' \
           '-02 18-12-20.h5'

# with h5py.File(filename, 'r') as f:
# List all groups
# print("Keys: %s" % f.keys())
# a_group_key = list(f.keys())[0]

# Get the data
# data = list(f[a_group_key])
# print(f.values())

f = h5py.File(filename, 'r')
print(list(f.keys()))
opt_weight = f['optimizer_weights']
model_weight = f['model_weights']
print(f['model_weights'])
print(f['optimizer_weights'])
print(opt_weight)
print(model_weight)

model = load_model(filename)
print(model.summary())
def load_model_from_file(filename):
    # The original named this wrapper `load_model`, which shadows Keras'
    # load_model and recurses forever; it is renamed here so the call below
    # reaches the Keras function.
    model = load_model(filename)
    return model
def load_models(self):
    self.g = models.load_model(model_g_path)
    self.d = models.load_model(model_d_path)
    x_test = (x_test.astype(np.float32) - 127.5) / 127.5
    # convert x_train with a shape of (60000, 28, 28) to (60000, 784) so we
    # have 784 columns per row
    return (x_test, y_test)


if __name__ == '__main__':
    # Variable definitions
    channels = 3  # image channels

    # Load test data
    x_test, y_test = load_minst_data()
    if channels == 1:
        x_test = np.expand_dims(x_test, axis=3)

    # Load the discriminator
    discriminator = load_model('saved_model/discriminator.h5')

    # Evaluate the discriminator
    test_res = discriminator.predict(x_test[:1000])
    test_res = np.argmax(test_res[1], 1)

    # Measure its accuracy
    correct_prediction = np.equal(test_res, y_test[:1000])
    accuracy = np.mean(correct_prediction.astype(np.float32))
    print("Accuracy: {} %".format(accuracy * 100))

    # Example
    print(y_test[:20])
    print(test_res[:20])
    304,
    304,
), verbose=True, name_policy='short')
k_model.summary()
k_model.save('my_model.h5')
output = model(input_var)
check_error(output, k_model, input_np)  ## check the error between .pth and .h5

## step3: load .h5 and convert .h5 to .pb
tf.keras.backend.clear_session()
tf.keras.backend.set_learning_phase(0)  ## required, must not be omitted
my_model = load_model('my_model.h5')
h5_to_pb(my_model, output_dir='./model/', model_name='model.pb')

## step4: load .pb and test .pb
pb_path = './model/model.pb'
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name="")
    pic_file = './datasets/data'
    pic_list = os.listdir(pic_file)
    for name in pic_list:
import numpy as np
from tensorflow.python.keras.callbacks import EarlyStopping
from tensorflow.python.keras.layers import LSTM, Dense, Input
from tensorflow.python.keras.models import Model, load_model, Sequential
import tensorflow as tf

model = load_model('Pend_State_LSTM.keras')
model.save('Pend_State_LSTM.h5')
reconstructed = load_model('Pend_State_LSTM.h5')
print(model.summary())
print(reconstructed.summary())
def model_to_estimator(keras_model=None,
                       keras_model_path=None,
                       custom_objects=None,
                       model_dir=None,
                       config=None):
  """Constructs an `Estimator` instance from given keras model.

  For usage example, please see
  @{$programmers_guide/estimators$creating_estimators_from_keras_models}.

  Args:
    keras_model: A compiled Keras model object. This argument is mutually
      exclusive with `keras_model_path`.
    keras_model_path: Path to a compiled Keras model saved on disk, in HDF5
      format, which can be generated with the `save()` method of a Keras model.
      This argument is mutually exclusive with `keras_model`.
    custom_objects: Dictionary for custom objects.
    model_dir: Directory to save Estimator model parameters, graph, summary
      files for TensorBoard, etc.
    config: Configuration object.

  Returns:
    An Estimator from given keras model.

  Raises:
    ValueError: if neither keras_model nor keras_model_path was given.
    ValueError: if both keras_model and keras_model_path were given.
    ValueError: if the keras_model_path is a GCS URI.
    ValueError: if keras_model has not been compiled.
  """
  if not (keras_model or keras_model_path):
    raise ValueError(
        'Either `keras_model` or `keras_model_path` needs to be provided.')
  if keras_model and keras_model_path:
    raise ValueError(
        'Please specify either `keras_model` or `keras_model_path`, '
        'but not both.')

  if not keras_model:
    if keras_model_path.startswith(
        'gs://') or 'storage.googleapis.com' in keras_model_path:
      raise ValueError(
          '%s is not a local path. Please copy the model locally first.' %
          keras_model_path)
    logging.info('Loading models from %s', keras_model_path)
    keras_model = models.load_model(keras_model_path)
  else:
    logging.info('Using the Keras model provided.')
    keras_model = keras_model

  if not hasattr(keras_model, 'optimizer') or not keras_model.optimizer:
    raise ValueError(
        'The given keras model has not been compiled yet. '
        'Please compile the model with `model.compile()` '
        'before calling `model_to_estimator()`.')

  if isinstance(config, dict):
    config = run_config_lib.RunConfig(**config)

  keras_model_fn = _create_keras_model_fn(keras_model, custom_objects)
  estimator = estimator_lib.Estimator(
      keras_model_fn, model_dir=model_dir, config=config)

  # Check if we need to call get_weights:
  if _any_variable_initialized():
    keras_weights = keras_model.get_weights()
    # Warn if config passed to estimator tries to update GPUOptions. If a
    # session has already been created, the GPUOptions passed to the first
    # session sticks.
    if estimator._session_config.HasField('gpu_options'):
      logging.warning(
          'The Keras backend session has already been set. '
          'The _session_config passed to model_to_estimator will not be used.')
  else:
    # Pass the config into keras backend's default session.
    sess = session.Session(config=estimator._session_config)
    K.set_session(sess)
    keras_weights = None

  if keras_model._is_graph_network:
    # TODO(yifeif): move checkpoint initialization to scaffold.init_fn
    _save_first_checkpoint(keras_model, estimator, custom_objects,
                           keras_weights)
  elif keras_model.built:
    logging.warning('You are creating an Estimator from a Keras model '
                    'manually subclassed from `Model`, that was '
                    'already called on some inputs (and thus already had '
                    'weights). We are currently unable to preserve '
                    'the model\'s state (its weights) '
                    'as part of the estimator '
                    'in this case. Be warned that the estimator '
                    'has been created using '
                    'a freshly initialized version of your model.\n'
                    'Note that this doesn\'t affect the state of the '
                    'model instance you passed as `keras_model` argument.')

  return estimator
def load(self, path, type):
    self.model = load_model(path)
    self.type = type
    return self.model