def test_maml_model(self, num_inner_loop_steps):
    model_dir = os.path.join(FLAGS.test_tmpdir, str(num_inner_loop_steps))
    gin.bind_parameter('tf.estimator.RunConfig.save_checkpoints_steps',
                       _MAX_STEPS // 2)
    if tf.io.gfile.exists(model_dir):
        tf.io.gfile.rmtree(model_dir)
    mock_base_model = mocks.MockT2RModel(
        preprocessor_cls=noop_preprocessor.NoOpPreprocessor)
    mock_tf_model = MockMAMLModel(
        base_model=mock_base_model, num_inner_loop_steps=num_inner_loop_steps)
    # Note, we by choice use the same amount of conditioning samples for
    # inference as well during train and change the model for eval/inference
    # to only produce one output sample.
    mock_input_generator_train = MockMetaInputGenerator(
        batch_size=_BATCH_SIZE,
        num_condition_samples_per_task=_NUM_CONDITION_SAMPLES_PER_TASK,
        num_inference_samples_per_task=_NUM_CONDITION_SAMPLES_PER_TASK)
    mock_input_generator_train.set_specification_from_model(
        mock_tf_model, mode=tf.estimator.ModeKeys.TRAIN)
    mock_input_generator_eval = MockMetaInputGenerator(
        batch_size=_BATCH_SIZE,
        num_condition_samples_per_task=_NUM_CONDITION_SAMPLES_PER_TASK,
        num_inference_samples_per_task=1)
    mock_input_generator_eval.set_specification_from_model(
        mock_tf_model, mode=tf.estimator.ModeKeys.TRAIN)
    mock_export_generator = MockMetaExportGenerator(
        num_condition_samples_per_task=_NUM_CONDITION_SAMPLES_PER_TASK,
        num_inference_samples_per_task=1)

    train_eval.train_eval_model(
        t2r_model=mock_tf_model,
        input_generator_train=mock_input_generator_train,
        input_generator_eval=mock_input_generator_eval,
        max_train_steps=_MAX_STEPS,
        model_dir=model_dir,
        export_generator=mock_export_generator,
        create_exporters_fn=train_eval.create_default_exporters)

    export_dir = os.path.join(model_dir, 'export')
    # best_exporter_numpy, best_exporter_tf_example.
    self.assertLen(tf.io.gfile.glob(os.path.join(export_dir, '*')), 4)

    numpy_predictor_fn = contrib_predictor.from_saved_model(
        tf.io.gfile.glob(os.path.join(export_dir, 'best_exporter_numpy',
                                      '*'))[-1])
    feed_tensor_keys = sorted(numpy_predictor_fn.feed_tensors.keys())
    self.assertCountEqual(
        ['condition/features/x', 'condition/labels/y', 'inference/features/x'],
        feed_tensor_keys,
    )

    tf_example_predictor_fn = contrib_predictor.from_saved_model(
        tf.io.gfile.glob(
            os.path.join(export_dir, 'best_exporter_tf_example', '*'))[-1])
    self.assertCountEqual(['input_example_tensor'],
                          list(tf_example_predictor_fn.feed_tensors.keys()))
def __init__(self, conf, **kwargs):
    self.conf = conf
    for attr in conf:
        setattr(self, attr, conf[attr])
    self.zdy = {}
    # init embedding
    self.init_embedding()
    # load train data
    csv = pd.read_csv(self.ori_path,
                      header=0,
                      sep="\t",
                      error_bad_lines=False)
    if 'text' in csv.keys() and 'target' in csv.keys():
        # format: text \t target
        # for this format, the size of each class should be larger than 2
        self.text_list = list(csv['text'])
        self.label_list = list(csv['target'])
    elif 'text_a' in csv.keys() and 'text_b' in csv.keys() \
            and 'target' in csv.keys():
        # format: text_a \t text_b \t target
        # for this format, the target value can only be chosen from 0 or 1
        self.text_a_list = list(csv['text_a'])
        self.text_b_list = list(csv['text_b'])
        self.text_list = self.text_a_list + self.text_b_list
        self.label_list = list(csv['target'])
    subdirs = [
        os.path.join(self.export_dir_path, x)
        for x in os.listdir(self.export_dir_path) if 'temp' not in x
    ]
    latest = str(sorted(subdirs)[-1])
    self.predict_fn = predictor.from_saved_model(latest)
def main():
    tf.logging.set_verbosity(tf.logging.INFO)
    models_dir = 'training/single_column/models3'

    # load the data
    df, stim = load_data()
    columns = list(df.columns)[1:]
    test_data = {'image': stim[:50, 16:-16, 16:-16]}

    # get predictions from all the models
    column_predictions = {}
    for i, column_name in enumerate(columns):
        print('Predicting values for the column "%s"...' % column_name)

        # find the model directory
        best_models_path = root_dir('%s/%d_%s/export/best' %
                                    (models_dir, i, column_name))
        latest_model_subdir = sorted(os.listdir(best_models_path),
                                     reverse=True)[0]
        latest_model_dir = os.path.join(best_models_path, latest_model_subdir)

        # create predictor
        predict_fn = predictor.from_saved_model(latest_model_dir)

        # get predictions
        column_predictions[column_name] = predict_fn(test_data)['spike']

    # generate a submission file
    with open(root_dir('data/submission/single_column/3.csv'), 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['Id'] + columns)
        for i in range(len(test_data['image'])):
            writer.writerow([i] + [
                column_predictions[column_name][i] for column_name in columns
            ])
def demonstrate(music_file, model_path, genres_metadata_fn):
    processor = get_processor(PROCESSOR)
    genres = load_genres(genres_metadata_fn)

    features = processor(music_file)
    features = np.reshape(features, (1,) + features.shape + (1,))

    predict_fn = predictor.from_saved_model(model_path)

    # estimator = tf.estimator.Estimator(
    #     model_fn=model_fn,
    #     config=tf.estimator.RunConfig(
    #         save_checkpoints_steps=10,
    #         model_dir=model_path,
    #     )
    # )
    #
    # predictions = estimator.predict(
    #     input_fn=tf.estimator.inputs.numpy_input_fn(
    #         x={'feature': features},
    #         shuffle=False
    #     )
    # )

    predictions = predict_fn({'feature': features})
    result = extract_genres(predictions['genres_top'], genres)

    for genre_title, prob in result:
        print(genre_title, ':', prob)
def test_forward_in_exported(self):

    def serving_input_fn():
        features_ph = {
            'x': array_ops.placeholder(dtypes.float32, [None]),
            'id': array_ops.placeholder(dtypes.int32, [None])
        }
        features = {
            key: array_ops.expand_dims(tensor, -1)
            for key, tensor in features_ph.items()
        }
        return estimator_lib.export.ServingInputReceiver(features, features_ph)

    def input_fn():
        return {'x': [[3.], [5.]], 'id': [[101], [102]]}, [[1.], [2.]]

    # create estimator
    feature_columns = [fc.numeric_column('x')]
    estimator = linear.LinearRegressor(feature_columns)
    estimator.train(input_fn=input_fn, steps=1)
    estimator = extenders.forward_features(estimator, 'id')

    # export saved model
    export_dir, tmpdir = self._export_estimator(estimator, serving_input_fn)

    # restore model
    predict_fn = from_saved_model(export_dir, signature_def_key='predict')
    predictions = predict_fn({'x': [3], 'id': [101]})

    # verify that 'id' exists in predictions
    self.assertIn('id', predictions)
    self.assertEqual(101, predictions['id'])

    # Clean up.
    gfile.DeleteRecursively(tmpdir)
def local_predict(self, p):
    """Read a locally saved protocol buffer model and run prediction.

    :param p: Parameters
        - datasource: DataFrame or file path to predict on
        - is_src_file: whether datasource is a DataFrame object or a file path;
          True: DataFrame object, False: file path
        - model_name: model name, one of `dnn`, `neu_mf`
        - job_dir: model checkpoint directory
    :return: Prediction result
    """
    from tensorflow.contrib import predictor

    p = self.merge_params(p)
    export_dir = self.service.find_latest_expdir(p)
    predict_fn = predictor.from_saved_model(export_dir,
                                            signature_def_key='predict')
    if p.is_src_file:
        datasource = p.datasource
    else:
        with tf.gfile.FastGFile(p.test_files, 'rb') as fp:
            datasource = pd.read_pickle(fp)

    base = np.zeros(len(datasource))
    open_flag = datasource.open == '1'
    preds = predict_fn(datasource[open_flag]).get('predictions').ravel()
    # The sales column was transformed with np.log1p, so invert the
    # prediction with np.expm1.
    preds = np.round(np.expm1(preds))
    base[open_flag] = preds
    return base
def after_run(self, run_context, run_values):
    if self._step % 200 == 0:
        current_time = time.time()
        duration = current_time - self._start_time
        self._start_time = current_time

        returns = run_values.results
        examples_per_sec = 10 * 8 / duration
        sec_per_batch = float(duration / 10)

        predict_fn = predictor.from_saved_model(export_dir)
        predictions = predict_fn({"input": returns[2]})
        y_pred = np.transpose(predictions['classes'][:, 0])
        y_est = np.argmax(returns[3], axis=1)
        cur_acc = sum(y_pred == y_est) / len(y_est) * 100

        self.outer_self._visualize_training_generator(
            self.fig, self.ax, returns[2], returns[3], fs=self.fs)
        self.outer_self._visualize_training_generator(
            self.fig_real,
            self.ax_real,
            returns[4],
            returns[5],
            fs=self.fs,
            real_signals=True)

        format_str = ('%s: step %d, loss = (g: %.2f, d: %.2f) '
                      '(%.1f examples/sec; %.3f sec/batch), accuracy: %.2f')
        print(format_str % (datetime.now(), self._step, returns[0], returns[1],
                            examples_per_sec, sec_per_batch, cur_acc))
def main(unused_argv):
    # Export model_fn to only use decoder
    export_tf_model(FLAGS.export_path)

    # Find latest frozen pb
    subdirs = [
        x for x in Path(FLAGS.export_path + '/frozen_pb').iterdir()
        if x.is_dir() and 'temp' not in str(x)
    ]
    latest = str(sorted(subdirs)[-1])

    # Create predictor
    predict_fn = predictor.from_saved_model(latest)

    dataset = tf.data.TFRecordDataset(FLAGS.test_path)\
        .repeat(1)\
        .map(extract_features, num_parallel_calls=4)\
        .batch(512)\
        .prefetch(8)
    iterator = dataset.make_one_shot_iterator()

    # Eager execution for obtaining batch data from dataset
    z_val = np.zeros(z_dim)[None]
    x_arr = np.zeros(input_dim)[None]
    mu_arr = np.zeros(z_dim)[None]

    latent_interpolation(predict_fn)

    ''' t-SNE visualization
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(
        args.csv, dtype=object, keep_default_na=False,
        na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o)
        for o in sorted(os.listdir(args.model_path))
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Iterate through the files, predict on the full volumes and compute the
    # mean absolute error
    mae = []
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = output['features']['x']
        lbl = output['labels']['y']
        test_id = output['img_id']

        # We know that the training input shape of [64, 96, 96] will work with
        # our model strides, so we collect several crops of the test image and
        # average the predictions. Alternatively, we could pad or crop the
        # input to any shape that is compatible with the resolution scales of
        # the model:
        num_crop_predictions = 4
        crop_batch = extract_random_example_array(
            image_list=img,
            example_size=[64, 96, 96],
            n_examples=num_crop_predictions)

        y_ = my_predictor.session.run(
            fetches=my_predictor._fetch_tensors['logits'],
            feed_dict={my_predictor._feed_tensors['x']: crop_batch})

        # Average the predictions on the cropped test inputs:
        y_ = np.mean(y_)

        # Calculate the absolute error for this subject
        mae.append(np.abs(y_ - lbl))

        # Print outputs
        print('id={}; pred={:0.2f} yrs; true={:0.2f} yrs; run time={:0.2f} s; '
              ''.format(test_id, y_, lbl[0], time.time() - t0))
    print('mean absolute err={:0.3f} yrs'.format(np.mean(mae)))
def __init__(self, lis_topic='/velodyne_points', pub_topic='/class_topic',
             field_num=4):
    """Init function.

    Args:
        `lis_topic`: the point cloud message topic to listen to,
            default '/velodyne_points'
        `pub_topic`: the point cloud message topic to publish,
            default '/class_topic'
    Raise:
    """
    self.lis_topic = lis_topic
    self.pub_topic = pub_topic
    self.point_field = self.make_point_field(field_num)

    # load estimator saved model as serving predictor
    self.fast_predict = predictor.from_saved_model('./squeeze_seg/saved/')

    # publisher
    self.pcmsg_pub = rospy.Publisher(self.pub_topic, PointCloud2,
                                     queue_size=1)
    # ros node init
    rospy.init_node('tasqueezeseg_node', anonymous=True)
    # listener
    rospy.Subscriber(self.lis_topic, PointCloud2, self.pcmsg_cb)
    print 'now will listen : {}, and publish : {}'.format(self.lis_topic,
                                                          self.pub_topic)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()
def run(test_dataset_path, keys_to_features, batch_size, model_dir):
    input_shard_file_list = shardedfile_to_filelist(test_dataset_path)
    parse_fn = lambda x: parser(x, keys_to_features)
    dataset = tf.data.TFRecordDataset(input_shard_file_list)
    dataset = dataset.repeat(1)
    dataset = dataset.map(parse_fn, num_parallel_calls=32).batch(batch_size)
    next_batch = dataset.make_one_shot_iterator().get_next()

    predict_fn = predictor.from_saved_model(model_dir)
    # Materialize each batch with a session before feeding it to the
    # predictor, which expects numpy values rather than graph tensors.
    with tf.Session() as sess:
        while True:
            try:
                images, labels = sess.run(next_batch)
            except tf.errors.OutOfRangeError:
                break
            predict_fn({FLAGS.input_name: images})


# Stand-alone latency benchmark against an exported model. `batch` is
# assumed to be prepared elsewhere in the original script.
predictor_fn = tf.contrib.predictor.from_saved_model(
    FLAGS.model_dir,
    signature_def_key=FLAGS.signature_def_key,
    tags=FLAGS.tags)
times = []
for _ in range(FLAGS.num_runs):
    start = time.time()
    predictor_fn({FLAGS.input_name: batch})
    end = time.time()
    times.append(end - start)

if FLAGS.output_tsv is not None:
    with open(FLAGS.output_tsv, 'w') as fout:
        writer = csv.writer(fout, delimiter='\t')
        writer.writerows([[t] for t in times])

print('Min time: %s' % min(times))
print('Median time: %s' % np.median(times))
def __init__(self, model_dir, w2v_dir):
    self.config = Config()
    self.mecab = Mecab()
    self.wv = Word2Vec(os.path.join(w2v_dir, 'w2v.pkl'))
    self.mapper = IdMapper(os.path.join(model_dir, 'tag.pkl'),
                           vocab_size=self.config.max_tags)
    self.predictor = from_saved_model(export_dir=model_dir)
def load_pb(pb_dir):
    '''Load a saved pb model.

    This helper is meant for testing a pb model offline; for online
    deployment, predictions should be served through TF Serving instead.

    Example (a generic classification model):
        predict_fn, input_names, output_names = load_pb('pb')
        tokenizer = BertTokenizer.from_pretrained('ckpt', do_lower_case=True)
        inputs = tokenizer.encode("名人堂故事之威斯康辛先生:大范&联盟总裁的前辈",
                                  add_special_tokens=True,
                                  max_length=32,
                                  pad_to_max_length=True)
        prediction = predict_fn({
            'input_ids': [inputs['input_ids']],
            'input_mask': [inputs['input_mask']],
            'token_type_ids': [inputs['token_type_ids']]
        })
        print(prediction)
        # Output:
        # {'logits': array([[ 5.1162577, -3.842629 , -0.2090739, 1.629769 ,
        #                    -2.6358554]], dtype=float32)}

    :param pb_dir: directory containing the saved pb model
    :return: the predict fn, the input names it accepts, and the output names
        it produces
    '''
    predict_fn = predictor.from_saved_model(pb_dir)
    input_names = list(predict_fn._feed_tensors.keys())
    output_names = list(predict_fn._fetch_tensors.keys())
    return predict_fn, input_names, output_names
def main(unused_argv):
    # set test image path
    img_path = './imgs/dog.jpg'

    # export model_fn to only use decoder
    export_tf_model(FLAGS.export_path)

    # fetch latest frozen pb
    subdirs = [
        x for x in Path(FLAGS.export_path + '/frozen_pb').iterdir()
        if x.is_dir() and 'temp' not in str(x)
    ]
    latest = str(sorted(subdirs)[-1])

    # initiate predictor
    predict_fn = predictor.from_saved_model(latest)

    # read and normalize image
    x = Utils.load_img(img_path)[None] / 255.0
    dict_in = {'x': x}

    # inference; ordered in (H,W,C)
    code = np.squeeze(predict_fn(dict_in)['code'])

    # dump input and IA after autoencoder
    ia = x
    oa = code

    # pickle data
    Utils.pack(ia, './dump/ia.pkl')
    Utils.pack(oa, './dump/oa_0.pkl')
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(
        args.csv, dtype=object, keep_default_na=False,
        na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o)
        for o in os.listdir(args.model_path)
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a
    # Dice coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = np.nanmean(metrics.dice(pred, lbl, num_classes)[1:])

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))
        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])
        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc, time.time() - t0, output_fn))
def __init__(self, str_config, params):
    # reading configuration file
    self.configuration = conf.ConfigurationFile(str_config,
                                                params['modelname'])
    self.modelname = self.configuration.get_model_name()
    self.processFun = imgproc.get_process_fun(
        self.configuration.get_process_fun())

    # snapShotDir must exist
    assert os.path.exists(self.configuration.get_snapshot_dir()), \
        "Path {} does not exist".format(self.configuration.get_snapshot_dir())

    # loading mean.dat and metadata.dat
    filename_mean = os.path.join(self.configuration.get_data_dir(),
                                 "mean.dat")
    metadata_file = os.path.join(self.configuration.get_data_dir(),
                                 "metadata.dat")
    # reading metadata
    self.image_shape = np.fromfile(metadata_file, dtype=np.int32)
    # print("image shape: {}".format(self.image_shape))

    # load mean
    mean_img = np.fromfile(filename_mean, dtype=np.float32)
    self.mean_img = np.reshape(mean_img, self.image_shape.tolist())

    # defining files for training and test
    # loading model
    self.predictor = predictor.from_saved_model(
        os.path.join(self.configuration.get_data_dir(), "cnn-model"))
    print("predictor loaded OK")

    mapping_file = os.path.join(self.configuration.get_data_dir(),
                                "mapping.txt")
    self.mapping = False
    if os.path.exists(mapping_file):
        self.class_mapping = pmap.PMapping(mapping_file)
        self.mapping = True
    self.device = params['device']
def unittest():
    predict_fn = predictor.from_saved_model(args.SAVED_MODEL,
                                            signature_def_key="predict")
    (_, _), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    for i in range(10):
        pred = predict_fn({'images': x_test[i].reshape([-1, 28, 28, 1])})
        print(y_test[i], pred['class_ids'][0][0])
def test_create_serving_input_receiver_numpy(self):
    (model_dir, mock_t2r_model,
     prediction_ref) = self._train_and_eval_reference_model('numpy')
    exporter = default_export_generator.DefaultExportGenerator()
    exporter.set_specification_from_model(mock_t2r_model)

    # Export trained serving estimator.
    estimator_exporter = tf.estimator.Estimator(
        model_fn=mock_t2r_model.model_fn,
        config=tf.estimator.RunConfig(model_dir=model_dir))
    serving_input_receiver_fn = (
        exporter.create_serving_input_receiver_numpy_fn())
    exported_savedmodel_path = estimator_exporter.export_saved_model(
        export_dir_base=model_dir,
        serving_input_receiver_fn=serving_input_receiver_fn,
        checkpoint_path=tf.train.latest_checkpoint(model_dir))

    # Load trained and exported serving estimator, run prediction and assert
    # it is the same as before exporting.
    feed_predictor_fn = contrib_predictor.from_saved_model(
        exported_savedmodel_path)
    mock_input_generator = mocks.MockInputGenerator(batch_size=BATCH_SIZE)
    features, labels = mock_input_generator.create_numpy_data()
    for pos, value in enumerate(prediction_ref):
        actual = feed_predictor_fn(
            {'x': features[pos, :].reshape(1, -1)})['logit'].flatten()
        predicted = value['logit'].flatten()
        np.testing.assert_almost_equal(
            actual=actual, desired=predicted, decimal=4)
        if labels[pos] > 0:
            self.assertGreater(predicted[0], 0)
        else:
            self.assertLess(predicted[0], 0)
def classify(text):
    text_series = pd.Series([text])
    predict_x = np.array(list(
        vocab_processor.transform(text_series))).flatten()
    print(predict_x)
    print(predict_x.shape)

    with tf.Session() as sess:
        tf.saved_model.loader.load(sess,
                                   [tf.saved_model.tag_constants.SERVING],
                                   export_dir)
        predict_fn = predictor.from_saved_model(export_dir)
        model_input = tf.train.Example(features=tf.train.Features(
            feature={
                'words': tf.train.Feature(int64_list=tf.train.Int64List(
                    value=predict_x))
            }))
        model_input = model_input.SerializeToString()
        output_dict = predict_fn({'predictor_inputs': [model_input]})
        predicted_class = output_dict['class'][0]
        print(predicted_class)
        topic = news_classes.class_map[str(predicted_class)]
    return topic
'''
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o)
        for o in sorted(os.listdir(args.model_path))
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Iterate through the files, predict on the full volumes and compute the
    # classification accuracy
    accuracy = []
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = output['features']['x']
        lbl = output['labels']['y']
        test_id = output['img_id']

        # We know that the training input shape of [64, 96, 96] will work with
        # our model strides, so we collect several crops of the test image and
        # average the predictions. Alternatively, we could pad or crop the
        # input to any shape that is compatible with the resolution scales of
        # the model:
        num_crop_predictions = 4
        crop_batch = extract_random_example_array(
            image_list=img,
            example_size=[64, 96, 96],
            n_examples=num_crop_predictions)

        y_ = my_predictor.session.run(
            fetches=my_predictor._fetch_tensors['y_prob'],
            feed_dict={my_predictor._feed_tensors['x']: crop_batch})

        # Average the predictions on the cropped test inputs:
        y_ = np.mean(y_, axis=0)
        predicted_class = np.argmax(y_)

        # Calculate the accuracy for this subject
        accuracy.append(predicted_class == lbl)

        # Print outputs
        print('id={}; pred={}; true={}; run time={:0.2f} s; '
              ''.format(test_id, predicted_class, lbl[0], time.time() - t0))
    print('accuracy={}'.format(np.mean(accuracy)))
def transformation():
    """This method reads in the data (json object) sent with the request
    and returns a prediction as response
    """
    data = None
    export_path = 'exported_model/'
    if flask.request.content_type == 'application/json':
        data = flask.request.data.decode('utf-8')
        data = pd.read_json(data, lines=True)

        X_scaler = joblib.load(os.path.join(paths.model('X_scaler.save')))
        scaled_data = X_scaler.transform(data.values)

        predict_fn = predictor.from_saved_model(paths.model(export_path))
        predictions = predict_fn({'input': scaled_data})
        prediction = predictions['earnings'][0][0]

        Y_scaler = joblib.load(os.path.join(paths.model('Y_scaler.save')))
        true_prediction = {
            'earnings': round(
                (float(prediction) - Y_scaler.min_[0]) / Y_scaler.scale_[0], 3)
        }
        true_prediction = json.dumps(true_prediction)
    else:
        return flask.Response(
            response='This predictor only supports JSON data',
            status=415,
            mimetype='text/plain')

    result = true_prediction
    return flask.Response(response=result, status=200,
                          mimetype='application/json')
def __init__(self, database=None, mode='both'):
    self.mode = mode
    if mode != 'both' and mode != 'only_detect' and mode != 'only_identify':
        raise TypeError("Doesn't support mode " + mode)
    if mode != 'only_detect' and database is None:
        raise TypeError(
            "You have to pass database with mode '{}'".format(mode))

    logging.info(
        "A vision object has been created with mode `{}`".format(mode))

    if mode != 'only_detect':
        self.__embedding_encoder = predictor.from_saved_model(
            "exported_model")
        self.__database_empty = True
        self.__classifier = KNeighborsClassifier(n_neighbors=5,
                                                 algorithm='ball_tree',
                                                 weights='distance')
    if mode != 'only_identify':
        self.__pnet, self.__rnet, self.__onet = self.load_detect_face_model(
            device=vision_config.DETECT_DEVICE)
    if mode != 'only_detect':
        self.__database = database
        self.__person, self.__feature, self.__label = \
            database.extract_features_labels()
        if len(self.__feature) > 0:
            self.__database_empty = False
            self.__classifier.fit(self.__feature, self.__label)
def __init__(self, language="lv", model_dir=None, output_dir=None,
             saved_model_dir=None):
    if language not in ["lv", "en"]:
        raise NotImplementedError(
            "only 'lv' and 'en' languages are supported")

    FLAGS([
        'BERT_NER',
        '--do_lower_case=False',
    ])

    if saved_model_dir and os.path.exists(saved_model_dir):
        FLAGS.vocab_file = os.path.join(saved_model_dir, 'vocab.txt')
        FLAGS.label_file = os.path.join(saved_model_dir, 'labels.txt')
    else:
        print("Saved model not found in {}".format(saved_model_dir))

    self.processor = NerProcessor()
    self.label_list = self.processor.labels
    self.id2label = {key: value for key, value in enumerate(self.label_list)}
    self.tokenizer = tokenization.FullTokenizer(
        vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

    # Load from saved_model if present, else load from estimator and export
    # to saved_model.
    try:
        subdirs = [
            x for x in Path(saved_model_dir).iterdir()
            if x.is_dir() and 'temp' not in str(x)
        ]
        latest = str(sorted(subdirs)[-1])
        self.predict_fn = predictor.from_saved_model(latest)
    except:
        print("Failed to load saved model from {}".format(saved_model_dir))

    print("NER predictor init done.")
def main(unused_argv):
    # Export model
    export_tf_model(FLAGS.export_path)

    # Find latest frozen pb
    subdirs = [
        x for x in Path(FLAGS.export_path + '/frozen_pb').iterdir()
        if x.is_dir() and 'temp' not in str(x)
    ]
    latest = str(sorted(subdirs)[-1])

    # Create predictor
    predict_fn = predictor.from_saved_model(latest)

    dataset = create_dataset(path=FLAGS.test_path,
                             buffer_size=25,
                             batch_size=25,
                             num_epochs=1)
    iterator = dataset.make_one_shot_iterator()

    # Eager execution for obtaining batch data from dataset
    x_val = iterator.get_next().numpy()
    z_val = np.zeros([x_val.shape[0], z_dim])
    dict_in = {'x': x_val, 'z': z_val}

    # Make predictions and fetch results from output dict
    predictions = predict_fn(dict_in)
    x = predictions['x']
    y = predictions['y']

    # Show all source v.s. generated results
    compare_all(x, y, x.shape[0])
def process(self, message: Message, **kwargs: Any) -> None:
    from tensorflow.contrib import predictor
    from seq2label.input import to_fixed_len

    real_result_dir = os.path.join(self.model_dir, self.result_dir)
    print(real_result_dir)

    if not self.predict_fn:
        self.predict_fn = predictor.from_saved_model(real_result_dir)

    input_text = message.text
    input_feature = {
        'words': [to_fixed_len([i for i in input_text], 20, '<pad>')],
    }
    print(input_feature)

    predictions = self.predict_fn(input_feature)
    label = predictions['label'][0].decode()

    intent = {"name": label, "confidence": 1}
    ranking = zip([i.decode() for i in predictions['label_mapping']],
                  [float(i) for i in predictions['label_prob'][0]])
    intent_ranking = [{
        "name": name,
        "confidence": score
    } for name, score in ranking]

    message.set("intent", intent, add_to_output=True)
    message.set("intent_ranking", intent_ranking, add_to_output=True)
def main(unused_argv):
    dataset = pd.read_csv(FLAGS.orig_path,
                          header=None,
                          sep=",",
                          names=[
                              "user", "item", "label", "gender", "age",
                              "occupation", "genre1", "genre2", "genre3"
                          ])
    wide_cols, deep_cols, feat_func = create_feature_columns(dataset)
    eval_info = np.load(FLAGS.eval_info, allow_pickle=True)  # used for evaluate
    user_dict, item_dict, item_info, item_indices = get_unique_info(dataset)
    pred_feat_func = functools.partial(predict_info,
                                       user_dict=user_dict,
                                       item_dict=item_dict)
    rank_feat_func = functools.partial(rank_info,
                                       user_dict=user_dict,
                                       item_info=item_info)

    if FLAGS.use_bn:
        print("use batch normalization...")

    wde = WideDeepEstimator(lr=FLAGS.lr,
                            embed_size=FLAGS.embed_size,
                            n_epochs=FLAGS.epochs,
                            batch_size=FLAGS.batch_size,
                            use_bn=FLAGS.use_bn,
                            task=FLAGS.task,
                            pred_feat_func=pred_feat_func,
                            rank_feat_func=rank_feat_func,
                            item_indices=item_indices)
    wde.fit(wide_cols, deep_cols, FLAGS.train_path, FLAGS.eval_path,
            feat_func, eval_info, verbose=2)
    print(wde.predict_ui(1, 2))
    t6 = time.time()
    print(wde.recommend_user(1, n_rec=7))
    print("recommend time: ", time.time() - t6)

    if FLAGS.export_and_load:
        wde.model.export_saved_model(FLAGS.export_model_dir,
                                     serving_input_receiver_fn)
        sub_dirs = [
            x for x in Path(FLAGS.export_model_dir).iterdir()
            if x.is_dir() and 'temp' not in str(x)
        ]
        latest = str(sorted(sub_dirs)[-1])
        predict_fn = predictor.from_saved_model(latest)

        samples = rank_info(user_dict, 1, item_info)
        t1 = time.time()
        rank = predict_fn(samples)["probabilities"].ravel()
        indices = np.argpartition(rank, -FLAGS.n_rec)[-FLAGS.n_rec:]
        print("recommend for user 1: %s" % sorted(
            zip(item_indices[indices], rank[indices]), key=lambda x: -x[1]))
        print("predict_fn recommend time: ", time.time() - t1)
def test_unit(self, text):
    ####################### init #########################
    if not self.model_loaded:
        # Add samples that do not take part in training.
        if os.path.exists(self.no_train_path):
            csv = pd.read_csv(self.no_train_path,
                              header=0,
                              sep=",",
                              error_bad_lines=False)
            self.text_list += list(csv['text'])
            self.label_list += list(csv['target'])
        subdirs = [
            x for x in Path(self.export_dir_path).iterdir()
            if x.is_dir() and 'temp' not in str(x)
        ]
        latest = str(sorted(subdirs)[-1])
        self.predict_fn = predictor.from_saved_model(latest)
        self.init_embedding()
        self.model_loaded = True
        self.vec_list = self._get_vecs(self.predict_fn, self.text_list)
        # self.set_zdy_labels(['睡觉', '我回家了', '晚安', '娃娃了', '周杰伦', '自然语言处理'],
        #                     ['打开情景模式', '打开情景模式', '打开情景模式',
        #                      '打开情景模式', '打开情景模式', '打开情景模式'])
    text_list = self.text_list
    vec_list = self.vec_list
    label_list = self.label_list

    # Used for adding user-defined questions (user-defined entries take
    # priority).
    if self.zdy != {}:
        text_list = self.zdy['text_list'] + text_list
        vec_list = np.concatenate([self.zdy['vec_list'], self.vec_list],
                                  axis=0)
        label_list = self.zdy['label_list'] + label_list
    vec = self._get_vecs(self.predict_fn, [text], need_preprocess=True)
    scores = cosine_similarity(vec, vec_list)[0]
    max_id = np.argmax(scores)
    max_score = scores[max_id]
    max_similar = text_list[max_id]
    logging.info("test result: {}, {}, {}".format(label_list[max_id],
                                                  max_score,
                                                  max_similar))
    return label_list[max_id], max_score, max_id
def predict(export_dir, predict_set):
    sess = tf.Session()
    import tensorflow.contrib.factorization

    # Load the saved model.
    tf.saved_model.loader.load(sess,
                               [tf.saved_model.tag_constants.SERVING],
                               export_dir)

    from tensorflow.contrib import predictor
    # predictor.from_saved_model builds a prediction function from the model:
    # it reads the previously saved model and clusters the input data.
    predict_fn = predictor.from_saved_model(export_dir)

    inputList = []
    for test_data in predict_set:
        # The block below is TFRecords plumbing; in short, it prepares the
        # feature data used for prediction.
        predictor_input_feature = {
            'x': tf.train.Feature(
                float_list=tf.train.FloatList(value=test_data))
        }
        input_for_predictor = tf.train.Example(features=tf.train.Features(
            feature=predictor_input_feature))
        # Serialize the input data to a string.
        serialized_input = input_for_predictor.SerializeToString()
        inputList.append(serialized_input)

    results = predict_fn({"model_inputs": inputList})
    clusterIndices = results['output']

    # enumerate() pairs each element of an iterable (list, tuple, string, ...)
    # with its index.
    for i, point in enumerate(predict_set):
        clusterIndex = clusterIndices[i]
        print('point:', point, 'is in cluster', clusterIndex)
    return clusterIndex
def predict_from_saved_model(model_path, image_path, decode_fn, top=5):
    predict_fn = predictor.from_saved_model(export_dir=model_path)
    with open(image_path, 'rb') as f:
        b64_x = f.read()
    b64_x = base64.urlsafe_b64encode(b64_x)
    input_instance = {'inputs': [b64_x]}
    preds = predict_fn(input_instance)['outputs'][0]

    if decode_fn == 'inception':
        preds = np.expand_dims(preds, 0)
        print('Predicted:')
        for p in decode_predictions(preds, top=top)[0]:
            print("Score {}, Label {}".format(p[2], p[1]))
    elif decode_fn == 'transfert':
        preds = [(fruit_mapping[idx], pred) for idx, pred in enumerate(preds)]
        preds = sorted(preds, key=lambda pred: pred[1], reverse=True)
        if top:
            preds = preds[:top]
        print('Predicted:')
        for pred in preds:
            print("Score {}, Label {}".format(pred[1], pred[0]))
    else:
        print('Predicted scores:')
        print(preds)
def predict(export_dir, file_path):
    img = cv2.imread(file_path)
    img = cv2.resize(img, (28, 28), cv2.INTER_LINEAR)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    global num_input
    img = img.reshape(num_input)
    x_test = np.ndarray((1, num_input), dtype=np.float32)
    x_test[0] = img

    print('Predict mode')
    predict_fn = predictor.from_saved_model(export_dir)
    predictions = predict_fn({"images": x_test})
    # df = pd.DataFrame(predictions)
    # df['original_labels'] = y_test
    print(predictions)
    # total = len(predictions['output'])
    # count = 0
    # for i in range(total):
    #     if predictions['output'][i] == y_test[i]:
    #         count += 1
    # accuracy = count / total
    # print("Predict Accuracy:", accuracy)
    return predictions
def __init_tf_v1(self):
    from tensorflow.contrib import predictor

    self.predict_fn = predictor.from_saved_model(self.model_dir)
    self.input_names = list(self.predict_fn.feed_tensors.keys())
    self.output_names = list(self.predict_fn.fetch_tensors.keys())

    self._convert_to_model_input = self._convert_to_model_input_v1
    self._convert_to_model_output = self._convert_to_model_output_v1
def load(cls,
         model_dir: Text = None,
         model_metadata: "Metadata" = None,
         cached_component: Optional["BertIntentClassifier"] = None,
         **kwargs: Any) -> "BertIntentClassifier":
    meta = model_metadata.for_component(cls.name)

    if model_dir and meta.get("model_path"):
        model_path = os.path.normpath(meta.get("model_path"))

        graph = tf.Graph()
        with graph.as_default():
            sess = tf.Session()
            predict_fn = predictor.from_saved_model(model_path)

        with io.open(os.path.join(model_dir, cls.name + "_label_list.pkl"),
                     "rb") as f:
            label_list = pickle.load(f)

        return cls(
            component_config=meta,
            session=sess,
            label_list=label_list,
            predict_fn=predict_fn,
        )
    else:
        logger.warning("Failed to load nlu model. Maybe path {} "
                       "doesn't exist"
                       "".format(os.path.abspath(model_dir)))
        return cls(component_config=meta)
def get_predictions_from_disk(request):
    # Load saved model
    export_dir = os.getcwd() + "/tflow_grir_model"
    predict_fn = predictor.from_saved_model(export_dir,
                                            signature_def_key='predict')

    # Get data from JSON
    data = request.get_json(force=True)['DATA']
    df_req = pd.io.json.json_normalize(data)
    json_body_ml = {}
    for k in df_req.columns:
        json_body_ml[k] = list(df_req[k].values)
    # json_body_ml = {'instances': data}
    # print(json_body_ml)

    # predictions = predict_fn({"DIFGRIRV": [-38100], "NODLIR": [90],
    #                           "VSTATU": ["1"], "NODLGR": [0],
    #                           "DIFGRIRD": [-80], "VPATD": [30],
    #                           "WERKS": ["ML01"], "EKORG": ["1"],
    #                           "TOTGRQTY": [0], "SCENARIO": ["3"],
    #                           "TOTIRQTY": [80], "KTOKK": ["1"],
    #                           "EKGRP": ["A"]})
    predictions = predict_fn(json_body_ml)

    out_list = []
    for i in list(predictions['probabilities'][:, 1]):
        out = {}
        out['probability'] = str(i)
        out_list.append(out)
    print(out_list)
    return out_list
def test_forward_in_exported_sparse(self):
    features_columns = [
        fc.indicator_column(
            fc.categorical_column_with_vocabulary_list('x', range(10)))
    ]

    classifier = linear.LinearClassifier(feature_columns=features_columns)

    def train_input_fn():
        dataset = dataset_ops.Dataset.from_tensors({
            'x': sparse_tensor.SparseTensor(
                values=[1, 2, 3],
                indices=[[0, 0], [1, 0], [1, 1]],
                dense_shape=[2, 2]),
            'labels': [[0], [1]]
        })

        def _split(x):
            labels = x.pop('labels')
            return x, labels

        dataset = dataset.map(_split)
        return dataset

    classifier.train(train_input_fn, max_steps=1)

    classifier = extenders.forward_features(
        classifier, keys=['x'], sparse_default_values={'x': 0})

    def serving_input_fn():
        features_ph = array_ops.placeholder(
            dtype=dtypes.int32, name='x', shape=[None])
        features = {'x': layers.dense_to_sparse(features_ph)}
        return estimator_lib.export.ServingInputReceiver(
            features, {'x': features_ph})

    export_dir, tmpdir = self._export_estimator(classifier, serving_input_fn)
    prediction_fn = from_saved_model(export_dir, signature_def_key='predict')

    features = (0, 2)
    prediction = prediction_fn({'x': features})

    self.assertIn('x', prediction)
    self.assertEqual(features, tuple(prediction['x']))
    gfile.DeleteRecursively(tmpdir)
from tensorflow.contrib import predictor

base_dir = 'serving/1520360520'

prediction_fn = predictor.from_saved_model(
    export_dir=base_dir, signature_def_key='serving_default')

output = prediction_fn({
    'sms_input': ['i am in hospital da. . i will return home in evening']
})

print(output)
__author__ = 'KKishore'

import tensorflow as tf

from tensorflow.contrib import predictor

tf.logging.set_verbosity(tf.logging.INFO)

base_dir = 'serving/1518856330'

prediction_fn = predictor.from_saved_model(export_dir=base_dir,
                                           signature_def_key='predictions')

output = prediction_fn({
    'sms': [
        'i am in hospital da. . i will return home in evening',
        'please call our customer service representative on freephone 0808 145 4742 between 9am-11pm as you have won a guaranteed £1000 cash or £5000 prize!'
    ]
})

print(output['class'])
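All of the snippets above follow the same pattern: export a `tf.estimator` model as a SavedModel, then restore a callable with `tensorflow.contrib.predictor.from_saved_model` and feed it a dict of numpy inputs. Below is a minimal, self-contained sketch of that round trip under TF 1.x; the one-unit linear model, the feature name `'x'`, and the export base path `/tmp/export_base` are illustrative placeholders, not code from any of the projects above.

```python
# Minimal export -> predict round trip sketch (TF 1.x, illustrative only).
import numpy as np
import tensorflow as tf
from tensorflow.contrib import predictor


def model_fn(features, labels, mode):
    # A single linear unit keeps the example tiny.
    logits = tf.layers.dense(features['x'], 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions={'logit': logits})
    loss = tf.losses.mean_squared_error(labels, logits)
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)


def input_fn():
    # Two toy examples, just enough to produce a checkpoint.
    return tf.data.Dataset.from_tensors(
        ({'x': [[1.0], [2.0]]}, [[1.0], [2.0]]))


estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=input_fn, steps=1)

# Export with a raw serving input receiver so the predictor accepts numpy feeds.
serving_input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(
    {'x': tf.placeholder(tf.float32, [None, 1], name='x')})
export_dir = estimator.export_savedmodel('/tmp/export_base', serving_input_fn)
if isinstance(export_dir, bytes):
    export_dir = export_dir.decode('utf-8')

# Restore a predict function from the SavedModel and call it on numpy inputs.
predict_fn = predictor.from_saved_model(export_dir)
print(predict_fn({'x': np.array([[3.0]], dtype=np.float32)}))
```

The keys of the dict passed to `predict_fn` must match the placeholders in the serving input receiver (here `'x'`), which is why several of the snippets above inspect `predict_fn.feed_tensors` or `predict_fn._feed_tensors` before building their inputs.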