Example #1
from tensorflow import saved_model
from tensorflow.keras.models import load_model


def load(path, actions):
    model_dict = {}
    print("loading actions ", end="")
    if Constants.alg_type == "fitted":
        # one Keras model per action
        for action in actions:
            print(".", end="")
            model_dict[action] = load_model(path + action + ".h5")
    elif Constants.alg_type == "A/C":
        # actor/critic: load both SavedModels and keep the critic in the
        # returned dict so it is not lost when the function returns
        model_dict["model"] = saved_model.load(path + "actor")
        model_dict["critic"] = saved_model.load(path + "critic")
    else:
        model_dict["model"] = load_model(path + "model.h5")
    print("done")
    return model_dict
Example #2
 def _load_tensorflow_model(self, model_path: str):
     """Load a TensorFlow SavedModel and return a predict function."""
     serving_dir = os.path.dirname(model_path)
     self.model = saved_model.load(serving_dir)
     self.infer = self.model.signatures["serving_default"]
     # record the shape of the first input of the serving signature
     input_specs = self.infer.structured_input_signature[1]
     first_input = next(iter(input_specs.values()))
     self.input_shape = tuple(first_input.shape)
     return lambda x: self._tfPredict(x)
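
For reference, a minimal sketch of how the serving signature obtained above is typically invoked; the SavedModel path, input shape, and dtype are assumptions for illustration, not taken from the project:

import numpy as np
import tensorflow as tf

model = tf.saved_model.load("/path/to/saved_model")  # hypothetical path
infer = model.signatures["serving_default"]
input_name = next(iter(infer.structured_input_signature[1]))
# dummy batch; real code would pass preprocessed data of the right shape/dtype
batch = tf.constant(np.zeros((1, 224, 224, 3), dtype=np.float32))
outputs = infer(**{input_name: batch})  # dict of output tensors keyed by name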
Example #3
 def loadTreeModel(self):
     self.loadFiles()
     print("files loaded")
     if saved_model.contains_saved_model(self.savedTreeLocation):
         self.importedTree = saved_model.load(self.savedTreeLocation)
         result = self.predict2(self.testX, self.importedTree)
         print("Tree loaded...", result)
     else:
         print("Tree not available.")
Example #4
def _main(_):
    # A SavedModel with an empty SignatureMap cannot be converted to TFLite.
    model = saved_model.load(FLAGS.model)
    if len(model.signatures) == 0:
        message = ("SavedModel cannot be converted with an empty signature map! "
                   "Please check model.signatures before conversion.")
        logger.error(message)
        raise ValueError(message)

    converter = create_converter(FLAGS.model, model_loader)
    tflite_model = converter.convert()
    with open(FLAGS.output, "wb") as f:
        f.write(tflite_model)
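
Once the conversion above succeeds, the written file can be sanity-checked with the TFLite interpreter. A minimal sketch, assuming the converter output was saved as "model.tflite":

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="model.tflite")  # hypothetical file name
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# feed a zero tensor of the expected shape/dtype just to check the model runs
dummy = np.zeros(input_details[0]["shape"], dtype=input_details[0]["dtype"])
interpreter.set_tensor(input_details[0]["index"], dummy)
interpreter.invoke()
result = interpreter.get_tensor(output_details[0]["index"])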
Example #5
    def _load_model(self):
        """Loads the SavedModel under `self.dir` and selects a serving signature.

        Prefers the default serving signature; otherwise falls back to the
        first signature that is neither the init-op nor the train-op signature.
        """
        sess = tf.Session()
        meta_graph_def = saved_model.load(sess, [saved_model.SERVING],
                                          os.path.join(self.dir, 'model'))
        sig = None
        if DEFAULT_SERVING_SIGNATURE_DEF_KEY not in meta_graph_def.signature_def:
            for sig_itr in meta_graph_def.signature_def:
                if (sig_itr != INIT_OP_SIGNATURE_DEF_KEY
                        and sig_itr != TRAIN_OP_SIGNATURE_DEF_KEY):
                    sig = sig_itr
                    break
        else:
            sig = DEFAULT_SERVING_SIGNATURE_DEF_KEY

        assert sig is not None, ("unable to load model, expected a "
                                 + DEFAULT_SERVING_SIGNATURE_DEF_KEY + " signature")

        self.graph = meta_graph_def.graph_def
        self.signature = meta_graph_def.signature_def[sig]
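
For context, a stand-alone sketch of running inference with a TF1-style signature like the one selected above; the model path, input shape, and the single-input/single-output assumption are illustrative only:

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import saved_model

sess = tf.Session()
meta_graph_def = saved_model.load(sess, [saved_model.SERVING], "/path/to/model")
sig_def = meta_graph_def.signature_def[
    saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
input_name = list(sig_def.inputs.values())[0].name
output_name = list(sig_def.outputs.values())[0].name
batch = np.zeros((1, 4), dtype=np.float32)  # dummy input; shape is an assumption
prediction = sess.run(output_name, feed_dict={input_name: batch})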
Example #6
File: app.py  Project: HAadams/od_app
from flask import Flask
from PIL import Image
from wtforms import Form
import numpy as np

from object_detection.utils import visualization_utils as viz_utils
from tensorflow import saved_model
from tensorflow.keras.backend import clear_session
from matplotlib.pyplot import rcParams
from matplotlib.pyplot import figure

app = Flask(__name__)
app.secret_key = b'_5$GFS#y2L"**&^*&FR%&#^F4Q8z\n\xec]/'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024

clear_session()
detect_fn = saved_model.load('./faster_rcnn_trained_model/saved_model/')

category_index = {
    1: {
        'id': 1,
        'name': 'rider'
    },
    2: {
        'id': 2,
        'name': 'bicycle'
    },
    3: {
        'id': 3,
        'name': 'other_person'
    },
    4: {
Example #7
import os

import gdown
import numpy as np
from PIL import Image
from tensorflow import convert_to_tensor, newaxis, saved_model
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils

# DEFAULT_MODEL_PATH and DEFAULT_LABEL_MAP_PATH are module-level constants
# defined elsewhere in the project.


def inference(inputPath,
              outputPath,
              modelPath=DEFAULT_MODEL_PATH,
              labelPath=DEFAULT_LABEL_MAP_PATH):
    print("Loading model... ", end='')

    print(modelPath)

    download_url = 'https://drive.google.com/uc?id=1A_ZaPoMu1AKVKBjfOrD9R3ekMI50eKcx'
    output = os.path.join(modelPath, 'variables/variables.data-00000-of-00001')

    print(output)
    if not os.path.isfile(output):
        print('downloading')
        gdown.download(download_url, output, quiet=False)
    else:
        print('NOT downloading')

    # client.download_file(BUCKET_NAME,
    #                      'exported-models/my_model/saved_model/saved_model.pb',
    #                      'exported-models/my_model/saved_model/variables/variables.data-00000-of-00001')

    # Load saved model and build the detection function
    detect_fn = saved_model.load(modelPath)

    # Loading the label_map
    category_index = label_map_util.create_category_index_from_labelmap(
        labelPath, use_display_name=True)

    print("Done!")

    def load_image_into_numpy_array(path):
        # Load an image from file into a numpy array.
        return np.array(Image.open(path))

    def predict(inputPath, imageName, modelPath, labelPath, outputPath):
        print("Running inference for {}... ".format(
            os.path.join(inputPath, imageName)),
              end='')
        image_np = load_image_into_numpy_array(
            os.path.join(inputPath, imageName))

        # The input needs to be a tensor, convert it using `tf.convert_to_tensor`
        input_tensor = convert_to_tensor(image_np)
        # The model expects a batch of images, so add an axis with `tf.newaxis`
        input_tensor = input_tensor[newaxis, ...]

        # input_tensor = np.expand_dims(image_np, 0)
        try:
            detections = detect_fn(input_tensor)
        except ValueError:
            print("Inference failed for {}".format(imageName))
            return

        # Convert to numpy arrays, and take index [0] to remove the batch dimension
        num_detections = int(detections.pop('num_detections'))
        detections = {
            key: value[0, :num_detections].numpy()
            for key, value in detections.items()
        }
        detections['num_detections'] = num_detections

        # Detection_classes should be ints
        detections['detection_classes'] = detections[
            'detection_classes'].astype(np.int64)

        image_np_with_detections = image_np.copy()

        viz_utils.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections,
            detections['detection_boxes'],
            detections['detection_classes'],
            detections['detection_scores'],
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=1,
            min_score_thresh=.08,
            agnostic_mode=False)

        image = Image.fromarray(image_np_with_detections)
        image.save(
            os.path.join(outputPath,
                         'predicted-' + os.path.basename(imageName)))
        print("Done!")

    if os.path.isdir(inputPath):
        for imageName in [
                i for i in os.listdir(inputPath)
                if os.path.isfile(os.path.join(inputPath, i))
        ]:
            predict(inputPath, imageName, modelPath, labelPath, outputPath)
    else:
        predict("", inputPath, modelPath, labelPath, outputPath)
Example #8
 def load_agent(self):
     # restore the actor and critic SavedModels from the configured directory
     self.actor = saved_model.load(Constants.load_model_dir + "actor")
     self.critic = saved_model.load(Constants.load_model_dir + "critic")