Example #1
# Assumed imports: ModelBase and ImageProcessor look like project-local
# (modelhub-style) classes, so those two import paths are a guess.
import json
from keras.applications.xception import Xception
from modelhublib.model import ModelBase
from processing import ImageProcessor

class Model(ModelBase):
    def __init__(self):
        # load the config file
        with open("model/config.json") as f:
            config = json.load(f)
        # get the image processor
        self._imageProcessor = ImageProcessor(config)
        # load the DL model and its weights
        self._model = Xception()
        self._model.load_weights('model/model.h5')
        # building the predict function up front makes it thread-safe (Keras 2.x idiom)
        self._model._make_predict_function()

    def infer(self, input):
        # load and preprocess the input
        inputAsNpArr = self._imageProcessor.loadAndPreprocess(input)
        # run inference with the Keras model
        results = self._model.predict(inputAsNpArr)
        # postprocess results into the output format
        output = self._imageProcessor.computeOutput(results)
        return output
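
For context, a minimal usage sketch of this wrapper (the file path is a made-up example; infer accepts whatever ImageProcessor.loadAndPreprocess can read):

# hypothetical usage; the input path is an assumption
model = Model()
output = model.infer("sample_data/example.png")
print(output)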
Example #2
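This fragment is truncated at the top (the try: opener, config, and config_json are defined earlier in the file); the imports it leans on are presumably along these lines (paths assumed):

# Assumed imports for this fragment; config and config_json come from
# the truncated part of the file above.
import os
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
from azureml.core.model import Model
from azureml.exceptions import ModelNotFoundException
from keras.applications.xception import Xception
from keras.models import Model as KerasModel
from tqdm import tqdm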
# Assumed reconstruction: the snippet opens inside a
# ServicePrincipalAuthentication(...) call, cut off by the truncation above.
try:
    svc_pr = ServicePrincipalAuthentication(
        tenant_id=config['tenant_id'],
        service_principal_id=config['service_principal_id'],
        service_principal_password=config['service_principal_password'])
except KeyError:
    print("Getting Service Principal Authentication from Azure DevOps")
    svc_pr = None

ws = Workspace.from_config(path=config_json, auth=svc_pr)
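
When the service principal details are missing, svc_pr ends up None; as far as the azureml SDK's documented behavior goes, Workspace.from_config then falls back to its default interactive authentication, which is why the except branch above can simply null it out.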

try:
    model_root = Model.get_model_path('trained_xception', _workspace=ws)
except ModelNotFoundException:
    print("Didn't find model, cannot perform knowledge distillation.")
    # re-raise so the code below never runs with model_root undefined
    raise

model = Xception()
model.load_weights(os.path.join(model_root, "xception_weights.hdf5"))

# Remove softmax
model.layers.pop()

# Now model outputs logits
model = KerasModel(model.input, model.layers[-1].output)
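
One thing worth noting about this pop-and-rebuild idiom: model.layers.pop() only edits the Python list of layers, so it is the KerasModel(model.input, model.layers[-1].output) rebuild that actually rewires the graph to emit pre-softmax logits. That presumes the head is a separate Activation('softmax') on top of a Dense(256) layer; a quick sanity check under that assumption:

# sanity check (assumes a Dense(256) head followed by a separate softmax Activation)
assert model.output_shape == (None, 256)
print(model.layers[-1].name)  # should be the Dense layer, not the softmax activation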

# Save logits as a dict: image name -> logits (256-dimensional vector)
train_logits = {}

batches = 0

for x_batch, _, name_batch in tqdm(train_generator):

    batch_logits = model.predict_on_batch(x_batch)
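
The listing cuts off inside the loop; a plausible completion of this logits-caching pattern looks like the following (the break condition and save path are assumptions, and np is numpy; Keras generators yield batches forever, so the loop must stop itself):

    # hypothetical continuation: cache each image's logits under its name
    for name, logits in zip(name_batch, batch_logits):
        train_logits[name] = logits

    # generators loop indefinitely; stop after one full pass (assumed epoch length)
    batches += 1
    if batches >= len(train_generator):
        break

# persist the teacher logits for the distillation step (file name is an assumption)
np.save('train_logits.npy', train_logits)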