Code Example #1
File: model_handler.py Project: zz202/cvat
    def __init__(self, labels):
        base_dir = os.environ.get("MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/text-detection-0004/FP32")
        model_xml = os.path.join(base_dir, "text-detection-0004.xml")
        model_bin = os.path.join(base_dir, "text-detection-0004.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels
Code Example #2
def main():
    loader = ModelLoader(SERIALIZED_DATA_PATH)
    xgb_clf, rf_clf, train, test, daily_metrics = loader.assemble_serialized_data(
    ).values()
    dashboard = ManagementDashboard(xgb_clf, rf_clf, train, test,
                                    daily_metrics)
    dashboard.run()
Code Example #3
File: model_handler.py Project: zz202/cvat
    def __init__(self, labels):
        base_dir = os.environ.get("MODEL_PATH",
            "/opt/nuclio/open_model_zoo/public/yolo-v3-tf/FP32")
        model_xml = os.path.join(base_dir, "yolo-v3-tf.xml")
        model_bin = os.path.join(base_dir, "yolo-v3-tf.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels
Code Example #4
    def __init__(self, labels):
        base_dir = os.environ.get("MODEL_PATH",
            "/opt/nuclio/open_model_zoo/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP32")
        model_xml = os.path.join(base_dir, "mask_rcnn_inception_resnet_v2_atrous_coco.xml")
        model_bin = os.path.join(base_dir, "mask_rcnn_inception_resnet_v2_atrous_coco.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels
Code Example #5
    def __init__(self, labels):
        base_dir = os.path.abspath(os.environ.get("MODEL_PATH",
            "/opt/nuclio/open_model_zoo/public/faster_rcnn_inception_v2_coco/FP32"))
        model_xml = os.path.join(base_dir, "faster_rcnn_inception_v2_coco.xml")
        model_bin = os.path.join(base_dir, "faster_rcnn_inception_v2_coco.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels
Code Example #6
class AttributesExtractorHandler:
    def __init__(self):
        age_gender_base_dir = os.path.abspath(os.environ.get("AGE_GENDER_MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/age-gender-recognition-retail-0013/FP32"))
        age_gender_model_xml = os.path.join(age_gender_base_dir, "age-gender-recognition-retail-0013.xml")
        age_gender_model_bin = os.path.join(age_gender_base_dir, "age-gender-recognition-retail-0013.bin")
        self.age_gender_model = ModelLoader(age_gender_model_xml, age_gender_model_bin)
        emotions_base_dir = os.path.abspath(os.environ.get("EMOTIONS_MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/emotions-recognition-retail-0003/FP32"))
        emotions_model_xml = os.path.join(emotions_base_dir, "emotions-recognition-retail-0003.xml")
        emotions_model_bin = os.path.join(emotions_base_dir, "emotions-recognition-retail-0003.bin")
        self.emotions_model = ModelLoader(emotions_model_xml, emotions_model_bin)
        self.genders_map = ["female", "male"]
        self.emotions_map = ["neutral", "happy", "sad", "surprise", "anger"]

    def infer(self, image):
        age_gender_request = self.age_gender_model.async_infer(image)
        emotions_request = self.emotions_model.async_infer(image)
        # Wait until both age_gender and emotion recognition async inferences finish
        while not (age_gender_request.wait(0) == 0 and emotions_request.wait(0) == 0):
            continue
        age = int(np.squeeze(age_gender_request.output_blobs["age_conv3"].buffer) * 100)
        gender = self.genders_map[np.argmax(np.squeeze(age_gender_request.output_blobs["prob"].buffer))]
        emotion = self.emotions_map[np.argmax(np.squeeze(emotions_request.output_blobs['prob_emotion'].buffer))]
        return {"attributes": [
            {"name": "age", "value": str(age)},
            {"name": "gender", "value": gender},
            {"name": "emotion", "value": emotion}
        ]}
Code Example #7
    def load_models(self, model_list):
        """Handles deployed model for init"""
        self.logger.debug('Update models received with payload: %s',
                          str(model_list))
        model_loader = ModelLoader(self.client_config)
        models_to_load = model_list
        self.models_loaded = model_loader.get_models_from_list(models_to_load)
        #model_list=[["HelloWorldExample", "1.0.0"],["HelloAI2", "1.0.0"],["TensorflowMnistExample","1.0.0"],['fib_model',"1.0.2"]]

        return json.dumps({"Init Model Service": "Success"})
Code Example #8
class ModelHandler:
    def __init__(self, labels):
        base_dir = os.path.abspath(
            os.environ.get(
                "MODEL_PATH",
                "/opt/nuclio/open_model_zoo/public/yolo-v3-tf/FP32"))
        model_xml = os.path.join(base_dir, "yolo-v3-tf.xml")
        model_bin = os.path.join(base_dir, "yolo-v3-tf.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels

    def infer(self, image, threshold):
        output_layer = self.model.infer(image)

        # Collecting object detection results
        objects = []
        origin_im_size = (image.height, image.width)
        for layer_name, out_blob in output_layer.items():
            out_blob = out_blob.reshape(self.model.layers[
                self.model.layers[layer_name].parents[0]].shape)
            layer_params = YoloParams(self.model.layers[layer_name].params,
                                      out_blob.shape[2])
            objects += parse_yolo_region(out_blob, self.model.input_size(),
                                         origin_im_size, layer_params,
                                         threshold)

        # Filtering overlapping boxes (non-maximum suppression)
        IOU_THRESHOLD = 0.4
        objects = sorted(objects,
                         key=lambda obj: obj['confidence'],
                         reverse=True)
        for i, obj in enumerate(objects):
            if obj['confidence'] == 0:
                continue
            for j in range(i + 1, len(objects)):
                if intersection_over_union(obj, objects[j]) > IOU_THRESHOLD:
                    objects[j]['confidence'] = 0

        results = []
        for obj in objects:
            if obj['confidence'] >= threshold:
                xtl = max(obj['xmin'], 0)
                ytl = max(obj['ymin'], 0)
                xbr = min(obj['xmax'], image.width)
                ybr = min(obj['ymax'], image.height)
                obj_class = int(obj['class_id'])

                results.append({
                    "confidence": str(obj['confidence']),
                    "label": self.labels.get(obj_class, "unknown"),
                    "points": [xtl, ytl, xbr, ybr],
                    "type": "rectangle",
                })

        return results
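Usage sketch: the handler above only needs a PIL-style image (anything exposing .width/.height) and a {class_id: name} mapping, so it could be driven roughly as below. The label ids, file name, and threshold are illustrative assumptions, not values taken from the project.

# Hypothetical usage, not from the original project.
from PIL import Image

labels = {0: "person", 2: "car"}      # illustrative subset of class ids
handler = ModelHandler(labels)
image = Image.open("frame.png")       # assumed input file
for det in handler.infer(image, threshold=0.5):
    print(det["label"], det["confidence"], det["points"])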
Code Example #9
    def __init__(self):
        base_dir = os.environ.get(
            "MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/person-reidentification-retail-0300/FP32"
        )
        model_xml = os.path.join(base_dir,
                                 "person-reidentification-retail-0300.xml")
        model_bin = os.path.join(base_dir,
                                 "person-reidentification-retail-0300.bin")

        self.model = ModelLoader(model_xml, model_bin)
Code Example #10
    def __init__(self, labels):
        base_dir = os.path.abspath(
            os.environ.get(
                "MODEL_PATH",
                "/opt/nuclio/open_model_zoo/intel/semantic-segmentation-adas-0001/FP32"
            ))
        model_xml = os.path.join(base_dir,
                                 "semantic-segmentation-adas-0001.xml")
        model_bin = os.path.join(base_dir,
                                 "semantic-segmentation-adas-0001.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels
Code Example #11
def main():
    loader = ModelLoader(SERIALIZED_DATA_PATH)
    (xgb_clf_name, rf_clf_name, importances, daily_metrics, train_tsne,
     test_tsne, model_probas, score_distributions_train,
     score_distributions_test,
     curves) = loader.assemble_serialized_data().values()
    analysis_dashboard = AnalysisDashboard(xgb_clf_name, rf_clf_name,
                                           importances, daily_metrics,
                                           train_tsne, test_tsne, model_probas,
                                           score_distributions_train,
                                           score_distributions_test, curves)
    analysis_dashboard.run()
Code Example #12
    def __init__(self):
        age_gender_base_dir = os.path.abspath(os.environ.get("AGE_GENDER_MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/age-gender-recognition-retail-0013/FP32"))
        age_gender_model_xml = os.path.join(age_gender_base_dir, "age-gender-recognition-retail-0013.xml")
        age_gender_model_bin = os.path.join(age_gender_base_dir, "age-gender-recognition-retail-0013.bin")
        self.age_gender_model = ModelLoader(age_gender_model_xml, age_gender_model_bin)
        emotions_base_dir = os.path.abspath(os.environ.get("EMOTIONS_MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/emotions-recognition-retail-0003/FP32"))
        emotions_model_xml = os.path.join(emotions_base_dir, "emotions-recognition-retail-0003.xml")
        emotions_model_bin = os.path.join(emotions_base_dir, "emotions-recognition-retail-0003.bin")
        self.emotions_model = ModelLoader(emotions_model_xml, emotions_model_bin)
        self.genders_map = ["female", "male"]
        self.emotions_map = ["neutral", "happy", "sad", "surprise", "anger"]
Code Example #13
class ModelHandler:
    def __init__(self, labels):
        base_dir = os.path.abspath(os.environ.get("MODEL_PATH",
            "/opt/nuclio/open_model_zoo/public/faster_rcnn_inception_v2_coco/FP32"))
        model_xml = os.path.join(base_dir, "faster_rcnn_inception_v2_coco.xml")
        model_bin = os.path.join(base_dir, "faster_rcnn_inception_v2_coco.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels

    def infer(self, image, threshold):
        output_layer = self.model.infer(image)

        results = []
        prediction = output_layer[0][0]
        for obj in prediction:
            obj_class = int(obj[1])
            obj_value = obj[2]
            obj_label = self.labels.get(obj_class, "unknown")
            if obj_value >= threshold:
                xtl = obj[3] * image.width
                ytl = obj[4] * image.height
                xbr = obj[5] * image.width
                ybr = obj[6] * image.height

                results.append({
                    "confidence": str(obj_value),
                    "label": obj_label,
                    "points": [xtl, ytl, xbr, ybr],
                    "type": "rectangle",
                })

        return results
Code Example #14
File: model_handler.py Project: ChrisPHP/cvatron
class ModelHandler:
    def __init__(self, labels):
        base_dir = os.path.abspath(
            os.environ.get(
                "MODEL_PATH",
                "/opt/nuclio/open_model_zoo/intel/text-detection-0004/FP32"))
        model_xml = os.path.join(base_dir, "text-detection-0004.xml")
        model_bin = os.path.join(base_dir, "text-detection-0004.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels

    def infer(self, image, pixel_threshold, link_threshold):
        output_layer = self.model.infer(image)

        results = []
        obj_class = 1
        pcd = PixelLinkDecoder(pixel_threshold, link_threshold)

        pcd.decode(image.height, image.width, output_layer)
        for box in pcd.bboxes:
            results.append({
                "confidence": None,
                "label": self.labels.get(obj_class, "unknown"),
                "points": box.ravel().tolist(),
                "type": "polygon",
            })

        return results
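Usage sketch: infer() here takes separate pixel and link confidence thresholds for the PixelLink decoder and returns polygons rather than rectangles. A minimal illustrative call, with an assumed file name and assumed threshold values:

# Hypothetical usage, not from the original project.
from PIL import Image

handler = ModelHandler(labels={1: "text"})   # infer() labels every box with class id 1
image = Image.open("scan.png")               # assumed input file
polygons = handler.infer(image, pixel_threshold=0.8, link_threshold=0.8)
print(len(polygons), "text regions detected")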
Code Example #15
class FaceDetectorHandler:
    def __init__(self):
        base_dir = os.path.abspath(os.environ.get("DETECTOR_MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/face-detection-0205/FP32"))
        model_xml = os.path.join(base_dir, "face-detection-0205.xml")
        model_bin = os.path.join(base_dir, "face-detection-0205.bin")
        self.model = ModelLoader(model_xml, model_bin)

    def infer(self, image, threshold):
        infer_res = self.model.infer(image)["boxes"]
        infer_res = infer_res[infer_res[:,4] > threshold]

        results = []
        faces = []
        h_scale = image.height / 416
        w_scale = image.width / 416
        for face in infer_res:
            xmin = int(face[0] * w_scale)
            ymin = int(face[1] * h_scale)
            xmax = int(face[2] * w_scale)
            ymax = int(face[3] * h_scale)
            confidence = face[4]

            faces.append(np.array(image)[ymin:ymax, xmin:xmax])
            results.append({
                "confidence": str(confidence),
                "label": "face",
                "points": [xmin, ymin, xmax, ymax],
                "type": "rectangle",
                "attributes": []
            })

        return results, faces
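Composition sketch: infer() returns both the detection results and the cropped face arrays, which makes it easy to chain with the AttributesExtractorHandler from Code Example #6. Everything below (a shared module, the file name, the threshold) is an assumption, not code from the project.

# Hypothetical composition of Code Examples #6 and #15.
from PIL import Image

detector = FaceDetectorHandler()
extractor = AttributesExtractorHandler()

image = Image.open("group_photo.jpg")        # assumed input file
results, faces = detector.infer(image, threshold=0.5)
for result, face in zip(results, faces):
    # each `face` is the cropped numpy array returned alongside its detection
    result["attributes"] = extractor.infer(face)["attributes"]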
Code Example #16
    def __init__(self):
        np.random.seed(0)
        self.model_obj = ModelLoader().load()
        self.model = self.model_obj.model
        self.mappings = self.model_obj.mappings
        self.ratings = self.model_obj.ratings
        self.movies = self.model_obj.movies
        self.preparePopularMovies()
Code Example #17
File: main.py Project: ChrisPHP/cvatron
def init_context(context):
    context.logger.info("Init context...  0%")
    model_path = "/opt/nuclio/faster_rcnn/frozen_inference_graph.pb"
    model_handler = ModelLoader(model_path)
    setattr(context.user_data, 'model_handler', model_handler)
    functionconfig = yaml.safe_load(open("/opt/nuclio/function.yaml"))
    labels_spec = functionconfig['metadata']['annotations']['spec']
    labels = {item['id']: item['name'] for item in json.loads(labels_spec)}
    setattr(context.user_data, "labels", labels)
    context.logger.info("Init context...100%")
Code Example #18
def init_context(context):
    context.logger.info("Init context...  0%")
    model_path = "/opt/nuclio/efficientdet-d0.pth"
    functionconfig = yaml.safe_load(open("/opt/nuclio/function.yaml"))
    labels_spec = functionconfig['metadata']['annotations']['spec']
    labels = {str(item['id']): item['name'] for item in json.loads(labels_spec)}
    model_handler = ModelLoader(model_path, labels)
    setattr(context.user_data, 'model_handler', model_handler)
    setattr(context.user_data, "labels", labels)
    context.logger.info("Init context...100%")
Code Example #19
File: main.py Project: ChrisPHP/cvatron
def init_context(context):
    context.logger.info("Init context...  0%")

    functionconfig = yaml.safe_load(open("/opt/nuclio/function.yaml"))
    labels_spec = functionconfig['metadata']['annotations']['spec']
    labels = {item['id']: item['name'] for item in json.loads(labels_spec)}

    model_handler = ModelLoader(labels)
    setattr(context.user_data, 'model_handler', model_handler)

    context.logger.info("Init context...100%")
Code Example #20
    def __init__(self, width, height):
        pg.init()
        self.W = width
        self.H = height
        self.screen = pg.display.set_mode((self.W, self.H))
        self.running = True
        self.scl = 40
        self.cols = self.W // self.scl
        self.rows = self.H // self.scl
        self.grid = [[0 for i in range(self.cols)] for j in range(self.rows)]
        self.ip = ImageProcessor()
        self.model = ModelLoader('model/trained_model_2.pt').load_model()
Code Example #21
def init_context(context):
    context.logger.info("Init context...  0%")

    with open("/opt/nuclio/function.yaml", 'rb') as function_file:
        functionconfig = yaml.safe_load(function_file)
    labels_spec = functionconfig['metadata']['annotations']['spec']
    labels = {item['id']: item['name'] for item in json.loads(labels_spec)}

    model_handler = ModelLoader(labels)
    context.user_data.model_handler = model_handler

    context.logger.info("Init context...100%")
Code Example #22
File: main.py Project: openvinotoolkit/cvat
def init_context(context):
    context.logger.info("Init context...  0%")
    model_path = "/opt/nuclio/faster_rcnn/frozen_inference_graph.pb"
    model_handler = ModelLoader(model_path)
    context.user_data.model_handler = model_handler

    with open("/opt/nuclio/function.yaml", 'rb') as function_file:
        functionconfig = yaml.safe_load(function_file)
    labels_spec = functionconfig['metadata']['annotations']['spec']
    labels = {item['id']: item['name'] for item in json.loads(labels_spec)}
    context.user_data.labels = labels

    context.logger.info("Init context...100%")
Code Example #23
class ModelHandler:
    def __init__(self, labels):
        base_dir = os.path.abspath(
            os.environ.get(
                "MODEL_PATH",
                "/opt/nuclio/open_model_zoo/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP32"
            ))
        model_xml = os.path.join(
            base_dir, "mask_rcnn_inception_resnet_v2_atrous_coco.xml")
        model_bin = os.path.join(
            base_dir, "mask_rcnn_inception_resnet_v2_atrous_coco.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels

    def infer(self, image, threshold):
        output_layer = self.model.infer(image)

        results = []
        masks = output_layer['masks']
        boxes = output_layer['reshape_do_2d']

        for index, box in enumerate(boxes):
            obj_class = int(box[1])
            obj_value = box[2]
            obj_label = self.labels.get(obj_class, "unknown")
            if obj_value >= threshold:
                xtl = box[3] * image.width
                ytl = box[4] * image.height
                xbr = box[5] * image.width
                ybr = box[6] * image.height
                mask = masks[index][obj_class - 1]

                mask = segm_postprocess((xtl, ytl, xbr, ybr), mask,
                                        image.height, image.width)

                contours = find_contours(mask, MASK_THRESHOLD)
                contour = contours[0]
                contour = np.flip(contour, axis=1)
                contour = approximate_polygon(contour, tolerance=2.5)
                if len(contour) < 3:
                    continue

                results.append({
                    "confidence": str(obj_value),
                    "label": obj_label,
                    "points": contour.ravel().tolist(),
                    "type": "polygon",
                })

        return results
Code Example #24
class ModelHandler:
    def __init__(self, labels):
        base_dir = os.path.abspath(
            os.environ.get(
                "MODEL_PATH",
                "/opt/nuclio/open_model_zoo/intel/semantic-segmentation-adas-0001/FP32"
            ))
        model_xml = os.path.join(base_dir,
                                 "semantic-segmentation-adas-0001.xml")
        model_bin = os.path.join(base_dir,
                                 "semantic-segmentation-adas-0001.bin")
        self.model = ModelLoader(model_xml, model_bin)
        self.labels = labels

    def infer(self, image, threshold):
        output_layer = self.model.infer(image)

        results = []
        mask = output_layer[0, 0, :, :]
        width, height = mask.shape

        for i in range(len(self.labels)):
            mask_by_label = np.zeros((width, height), dtype=np.uint8)

            mask_by_label = ((mask == float(i)) * 255).astype(np.float32)
            mask_by_label = cv2.resize(mask_by_label,
                                       dsize=(image.width, image.height),
                                       interpolation=cv2.INTER_CUBIC)

            contours = find_contours(mask_by_label, 0.8)

            for contour in contours:
                contour = np.flip(contour, axis=1)
                contour = approximate_polygon(contour, tolerance=2.5)
                if len(contour) < 3:
                    continue

                results.append({
                    "confidence": None,
                    "label": self.labels.get(i, "unknown"),
                    "points": contour.ravel().tolist(),
                    "type": "polygon",
                })

        return results
Code Example #25
    def __init__(self, method=const.METHOD_ORB, use_video=False):
        self._window = Window(const.WINDOW_NAME, self.on_keypress)
        self._frame = None
        self._model = ModelLoader(const.MODEL_PATH, swap_yz=True)
        self._img_marker = cv2.imread(const.MARKER_PATH, cv2.IMREAD_GRAYSCALE)
        self._img_marker_2 = cv2.imread(const.MARKER_PATH_HIRO,
                                        cv2.IMREAD_GRAYSCALE)
        self._method = method

        self._capture_controller = CaptureController(
            capture=self._get_capture_source(use_video),
            window_manager=self._window,
            mirror_preview=True,
        )

        self._filter = None
        self._draw_contours = False
        self._draw_lines = False
        self._draw_corners = False
Code Example #26
def main():
    """Run Prediction"""

    # Measures total program runtime by collecting start time
    start_time = time.time()

    # get input args
    in_arg = get_input_args()

    # Function that checks command line arguments using in_arg
    print_command_line_arguments(in_arg)
    print()

    # get dataloader to load labels and process image
    dataloader = ModelDataLoader('data')
    category_names = dataloader.get_label_dict(in_arg.category_names)

    # use modelloader to load checkpoint
    loader = ModelLoader(in_arg.checkpoint_dir)

    if loader.checkpoint_exists(in_arg.checkpoint):
        print("Loading checkpoint %s" %
              (loader.get_checkpoint_path(in_arg.checkpoint)))
        model = loader.load_checkpoint(
            in_arg.checkpoint, model_use_gpu=in_arg.gpu, with_trainer=False)
    else:
        print("Checkpoint '%s' does not exist. Exiting." %
              (loader.get_checkpoint_path(in_arg.checkpoint)))
        return

    print()

    # convert image
    image = dataloader.process_image(in_arg.image)

    # run prediction
    probs, classes = model.predict(image, in_arg.top_k)
    labels = [category_names[c] for c in classes]

    for prob, clas, label in zip(probs, classes, labels):
        print("%5.3f%% %s [%s]" % (prob*100, label, clas))

    print()
    print("Total Prediction Duration: %.3fs" % (time.time() - start_time))
Code Example #27
dataloader_task1 = torch.utils.data.DataLoader(labeled_trainset_task1,
                                               batch_size=1,
                                               shuffle=False,
                                               num_workers=0)
# For road map task
labeled_trainset_task2 = LabeledDataset(image_folder=image_folder,
                                        annotation_file=annotation_csv,
                                        scene_index=labeled_scene_index,
                                        transform=get_transform_task2(),
                                        extra_info=False)
dataloader_task2 = torch.utils.data.DataLoader(labeled_trainset_task2,
                                               batch_size=1,
                                               shuffle=False,
                                               num_workers=0)

model_loader = ModelLoader()

total = 0
total_ats_bounding_boxes = 0
total_ts_road_map = 0
with torch.no_grad():
    for i, data in enumerate(dataloader_task1):
        total += 1
        sample, target, road_image = data
        sample = sample.cuda()

        predicted_bounding_boxes = model_loader.get_bounding_boxes(
            sample)[0].cpu()
        ats_bounding_boxes, iou_max = compute_ats_bounding_boxes(
            predicted_bounding_boxes, target['bounding_box'][0])
        total_ats_bounding_boxes += ats_bounding_boxes
Code Example #28
import pandas as pd
from flask import Flask, request
from gevent.pywsgi import WSGIServer

from bayes_classifier import NaiveBayes
from model_loader import ModelLoader
from regression import Regression

app = Flask(__name__)

naive = {'male': NaiveBayes('data/male_data_0.csv'), 'female': NaiveBayes('data/female_data_0.csv')}
best_model = {
    'male': ModelLoader('models/male_best_model_10_06_2019_02_10_09.joblib'),
    'female': ModelLoader('models/female_best_model_10_06_2019_02_26_36.joblib')
}
model02 = {
    'male': Regression('data/male_data_0.csv'),
    'female': Regression('data/female_data_0.csv')
}
model03 = {
    'male': ModelLoader('models/best_model_10_05_2019_22_13_28.joblib'),
   # 'female': ModelLoader('models/')
}


@app.after_request
def after_request(response):
    header = response.headers
    header['Access-Control-Allow-Origin'] = '*'
    header['Access-Control-Allow-Headers'] = '*'
    return response
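The snippet stops before any route that actually serves predictions. One possible shape for such an endpoint is sketched below; the URL, the request layout, and the assumption that the object returned by ModelLoader wraps a scikit-learn-style estimator with predict() are all hypothetical.

@app.route('/predict/<gender>', methods=['POST'])
def predict(gender):
    # Hypothetical endpoint; assumes ModelLoader exposes a scikit-learn-style
    # predict() on the loaded joblib model, which the snippet does not show.
    features = pd.DataFrame([request.get_json()])
    prediction = best_model[gender].predict(features)
    return {'prediction': prediction.tolist()}  # dict responses need Flask >= 1.1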
Code Example #29
File: server.py Project: karan10111/Hunch
import json
import os
import traceback

import requests
import yaml
from flask import Flask

import hunch_server
# Note: ModelLoader comes from elsewhere in this project; its import is not
# shown in the snippet.

hunch_server_config = {}

if 'HUNCH_CONFIG' in os.environ:
    if os.path.exists(os.environ['HUNCH_CONFIG']):
        with open(os.environ['HUNCH_CONFIG']) as f:
            hunch_server_config = yaml.safe_load(f)

ROTATION_FILE_PATH = hunch_server_config["rotation_status_file"]
app = Flask(__name__)
app.logger_name = "hunch.app"
models_loaded = {}
model_loader = ModelLoader(hunch_server_config)
try:
    if 'MODELS_TO_LOAD' in os.environ:
        models_to_load = json.loads(os.environ['MODELS_TO_LOAD'])
        models_loaded = model_loader.get_models_from_list(models_to_load)

except requests.exceptions.HTTPError as e:
    app.logger.error("Meta Service has thrown %s, the error is %s and stack trace is %s"
                     % (e.response.status_code, str(e), str(traceback.format_exc())))
    raise RuntimeError("Meta Service has thrown '{}', the error is {} and stack trace is {}".format(
        e.response.status_code, str(e), str(traceback.format_exc())))

app.logger.info("Loaded models are: " + json.dumps(list(models_loaded.keys())))

@app.route('/elb-healthcheck', methods=['GET'])
def elb_healthcheck():
    try:
Code Example #30
def main():
    """Run Training Session"""

    # Measures total program runtime by collecting start time
    start_time = time.time()

    # get input args
    in_arg = get_input_args()

    # Function that checks command line arguments using in_arg
    print_command_line_arguments(in_arg)
    print()

    # load datasets
    dataloader = ModelDataLoader(in_arg.data_dir)
    train_dataset, train_dataloader = dataloader.get_train_data()
    valid_dataset, valid_dataloader = dataloader.get_validation_data()
    test_dataset, test_dataloader = dataloader.get_test_data()

    # Use model loader to load existing checkpoint
    loader = ModelLoader(in_arg.checkpoint_dir)

    if loader.checkpoint_exists(in_arg.checkpoint):
        # load checkpoint
        print("Loading checkpoint %s" %
              (loader.get_checkpoint_path(in_arg.checkpoint)))
        model, trainer = loader.load_checkpoint(
            in_arg.checkpoint, model_use_gpu=in_arg.gpu)
        print("Epochs trained so far: %d" % (trainer.trained_epochs))
    else:
        # no checkpoint, create fresh model using input arguments
        print("Checkpoint '%s' does not exist" %
              (loader.get_checkpoint_path(in_arg.checkpoint)))
        model = Model(train_dataset.class_to_idx,
                      arch=in_arg.arch, use_gpu=in_arg.gpu,
                      hidden_units_1=in_arg.hidden_units_1, hidden_units_2=in_arg.hidden_units_2,
                      dropout_1=in_arg.dropout_1, dropout_2=in_arg.dropout_2)
        trainer = ModelTrainer(model, learning_rate=in_arg.learning_rate)

    print()
    print("Model training in session...")
    print()

    epochs = in_arg.epochs

    # train model and print results
    for result in trainer.train_epochs(epochs, train_dataloader, valid_dataloader):
        print(
            "Epoch: %3d/%3d" % (result['epoch']+1, epochs),
            " | Train Loss: %10.5f" % (result['train_loss']),
            " | Validation Loss: %10.5f" % (result['validation_loss']),
            " | Validation Acc: %6.3f%%" % (
                result['validation_accuracy'] * 100),
            " | Duration: %10.3fs" % (result['duration'])
        )

    print()
    print("Testing model against test data...")
    print()

    # test model against test data
    test_result = trainer.test(test_dataloader)

    print(
        "Test Loss: %10.5f" % (test_result['test_loss']),
        " | Test Acc: %6.3f%%" % (test_result['test_accuracy'] * 100),
        " | Duration: %10.3fs" % (test_result['duration'])
    )

    # save checkpoint
    loader.save_checkpoint(in_arg.checkpoint, model, trainer)

    print()
    print("Total Train Duration: %.3fs" % (time.time() - start_time))
Code Example #31
from flask import Flask
from flask.blueprints import Blueprint
from flask_cors import CORS

import config
import routes
from model_loader import ModelLoader

server = Flask(__name__)

if config.ENABLE_CORS:
    cors = CORS(server, resources={r"/api/*": {"origins": "*"}})

for blueprint in vars(routes).values():
    if isinstance(blueprint, Blueprint):
        server.register_blueprint(blueprint, url_prefix=config.API_URL_PREFIX)

if __name__ == "__main__":
    #print("server details",server.url_map)
    server.run(host=config.HOST, port=config.PORT, debug=True)
    ModelLoader()