Пример #1
0
    def test_ml_predict_helper_works(self):
        """
        Test ML Predict helper works
        """
        test_label = "mlpredict_{}".format(str(uuid.uuid4()))
        use_csv = "/tmp/cleaned_attack_scans.csv"
        model_desc = self.build_model_desc()
        model_and_weights = self.build_model_and_weights()
        prediction_manifest = self.build_prediction_manifest()
        predict_rows = self.build_prediction_rows(
            data_file=use_csv)
        self.maxDiff = None
        data = {
            "label": test_label,
            "predict_rows": predict_rows,
            "manifest": prediction_manifest,
            "model_desc": model_desc,
            "model_json": model_and_weights["model"],
            "weights_json": model_and_weights["weights"]
        }

        res = make_predictions(data)
        # a successful run has no error, a SUCCESS status and data
        self.assertEqual(res["err"], "")
        self.assertEqual(res["status"], SUCCESS)
        self.assertTrue(len(res["data"]) > 0)
Пример #2
0
    def test_ml_predict_missing_predictions(self):
        """
        Test ML Predict helper fails if no predictions are provided
        """
        test_label = "mlpredict_{}".format(str(uuid.uuid4()))
        model_desc = self.build_model_desc()
        model_and_weights = self.build_model_and_weights()
        prediction_manifest = self.build_prediction_manifest()
        predict_rows = None
        # remove the csv source so the helper has nothing to predict from
        prediction_manifest.pop("csv_file")
        data = {
            "label": test_label,
            "predict_rows": predict_rows,
            "manifest": prediction_manifest,
            "model_desc": model_desc,
            "model_json": model_and_weights["model"],
            "weights_json": model_and_weights["weights"],
            "weights_file": "/tmp/{}".format(test_label)
        }

        res = make_predictions(data)
        # with no rows and no csv file the helper must report an error
        self.assertEqual(res["status"], ERR)
        self.assertEqual(res["data"], None)
Пример #3
0
 def test_regression(self):
     """Regression request should succeed and return a trained model."""
     req = self.build_regression_request()
     res = make_predictions(req)
     self.assertEqual(res["status"], SUCCESS)
     self.assertTrue(res["data"]["model"])
Пример #4
0
 def test_regression_deep_dnn(self):
     """Deep DNN regression model description trains successfully."""
     res = make_predictions(
         self.build_regression_request(
             model_desc_file="./tests/model_desc/deep_dnn.json"))
     self.assertEqual(res["status"], SUCCESS)
     self.assertTrue(res["data"]["model"])
Пример #5
0
 def test_regression_wide_dnn_with_auto_scaler(self):
     """Wide DNN regression works with the auto scaler enabled."""
     req = self.build_regression_request(
         model_desc_file="./tests/model_desc/wide_dnn.json")
     req["apply_scaler"] = True
     req["scaler_cast_type"] = "float32"
     res = make_predictions(req)
     self.assertEqual(res["status"], SUCCESS)
     self.assertTrue(res["data"]["model"])
Пример #6
0
    def test_dataset_regression_using_scaler(self):
        """
        Test dataset regression with the auto scaler enabled

        Builds a dataset regression request, turns on the scaler,
        runs ``make_predictions`` and verifies the trained model,
        its layer count and the number of sample predictions.
        """
        req = self.build_dataset_regression_request()
        req["apply_scaler"] = True
        req["scaler_cast_type"] = "float32"
        res = make_predictions(req)
        self.assertEqual(
            res["status"],
            SUCCESS)
        self.assertTrue(
            res["data"]["model"])
        self.assertEqual(
            len(res["data"]["model"].layers),
            3)

        predictions = res["data"]["sample_predictions"]
        # bug fix: the original assertTrue(len(predictions), 18) passed
        # 18 as the assertion *message*, so the count was never checked
        # (assertTrue always passed for any non-empty predictions) -
        # use assertEqual to actually verify the expected row count
        self.assertEqual(
            len(predictions),
            18)
Пример #7
0
    def test_ml_predict_missing_manifest(self):
        """
        Test ML Predict helper fails missing data
        """
        test_label = "mlpredict_{}".format(str(uuid.uuid4()))
        model_desc = self.build_model_desc()
        model_and_weights = self.build_model_and_weights()
        # no "manifest" key in the request on purpose
        data = {
            "label": test_label,
            "model_desc": model_desc,
            "model_json": model_and_weights["model"],
            "weights_json": model_and_weights["weights"],
            "weights_file": "/tmp/{}".format(test_label)
        }

        res = make_predictions(data)
        self.assertEqual(res["status"], ERR)
        self.assertEqual(res["data"], None)
def task_ml_job(self=None, req_node=None):
    """task_ml_job

    Run one ML job end to end: look up the job and result records,
    optionally build a local training request from the csv/meta files,
    call ``make_predictions`` (or publish the request to the core
    worker instead), then save the trained model, weights, accuracy
    and sample predictions onto the ``MLJobResult`` record.

    :param self: parent task object for bind=True
                 (unused in the body - presumably kept for the Celery
                 bound-task signature; confirm before removing)
    :param req_node: job utils dictionary for passing a dictionary
                     (expects keys: ``task_name``, ``data``,
                     ``use_cache``, ``celery_enabled``, ``cache_key``)
    """

    log.info(("task - {} - start "
              "req_node={}").format(req_node["task_name"], ppj(req_node)))

    # unpack the request payload - all entries are optional except the
    # user/job/result dictionaries which are dereferenced below
    user_data = req_node["data"].get("user_data", None)
    ml_job = req_node["data"].get("ml_job_data", None)
    ml_result = req_node["data"].get("ml_result_data", None)
    model_desc = req_node["data"].get("model_desc", None)
    label_rules = req_node["data"].get("label_rules", None)
    predict_rows = req_node["data"].get("predict_rows", None)

    user_res = db_lookup_user(user_id=user_data["id"])
    user_obj = user_res.get("user_obj", None)
    ml_job_id = None
    ml_result_id = None
    ml_job_obj = None
    # collected for the final summary log line even on error paths
    found_predictions = []
    found_accuracy = None

    # find the MLJob record owned by this user (cached vs uncached query)
    if req_node["use_cache"]:
        ml_job_obj = MLJob.objects.select_related().filter(
            Q(id=int(ml_job["id"])) & Q(user=user_obj)).cache().first()
    else:
        ml_job_obj = MLJob.objects.select_related().filter(
            Q(id=int(ml_job["id"])) & Q(user=user_obj)).first()
    # end of finding the MLJob record

    # find the MLJobResult record the same way
    ml_result_obj = None
    if req_node["use_cache"]:
        ml_result_obj = MLJobResult.objects.select_related().filter(
            Q(id=int(ml_result["id"])) & Q(user=user_obj)).cache().first()
    else:
        ml_result_obj = MLJobResult.objects.select_related().filter(
            Q(id=int(ml_result["id"])) & Q(user=user_obj)).first()
    # end of finding the MLJobResult record

    res = build_task_response(use_cache=req_node["use_cache"],
                              celery_enabled=req_node["celery_enabled"],
                              cache_key=req_node["cache_key"])

    last_step = "not started"
    data = {}
    data["job"] = {}
    data["results"] = {}
    try:

        # assume failure until the happy path completes
        res["status"] = ERR
        res["error"] = ""

        # pull training/prediction settings from the job's manifest;
        # numeric values are stored as strings, hence the casts
        predict_manifest = ml_job_obj.predict_manifest
        csv_file = predict_manifest.get("csv_file", None)
        meta_file = predict_manifest.get("meta_file", None)
        epochs = int(predict_manifest.get("epochs", "5"))
        test_size = float(predict_manifest.get("test_size", "0.2"))
        batch_size = int(predict_manifest.get("batch_size", "32"))
        verbose = int(predict_manifest.get("verbose", "1"))

        # use pre-trained models in memory by label
        use_model_name = ml_job_obj.predict_manifest.get(
            "use_model_name", None)
        dataset = ml_job_obj.predict_manifest.get("dataset", None)
        # NOTE(review): this overwrites the predict_rows taken from
        # req_node above - manifest values win; confirm intended
        predict_rows = ml_job_obj.predict_manifest.get("predict_rows", None)
        predict_feature = ml_job_obj.predict_manifest.get(
            "predict_feature", None)
        features_to_process = ml_job_obj.predict_manifest.get(
            "features_to_process", None)
        ignore_features = ml_job_obj.predict_manifest.get(
            "ignore_features", None)
        publish_to_core = ml_job_obj.predict_manifest.get(
            "publish_to_core", None)
        apply_scaler = ml_job_obj.predict_manifest.get("apply_scaler", True)
        sort_values = ml_job_obj.predict_manifest.get("sort_values", None)
        max_records = int(
            ml_job_obj.predict_manifest.get("max_records", "100000"))
        loss = ml_job_obj.predict_manifest.get("loss", "binary_crossentropy")
        metrics = ml_job_obj.predict_manifest.get("metrics", ["accuracy"])
        optimizer = ml_job_obj.predict_manifest.get("optimizer", "adam")
        histories = ml_job_obj.predict_manifest.get(
            "histories", ["val_loss", "val_acc", "loss", "acc"])

        # if a dataset or explicit rows plus a feature list were supplied,
        # the antinex-utils builder can be used instead of the local one
        needs_local_builder = True
        if ((dataset or predict_rows) and features_to_process):
            log.info(("using antinex builder dataset={} predict_rows={} "
                      "features_to_process={}").format(dataset, predict_rows,
                                                       features_to_process))

            needs_local_builder = False
        # flag for bypassing build inside django instead of antinex-utils

        image_file = ml_result_obj.acc_image_file
        version = ml_job_obj.version

        ml_job_id = ml_job_obj.id
        ml_result_id = ml_result_obj.id

        last_step = ("starting user={} "
                     "job.id={} result.id={} predict={} "
                     "model_desc={} "
                     "csv={} meta={}").format(ml_job_obj.user.id, ml_job_id,
                                              ml_result_id,
                                              ml_job_obj.predict_feature,
                                              model_desc, csv_file, meta_file)
        log.info(last_step)

        ml_job_obj.status = "analyzing"
        ml_job_obj.save()

        if needs_local_builder:

            # build the training request inside django from csv + meta
            log.info("starting local build_training_request")

            ml_req = build_training_request(
                csv_file=csv_file,
                meta_file=meta_file,
                predict_feature=ml_job_obj.predict_feature,
                test_size=test_size)

            if ml_req["status"] != VALID:
                # builder failed - mark the job errored and return early
                last_step = ("Stopping for status={} "
                             "errors: {}").format(ml_req["status"],
                                                  ml_req["err"])
                log.error(last_step)
                ml_job_obj.status = "error"
                ml_job_obj.control_state = "error"
                log.info(("saving job={}").format(ml_job_id))
                ml_job_obj.save()
                data["job"] = ml_job_obj.get_public()
                error_data = {"status": ml_req["status"], "err": ml_req["err"]}
                data["results"] = error_data
                res["status"] = ERR
                res["error"] = last_step
                res["data"] = data
                return res
            else:

                # merge the builder output back into the manifest;
                # explicit label_rules from the request win over the
                # rules derived from the meta data
                predict_manifest["ignore_features"] = \
                    ml_req.get("ignore_features", [])
                predict_manifest["features_to_process"] = \
                    ml_req.get("features_to_process", [])
                if label_rules:
                    predict_manifest["label_rules"] = \
                        label_rules
                else:
                    predict_manifest["label_rules"] = \
                        ml_req["meta_data"]["label_rules"]
                predict_manifest["post_proc_rules"] = \
                    ml_req["meta_data"]["post_proc_rules"]
                predict_manifest["version"] = version

                last_step = ("job.id={} built_training_request={} "
                             "predict={} features={} ignore={} "
                             "label_rules={} post_proc={}").format(
                                 ml_job_obj.id, ml_req["status"],
                                 predict_manifest["predict_feature"],
                                 predict_manifest["features_to_process"],
                                 predict_manifest["ignore_features"],
                                 predict_manifest["label_rules"],
                                 predict_manifest["post_proc_rules"])

                log.info(last_step)

                if ml_job_obj.ml_type == "regression":
                    log.info(("using Keras - regression - "
                              "sequential model ml_type={}").format(
                                  ml_job_obj.ml_type))

                    # regression jobs override the classification
                    # defaults for loss/metrics/histories
                    loss = "mse"
                    metrics = ["mse", "mae", "mape", "cosine"]

                    histories = [
                        "mean_squared_error", "mean_absolute_error",
                        "mean_absolute_percentage_error", "cosine_proximity"
                    ]
                else:
                    log.info(("using Keras - sequential model "
                              "ml_type={}").format(ml_job_obj.ml_type))
                # end of classification vs regression

                # NOTE(review): these per-key writes are immediately
                # replaced by the whole-manifest assignment below -
                # they only matter if predict_manifest is the same
                # dict object as ml_job_obj.predict_manifest; confirm
                ml_job_obj.predict_manifest["epochs"] = epochs
                ml_job_obj.predict_manifest["batch_size"] = batch_size
                ml_job_obj.predict_manifest["verbose"] = verbose
                ml_job_obj.predict_manifest["loss"] = loss
                ml_job_obj.predict_manifest["metrics"] = metrics
                ml_job_obj.predict_manifest["optimizer"] = optimizer
                ml_job_obj.predict_manifest["histories"] = histories
                ml_job_obj.predict_manifest = predict_manifest

        # end of updating without antinex-utils
        # end of if needs_local_builder:

        ml_job_obj.status = "started"
        ml_job_obj.save()

        # assemble the request for make_predictions / the core worker;
        # optional keys are only added when they have a truthy value
        scores = None
        prediction_req = {
            "label": "job_{}_result_{}".format(ml_job_id, ml_result_id),
            "manifest": ml_job_obj.predict_manifest,
            "model_json": ml_result_obj.model_json,
            "model_desc": model_desc,
            "weights_json": ml_result_obj.model_weights,
        }

        if dataset:
            prediction_req["dataset"] = dataset
        if max_records:
            prediction_req["max_records"] = max_records
        if predict_rows:
            prediction_req["predict_rows"] = json.dumps(predict_rows)
        if features_to_process:
            prediction_req["features_to_process"] = features_to_process
        if ignore_features:
            prediction_req["ignore_features"] = ignore_features
        if apply_scaler:
            prediction_req["apply_scaler"] = apply_scaler
        if sort_values:
            prediction_req["sort_values"] = sort_values
        if loss:
            prediction_req["loss"] = loss
        if metrics:
            prediction_req["metrics"] = metrics
        if optimizer:
            prediction_req["optimizer"] = optimizer
        if histories:
            prediction_req["histories"] = histories
        if predict_feature:
            prediction_req["predict_feature"] = predict_feature
        if csv_file:
            prediction_req["csv_file"] = csv_file
        if meta_file:
            prediction_req["meta_file"] = meta_file

        already_predicted = False

        # if you just want to use the core without django training:
        if publish_to_core or settings.ANTINEX_WORKER_ONLY:
            # hand off to the core worker - no local prediction here
            log.info(("model_name={} only publish={} worker={}").format(
                use_model_name, publish_to_core, settings.ANTINEX_WORKER_ONLY))
            ml_job_obj.status = "launched"
            ml_job_obj.control_state = "launched"
            ml_job_obj.save()
            ml_result_obj.status = "launched"
            ml_result_obj.control_state = "launched"
            ml_result_obj.save()
        else:
            log.info(
                ("start make_predictions req={}").format(ppj(prediction_req)))

            prediction_res = make_predictions(req=prediction_req)

            if prediction_res["status"] != SUCCESS:
                # prediction failed - mark the job errored and return
                last_step = ("Stopping for prediction_status={} "
                             "errors: {}").format(prediction_res["status"],
                                                  prediction_res["err"])
                log.error(last_step)
                ml_job_obj.status = "error"
                ml_job_obj.control_state = "error"
                log.info(("saving job={}").format(ml_job_id))
                ml_job_obj.save()
                data["job"] = ml_job_obj.get_public()
                error_data = {
                    "status": prediction_res["status"],
                    "err": prediction_res["err"]
                }
                data["results"] = error_data
                res["status"] = ERR
                res["error"] = last_step
                res["data"] = data
                return res

            # unpack the prediction results for persistence below
            already_predicted = True
            res_data = prediction_res["data"]
            model = res_data["model"]
            model_weights = res_data["weights"]
            scores = res_data["scores"]
            acc_data = res_data["acc"]
            error_data = res_data["err"]
            # round-trip through pandas to get json-safe prediction rows
            predictions_json = {
                "predictions":
                json.loads(
                    pd.Series(res_data["sample_predictions"]).to_json(
                        orient="records"))
            }
            found_predictions = res_data["sample_predictions"]
            found_accuracy = acc_data.get("accuracy", None)

            # assumes scores[1] is the accuracy metric - TODO confirm
            # against make_predictions' scores ordering
            last_step = ("job={} accuracy={}").format(ml_job_id,
                                                      scores[1] * 100)
            log.info(last_step)

            ml_job_obj.status = "finished"
            ml_job_obj.control_state = "finished"
            ml_job_obj.save()
            log.info(("saved job={}").format(ml_job_id))

            data["job"] = ml_job_obj.get_public()
            acc_data = {"accuracy": scores[1] * 100}
            error_data = None
            log.info(("converting job={} model to json").format(ml_job_id))
            model_json = json.loads(model.to_json())
            log.info(("saving job={} weights_file={}").format(
                ml_job_id, ml_result_obj.model_weights_file))

            log.info(("building job={} results").format(ml_job_id))

            # stage all result fields; the actual save happens below
            ml_result_obj.status = "finished"
            ml_result_obj.acc_data = acc_data
            ml_result_obj.error_data = error_data
            ml_result_obj.model_json = model_json
            ml_result_obj.model_weights = model_weights
            ml_result_obj.acc_image_file = image_file
            ml_result_obj.predictions_json = predictions_json
            ml_result_obj.version = version
        # end of handing off to core worker without a database connection

        log.info(("saving job={} results").format(ml_job_id))

        # OpenShift 9.6 Postgres container killed the worker
        # here. Interested to see if this is a jsonb/jsonfield problem
        # 2018-05-20
        try:
            ml_result_obj.save()
        except Exception as e:
            res["error"] = ("Failed saving model job.id={} with ex={}").format(
                ml_job_id, e)
            res["status"] = ERR
            res["data"] = data
            log.error(res["error"])
            return res
        # end try/ex

        log.info(("done saving job={} results").format(ml_job_id))
        data["job"] = ml_job_obj.get_public()
        data["results"] = ml_result_obj.get_public()
        res["status"] = SUCCESS
        res["error"] = ""
        res["data"] = data

        # optionally also publish the request to the core worker when it
        # is enabled and we did not already run the prediction locally
        if settings.ANTINEX_WORKER_ENABLED and not already_predicted:

            if use_model_name:
                prediction_req["label"] = use_model_name

            log.info(("publishing to core use_model_name={} "
                      "worker={} already_predicted={}").format(
                          use_model_name, settings.ANTINEX_WORKER_ENABLED,
                          already_predicted))

            publish_req = {"body": prediction_req}
            if settings.CELERY_ENABLED:
                task_publish_to_core.delay(publish_node=publish_req)
            else:
                task_publish_to_core(publish_node=publish_req)
        else:
            log.info(("skip - worker={} already_predicted={}").format(
                settings.ANTINEX_WORKER_ENABLED, already_predicted))
        # send to core

    except Exception as e:
        res["status"] = ERR
        # NOTE(review): this path sets res["err"] while the rest of the
        # function uses res["error"] - callers checking "error" will see
        # an empty message on this path; confirm which key is intended
        res["err"] = ("Failed task={} with "
                      "ex={}").format(req_node["task_name"], e)
        if ml_job_obj:
            data["job"] = ml_job_obj.get_public()
        else:
            data["job"] = None

        if ml_result_obj:
            data["results"] = ml_result_obj.get_public()
        else:
            data["results"] = None
        log.error(res["err"])
    # end of try/ex

    log.info(
        ("task - {} - done - "
         "ml_job.id={} ml_result.id={} "
         "accuracy={} predictions={}").format(req_node["task_name"], ml_job_id,
                                              ml_result_id, found_accuracy,
                                              len(found_predictions)))

    return res