def test_regress(self):
        model = ExpertModelAPI()
        model_id = model.create_model(model_type="KNN_regress")
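        # Ten permuted copies of one sample NetCDF output serve as a small training set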
        train = [permute_cmo(ClimateModelOutput(TEST_NC)) for _ in range(10)]
        labels = [0 for _ in range(5)] + [1 for _ in range(5)]
        performance = model.partial_fit(model_id=model_id,
                                        data=train,
                                        targets=labels)

        self.assertIsInstance(performance, ModelOutputs)
        self.assertIsInstance(performance.L1_loss, float)
        self.assertTrue(0 <= performance.L1_loss <= 1)

        prediction = model.predict(model_id=model_id, data=train)[0]
        self.assertIsInstance(prediction, float)
Example n. 2
def updatePersonal(request):
    Helper.permissions(request)

    try:
        modelSpec = request.params["model_spec"]
    except KeyError:
        raise exc.HTTPBadRequest("Information missing")

    user = db.executeOne("User_Info", [request.session["username"]])

    if True:  # TODO
        # Rebuild and retrain all of the user's existing expert models
        expertsModels = db.execute_literal(
            "SELECT * FROM expertModels WHERE username = ?",
            [user["username"]])

        # Update the user's model specification
        db.execute_literal("UPDATE users SET modelSpec = ? WHERE username = ?",
                           [modelSpec, user["username"]])

        for model in expertsModels:
            # Replace the old model with the new model in the database
            identifier = ExpertModelAPI().create_model(model_type=modelSpec)
            db.execute_literal(
                "UPDATE expertModels SET identifier = ? WHERE identifier = ?",
                [identifier, model["identifier"]])
            trainingData = db.execute("collectModelLabels",
                                      [user["username"], model["qid"]])
            CMOs, scores = [], []
            for datapoint in trainingData:
                CMOs.append(
                    ClimateModelOutput.load(
                        os.path.join(CMOSTORAGE, str(datapoint["cmoid"]))))
                scores.append(datapoint["score"])
            ProcessRunner().add_process(identifier, user["username"], CMOs,
                                        scores)
            # Delete old model
            ExpertModelAPI().delete_model(model_id=model["identifier"])
Example n. 3
def confirmUser(request):
    if not Helper.HiddenPages.validate(request.path):
        return exc.HTTPNotFound()

    try:
        username = request.params["username"]
        title = request.params["title"]
        firstname = request.params["firstname"]
        lastname = request.params["lastname"]
        organisation = request.params["organisation"]
        email = request.params["email"]
        password = request.params["password"]
    except KeyError:
        raise exc.HTTPBadRequest("Invalid request")

    # Check if username already exists
    if len(db.execute("User_Info", [username])):
        raise exc.HTTPBadRequest("Username Exists")

    salt, hashedPassword = Helper.hashPassword(password)
    db.execute("User_confirmTemp", [
        username, email, salt, hashedPassword, title, firstname, lastname,
        organisation, request.path[-10:]
    ])

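    # Create a default-technique model for every question and register it to the new user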
    questions = db.execute("collectQuestions", [])
    api = ExpertModelAPI()
    for row in questions:
        qid = row["qid"]
        model_id = api.create_model(model_type=DEFAULT_TECH)
        db.execute("eModel_insertModel", [model_id, username, qid])

    Helper.HiddenPages.remove(request.path)
Example n. 4
def fit_model_and_write_db(model_id, username, CMOs, scores):
    try:
        recordModelMetrics(
            identifier=model_id)  # Clear DB entries of records for this model
        _LOGGER.info("Metrics cleared for modelid={}, username={}".format(
            model_id, username))
        ExpertModelAPI().fit_unsupervised(model_id=model_id,
                                          data=CMOStore.models())
        _LOGGER.info(
            "Model fit unsupervised completed for modelid={}, username={}".
            format(model_id, username))
        metrics = ExpertModelAPI().partial_fit(model_id=model_id,
                                               data=CMOs,
                                               targets=scores)
        _LOGGER.info(
            "Model fit supervised completed for modelid={}, username={}".
            format(model_id, username))
        recordModelMetrics(identifier=model_id, metrics=metrics)
        _LOGGER.info(
            "Metrics written to DB for modelid={}, username={}".format(
                model_id, username))

    except Exception:
        _LOGGER.exception("Model training failed for modelid={}, username={}".format(
            model_id, username))  # TODO: surface this failure to the caller

    def test_semi_supervised(self):
        model = ExpertModelAPI()
        model_id = model.create_model(model_type="KNN_PCA")
        train = [permute_cmo(ClimateModelOutput(TEST_NC)) for _ in range(10)]

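        # Perturb each training sample with Gaussian noise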
        for data in train:
            data.numpy_arrays += np.random.normal(size=data.numpy_arrays.shape)

        labels = [0 for _ in range(5)] + [1 for _ in range(5)]
        model.fit_unsupervised(model_id=model_id, data=train)

        performance = model.partial_fit(model_id=model_id,
                                        data=train,
                                        targets=labels)

        self.assertIsInstance(performance, ModelOutputs)
        self.assertIsInstance(performance.L1_loss, float)
        self.assertTrue(0 <= performance.L1_loss <= 1)

        prediction = model.predict(model_id=model_id, data=train)[0]
        self.assertIsInstance(prediction, float)
    def test_initialise_create_and_train(self):
        model = ExpertModelAPI()
        model_id = model.create_model(model_type="KNN_classify")
        self.assertIn(model_id, model.models_that_exist)
        self.assertIn(model_id, model.open_models)

        model.close_model(model_id=model_id)

        self.assertIn(model_id, model.models_that_exist)
        self.assertNotIn(model_id, model.open_models)

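        # Permuted copies of one sample output, half labelled 0 and half labelled 1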
        train = [permute_cmo(ClimateModelOutput(TEST_NC)) for _ in range(10)]
        labels = [0 for _ in range(5)] + [1 for _ in range(5)]

        performance = model.partial_fit(model_id=model_id,
                                        data=train,
                                        targets=labels)

        self.assertIsInstance(performance, ModelOutputs)
        self.assertIsInstance(performance.accuracy, float)
        self.assertTrue(0 <= performance.accuracy <= 1)

        predictions = np.array(model.predict(model_id=model_id, data=train))

        model.close_model(model_id=model_id)
        predictions_2 = np.array(model.predict(model_id=model_id, data=train))
        self.assertTrue(np.all(predictions == predictions_2))

        model_id2 = model.create_model(model_type="KNN_classify")
        self.assertNotEqual(model_id, model_id2)

        model.delete_model(model_id=model_id)

        model = ExpertModelAPI()

        with self.assertRaisesRegex(
                ModelDoesNotExistException,
                r"The model with id: 0x[0-9]+ does not exist!!"):
            model.predict(model_id=model_id, data=train)
    def test_all_models(self):
        for model_type in available_models():
            train = [permute_cmo(ClimateModelOutput(TEST_NC)) for _ in range(10)]
            labels = [0 for _ in range(5)] + [1 for _ in range(5)]
            model = ExpertModelAPI()
            model_id = model.create_model(model_type=model_type)
            self.assertIn(model_id, model.models_that_exist)
            self.assertIn(model_id, model.open_models)
            model.close_model(model_id=model_id)
            self.assertIn(model_id, model.models_that_exist)
            self.assertNotIn(model_id, model.open_models)
            model.fit_unsupervised(model_id=model_id, data=train)
            performance = model.partial_fit(model_id=model_id,
                                            data=train,
                                            targets=labels)
            self.assertIsInstance(performance, ModelOutputs)
            predictions = np.array(model.predict(model_id=model_id, data=train))
            model.close_model(model_id=model_id)
            predictions_2 = np.array(model.predict(model_id=model_id, data=train))
            self.assertTrue(np.all(predictions == predictions_2))
            model.delete_model(model_id=model_id)
Example n. 8
def evalModels(request):
    Helper.permissions(request)

    # Collect information to evaluate

    try:
        data = request.json_body
    except ValueError:
        raise exc.HTTPBadRequest()

    questions = {}
    experts = set()
    for pair in data["experts"]:

        experts.add(pair["user"])

        if pair["qid"] in questions:
            questions[pair["qid"]].append(pair["user"])
        else:
            questions[pair["qid"]] = [pair["user"]]

    # Collect Expert information

    expertNames = {
        u: " ".join([i["title"], i["firstname"], i["lastname"]])
        for u in experts for i in db.execute("User_Info", [u])
    }

    # Collect uploaded models to evaluate
    modelDirectory = os.path.join(TEMPSTORAGE, request.session["username"])
    CMOnames = os.listdir(modelDirectory)
    CMOpaths = [
        os.path.join(modelDirectory, filename) for filename in CMOnames
    ]
    CMOs = [ClimateModelOutput.load(path) for path in CMOpaths]

    # Data structure returned to the page
    data = []

    for qid in questions.keys():

        questionContents = {
            "text": db.executeOne("questionName", [qid])["text"],
            "models": []
        }

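        # Collect each selected expert's predictions for this question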
        predictions = []
        errored = []
        for i, expert in enumerate(questions[qid]):

            # Collect the experts model identifier
            identifier = db.executeOne("collectEModel",
                                       [expert, qid])["identifier"]

            # Predict on the CMOs and store the results
            try:
                predictions.append(ExpertModelAPI().predict(
                    model_id=identifier, data=CMOs))
            except ModelNotTrainedException:
                errored.append(i)

        # Delete in reverse index order so earlier removals do not shift later indices
        for i in sorted(errored, reverse=True):
            del questions[qid][i]

        questionContents["experts"] = [expertNames[u] for u in questions[qid]]

        # count stays 0 when no expert produced predictions, in which case
        # this question is omitted from the returned data
        count = 0
        for i in range(len(CMOs)):
            count += len(predictions)
            modelInfo = {
                "name": CMOnames[i],
                "values":
                [round(opinion[i] * 20, 5) for opinion in predictions]
            }
            questionContents["models"].append(modelInfo)

        if count != 0:
            data.append(questionContents)

    # Remove uploaded models that have not been evaluated.
    Helper.emptyDirectory(modelDirectory)

    return data