Exemplo n.º 1
0
def uploadEvalModel(request):
    """Accept an uploaded climate-model file and stage it in the user's
    temporary evaluation space.

    Raises exc.HTTPBadRequest when the upload cannot be parsed as a
    ClimateModelOutput or cannot be saved; returns exc.HTTPOk on success.
    """
    Helper.permissions(request)

    tempLocation = Helper.tempStorage(
        request.POST['file'].file)  # Store the file temporarily
    try:
        # Produce a climate model output object from the raw upload
        model = ClimateModelOutput(tempLocation)
    except Exception as e:
        # Report the issue when trying to parse the climate model output
        raise exc.HTTPBadRequest(str(e)) from e
    finally:
        # Delete the temporary file whether or not parsing succeeded
        os.remove(tempLocation)

    # Record model within the user's temp space
    directory = os.path.join(TEMPSTORAGE, request.session["username"])
    try:
        # exist_ok avoids the isdir-then-mkdir race of the original check
        os.makedirs(directory, exist_ok=True)

        # Save the model in the temporary space under its original filename
        model.save(os.path.join(directory, request.POST["file"].filename))
    except Exception as e:
        raise exc.HTTPBadRequest(str(e)) from e

    return exc.HTTPOk()
Exemplo n.º 2
0
    def test_hash(self):
        """Hashing reflects lat, lon and the data arrays, but not `sparse`."""
        # Round-trip a model through save/load: hashes must match.
        f1 = ClimateModelOutput(TEST_NC)
        f1.save(TEST_FILE)
        hash_f1 = hash(f1)
        f2 = ClimateModelOutput.load(TEST_FILE)
        self.assertEqual(hash_f1, hash(f2))

        # Mutating latitude changes the hash.
        f2.lat[0] += 1
        self.assertNotEqual(hash_f1, hash(f2))

        # Distinct hashes let both objects coexist as separate dict keys.
        dict_of_clim_mod = dict()
        dict_of_clim_mod[f1] = "a"
        dict_of_clim_mod[f2] = "b"
        self.assertEqual(dict_of_clim_mod[f1], "a")
        self.assertEqual(dict_of_clim_mod[f2], "b")

        # Mutating longitude changes the hash (fresh copy each time).
        f2 = ClimateModelOutput.load(TEST_FILE)
        f2.lon[0] += 1
        self.assertNotEqual(hash_f1, hash(f2))

        # Mutating the data arrays changes the hash.
        f2 = ClimateModelOutput.load(TEST_FILE)
        f2.numpy_arrays = np.zeros_like(f2.numpy_arrays)
        self.assertNotEqual(hash_f1, hash(f2))

        # The `sparse` attribute may differ between the two objects...
        f2 = ClimateModelOutput.load(TEST_FILE)
        f2.sparse = "Some thing other than None"
        self.assertNotEqual(f1.sparse, f2.sparse)
        # But this should not affect equality
        self.assertEqual(hash_f1, hash(f2))
def submitBatch(request):
    """Submit the expert's scored batch for a question and start training
    their expert model on it.

    Raises exc.HTTPBadRequest when the "qid" parameter is missing or is not
    an integer.
    """
    Helper.permissions(request)

    try:
        qid = int(request.params["qid"])
    except (KeyError, ValueError, TypeError):
        # Narrowed from a bare except: missing parameter or non-integer value
        raise exc.HTTPBadRequest("Question ID not passed")

    username = request.session["username"]

    # Collect Expert information
    expert = db.executeOne("collectEMIdentifier", [username, qid])
    batch = db.execute("collectBatch", [username, qid])
    db.execute("submitBatch", [username, qid])  # Update batch assignment

    # Prepare batch information
    CMOs, scores = [], []
    for CMOid, score in batch:
        # NOTE: fixed os.path.join(CMOSTORAGE + str(CMOid)), which string-
        # concatenated instead of joining path components (cf. the comma
        # form used elsewhere in this file).
        CMOs.append(
            ClimateModelOutput.load(os.path.join(CMOSTORAGE, str(CMOid))))
        scores.append(score)

    # Train the expert model
    ProcessRunner().add_process(expert["identifier"], username, CMOs, scores)

    return {}
    def models():
        """Return the cached list of every stored ClimateModelOutput,
        loading them all from CMOSTORAGE on first access."""
        # Lazily populate the shared cache only once, on the first call.
        if not CMOStore._model_store:
            candidates = (name for name in os.listdir(CMOSTORAGE)
                          if name != "Placeholder.ignore")
            for name in candidates:
                path = os.path.join(CMOSTORAGE, name)
                CMOStore._model_store.append(ClimateModelOutput.load(path))

        return CMOStore._model_store
def modelFileUploader(request):
    """Accept a climate-model upload, parse it, and register it in
    permanent storage under a freshly allocated database id."""
    Helper.permissions(request)

    # Persist the raw upload to a temporary path so it can be parsed
    tempLocation = Helper.tempStorage(request.POST['file'].file)

    try:
        model = ClimateModelOutput(tempLocation)  # Parse the upload
    except Exception as e:
        print(str(e))  # Surface the parsing failure on the server console
        raise exc.HTTPServerError(str(e))
    finally:
        os.remove(tempLocation)  # Always discard the temporary file

    # Allocate a database id for the new model, then store it under that id
    uploader = request.session["username"]
    modelID = db.executeID("modelUploaded", [uploader])
    destination = os.path.join(CMOSTORAGE, str(modelID))
    model.save(destination)

    return {}
    def test_regress(self):
        """A KNN regression model trains via partial_fit and yields float
        predictions with an L1 loss in [0, 1]."""
        model = ExpertModelAPI()
        model_id = model.create_model(model_type="KNN_regress")
        # Ten permuted copies of the test NetCDF file, half labelled 0, half 1.
        train = [permute_cmo(ClimateModelOutput(TEST_NC)) for _ in range(10)]
        labels = [0 for _ in range(5)] + [1 for _ in range(5)]
        performance = model.partial_fit(model_id=model_id,
                                        data=train,
                                        targets=labels)

        # Training reports a ModelOutputs record with a bounded L1 loss.
        self.assertIsInstance(performance, ModelOutputs)
        self.assertIsInstance(performance.L1_loss, float)
        self.assertTrue(0 <= performance.L1_loss <= 1)

        # Regression predictions are plain floats.
        prediction = model.predict(model_id=model_id, data=train)[0]
        self.assertIsInstance(prediction, float)
 def test_all_models(self):
     """Every available model type supports the full lifecycle: create,
     close, unsupervised fit, partial fit, predict, and delete."""
     for model_type in available_models():
         # Ten permuted copies of the test file, half labelled 0, half 1.
         train = [permute_cmo(ClimateModelOutput(TEST_NC)) for _ in range(10)]
         labels = [0 for _ in range(5)] + [1 for _ in range(5)]
         model = ExpertModelAPI()
         model_id = model.create_model(model_type=model_type)
         # A newly created model both exists and is open.
         self.assertIn(model_id, model.models_that_exist)
         self.assertIn(model_id, model.open_models)
         # Closing removes it from the open set but it continues to exist.
         model.close_model(model_id=model_id)
         self.assertIn(model_id, model.models_that_exist)
         self.assertNotIn(model_id, model.open_models)
         model.fit_unsupervised(model_id=model_id, data=train)
         performance = model.partial_fit(model_id=model_id, data=train, targets=labels)
         self.assertIsInstance(performance, ModelOutputs)
         predictions = np.array(model.predict(model_id=model_id, data=train))
         # Predictions must be reproducible across a close/reopen cycle.
         model.close_model(model_id=model_id)
         predictions_2 = np.array(model.predict(model_id=model_id, data=train))
         self.assertTrue(np.all(predictions == predictions_2))
         model.delete_model(model_id=model_id)
Exemplo n.º 8
0
    def test_equal(self):
        """Equality compares lat, lon and the data arrays, but not `sparse`."""
        # A model equals its saved-and-reloaded copy.
        f1 = ClimateModelOutput(TEST_NC)
        f1.save(TEST_FILE)
        f2 = ClimateModelOutput.load(TEST_FILE)
        self.assertEqual(f1, f2)

        # Changing latitude breaks equality.
        f2.lat[0] += 1
        self.assertNotEqual(f1, f2)

        # Changing longitude breaks equality (fresh copy each time).
        f2 = ClimateModelOutput.load(TEST_FILE)
        f2.lon[0] += 1
        self.assertNotEqual(f1, f2)

        # Changing the data arrays breaks equality.
        f2 = ClimateModelOutput.load(TEST_FILE)
        f2.numpy_arrays = np.zeros_like(f2.numpy_arrays)
        self.assertNotEqual(f1, f2)

        # The `sparse` attribute may differ between the two objects...
        f2 = ClimateModelOutput.load(TEST_FILE)
        f2.sparse = "Some thing other than None"
        self.assertNotEqual(f1.sparse, f2.sparse)
        # But this should not affect equality
        self.assertEqual(f1, f2)
    def test_semi_supervised(self):
        """A KNN_PCA model supports an unsupervised fit followed by a
        supervised partial fit, then yields float predictions."""
        model = ExpertModelAPI()
        model_id = model.create_model(model_type="KNN_PCA")
        train = [permute_cmo(ClimateModelOutput(TEST_NC)) for _ in range(10)]

        # Add Gaussian noise so the training samples are not identical.
        for data in train:
            data.numpy_arrays += np.random.normal(size=data.numpy_arrays.shape)

        labels = [0 for _ in range(5)] + [1 for _ in range(5)]
        # Unsupervised stage first (presumably fits the PCA projection —
        # confirm against the ExpertModelAPI implementation).
        model.fit_unsupervised(model_id=model_id, data=train)

        performance = model.partial_fit(model_id=model_id,
                                        data=train,
                                        targets=labels)

        # The supervised stage reports a bounded L1 loss.
        self.assertIsInstance(performance, ModelOutputs)
        self.assertIsInstance(performance.L1_loss, float)
        self.assertTrue(0 <= performance.L1_loss <= 1)

        prediction = model.predict(model_id=model_id, data=train)[0]
        self.assertIsInstance(prediction, float)
    def test_initialise_create_and_train(self):
        """End-to-end lifecycle for a KNN classifier: create, close, train,
        predict, re-create, delete, and error on the deleted model."""
        model = ExpertModelAPI()
        model_id = model.create_model(model_type="KNN_classify")
        # A freshly created model both exists and is open.
        self.assertIn(model_id, model.models_that_exist)
        self.assertIn(model_id, model.open_models)

        model.close_model(model_id=model_id)

        # Closing removes it from the open set but it still exists.
        self.assertIn(model_id, model.models_that_exist)
        self.assertNotIn(model_id, model.open_models)

        # Ten permuted copies of the test file, half labelled 0, half 1.
        train = [permute_cmo(ClimateModelOutput(TEST_NC)) for _ in range(10)]
        labels = [0 for _ in range(5)] + [1 for _ in range(5)]

        # partial_fit works even though the model was closed above.
        performance = model.partial_fit(model_id=model_id,
                                        data=train,
                                        targets=labels)

        self.assertIsInstance(performance, ModelOutputs)
        self.assertIsInstance(performance.accuracy, float)
        self.assertTrue(0 <= performance.accuracy <= 1)

        predictions = np.array(model.predict(model_id=model_id, data=train))

        # Predictions must be stable across a close/reopen cycle.
        model.close_model(model_id=model_id)
        predictions_2 = np.array(model.predict(model_id=model_id, data=train))
        self.assertTrue(np.all(predictions == predictions_2))

        # New models get fresh identifiers.
        model_id2 = model.create_model(model_type="KNN_classify")
        self.assertNotEqual(model_id, model_id2)

        model.delete_model(model_id=model_id)

        model = ExpertModelAPI()

        # Predicting with the deleted model must raise with this message.
        with self.assertRaisesRegex(
                ModelDoesNotExistException,
                r"The model with id: 0x[0-9]+ does not exist!!"):
            model.predict(model_id=model_id, data=train)
Exemplo n.º 11
0
def updatePersonal(request):
    """Update the user's preferred model specification and rebuild every
    expert model they own using the new model type.

    Raises exc.HTTPBadRequest when "model_spec" is missing from the request.
    """
    Helper.permissions(request)

    try:
        modelSpec = request.params["model_spec"]
    except KeyError:
        # Narrowed from a bare except: only a missing parameter is expected
        raise exc.HTTPBadRequest("Information missing")

    user = db.executeOne("User_Info", [request.session["username"]])

    # NOTE(review): this body was wrapped in an always-true `if True:  # TODO`
    # guard; decide whether the rebuild should be conditional (e.g. only when
    # the spec actually changed).

    # Change all the models currently implemented and re-train
    expertsModels = db.execute_literal(
        "SELECT * FROM expertModels WHERE username = ?",
        [user["username"]])

    # Update the user's Model Specification
    db.execute_literal("UPDATE users SET modelSpec = ? WHERE username = ?",
                       [modelSpec, user["username"]])

    for model in expertsModels:
        # Replace the old model with the new model in the database
        identifier = ExpertModelAPI().create_model(model_type=modelSpec)
        db.execute_literal(
            "UPDATE expertModels SET identifier = ? WHERE identifier = ?",
            [identifier, model["identifier"]])

        # Re-train the replacement on the labels already collected
        trainingData = db.execute("collectModelLabels",
                                  [user["username"], model["qid"]])
        CMOs, scores = [], []
        for datapoint in trainingData:
            CMOs.append(
                ClimateModelOutput.load(
                    os.path.join(CMOSTORAGE, str(datapoint["cmoid"]))))
            scores.append(datapoint["score"])
        ProcessRunner().add_process(identifier, user["username"], CMOs,
                                    scores)

        # Delete old model
        ExpertModelAPI().delete_model(model_id=model["identifier"])
def collectModelKML(request):
    """Return the GeoJSON for one layer of a stored climate model output.

    The model id ("cmoid") and layer index ("layer") come from the route's
    matchdict.
    """
    # NOTE: fixed os.path.join(CMOSTORAGE + id), which string-concatenated
    # instead of joining path components (cf. the comma form used elsewhere
    # in this file).
    cmo = ClimateModelOutput.load(
        os.path.join(CMOSTORAGE, request.matchdict['cmoid']))
    return cmo.get_geojson(int(request.matchdict["layer"]))
Exemplo n.º 13
0
 def test_geojson(self):  # pylint: disable=no-self-use
     """Smoke test: building GeoJSON for layer 0 raises no exception."""
     cmo = ClimateModelOutput(TEST_NC)
     cmo.get_geojson(0)
Exemplo n.º 14
0
 def test_initialise_save_and_load(self):
     """A model saved to disk and loaded back compares equal to the original."""
     original = ClimateModelOutput(TEST_NC)
     print(original.numpy_arrays.shape)
     original.save(TEST_FILE)
     restored = ClimateModelOutput.load(TEST_FILE)
     self.assertEqual(original, restored)
Exemplo n.º 15
0
 def test_KML_load(self):  # pylint: disable=no-self-use
     """Smoke test: constructing a ClimateModelOutput from the test NetCDF
     file raises no exception."""
     ClimateModelOutput(TEST_NC)
Exemplo n.º 16
0
def evalModels(request):
    """Evaluate the user's uploaded climate models against the selected
    experts' trained models, grouped by question.

    Expects a JSON body containing an "experts" list of {"user", "qid"}
    pairs. Returns a list of per-question dicts with the expert display
    names and per-model score values; the uploaded models are removed from
    the temp directory afterwards.

    Raises exc.HTTPBadRequest when the body is missing or not valid JSON.
    """
    Helper.permissions(request)

    # Collect information to evaluate
    try:
        data = request.json_body
    except ValueError:
        # Narrowed from a bare except: json_body raises ValueError on a
        # missing or malformed JSON body
        raise exc.HTTPBadRequest()

    # Group the selected expert usernames by question id
    questions = {}
    experts = set()
    for pair in data["experts"]:
        experts.add(pair["user"])
        questions.setdefault(pair["qid"], []).append(pair["user"])

    # Map each expert's username to their display name
    expertNames = {
        u: " ".join([i["title"], i["firstname"], i["lastname"]])
        for u in experts for i in db.execute("User_Info", [u])
    }

    # Collect uploaded models to evaluate
    modelDirectory = os.path.join(TEMPSTORAGE, request.session["username"])
    CMOnames = os.listdir(modelDirectory)
    CMOpaths = [
        os.path.join(modelDirectory, filename) for filename in CMOnames
    ]
    CMOs = [ClimateModelOutput.load(path) for path in CMOpaths]

    # Data type to return to the page
    data = []

    for qid in questions.keys():

        questionContents = {
            "text": db.executeOne("questionName", [qid])["text"],
            "models": []
        }

        predictions = []
        errored = []
        for i, expert in enumerate(questions[qid]):

            # Collect the expert's model identifier
            identifier = db.executeOne("collectEModel",
                                       [expert, qid])["identifier"]

            # Predict on the CMOs and store the results
            try:
                predictions.append(ExpertModelAPI().predict(
                    model_id=identifier, data=CMOs))
            except ModelNotTrainedException:
                errored.append(i)

        # Delete from the highest index down so earlier removals do not
        # shift the positions of later errored entries (the original
        # ascending deletion removed the wrong experts once more than one
        # had errored).
        for i in reversed(errored):
            del questions[qid][i]

        questionContents["experts"] = [expertNames[u] for u in questions[qid]]

        count = 0
        for i in range(len(CMOs)):
            count += len(predictions)
            modelInfo = {
                "name": CMOnames[i],
                "values":
                [round(opinion[i] * 20, 5) for opinion in predictions]
            }
            questionContents["models"].append(modelInfo)

        # Skip questions for which no prediction at all was produced
        if count != 0:
            data.append(questionContents)

    # Remove uploaded models that have not been evaluated.
    Helper.emptyDirectory(modelDirectory)

    return data