Example #1
0
    def test_insert_risk(self):
        """Insert a Risk row and verify that exactly one matching row exists.

        The trailing ``.one()`` raises if zero or multiple rows match, so it
        doubles as the assertion.
        """

        dycast_parameters = test_helper_functions.get_dycast_parameters()
        risk_service = risk_service_module.RiskService(dycast_parameters)
        session = database_service.get_sqlalchemy_session()

        gridpoints = geography_service.generate_grid(dycast_parameters)
        point = geography_service.get_shape_from_sqlalch_element(gridpoints[0])

        # Fixed, arbitrary test values (redundant int() casts removed).
        # NOTE(review): lat=point.x / long=point.y is swapped relative to
        # generate_risk(), which uses lat=point.y / long=point.x — confirm
        # which orientation is intended.
        risk = Risk(risk_date=datetime.date(2016, 3, 25),
                    number_of_cases=5,
                    lat=point.x,
                    long=point.y,
                    close_pairs=3,
                    close_space=2,
                    close_time=1,
                    cumulative_probability=0.032)

        # Remove any leftover row from a previous run so .one() is unambiguous.
        session.query(Risk.risk_date).filter(Risk.risk_date == risk.risk_date,
                                             Risk.lat == risk.lat,
                                             Risk.long == risk.long) \
            .delete()

        risk_service.insert_risk(session, risk)
        session.commit()

        # Raises NoResultFound / MultipleResultsFound unless exactly one row matches.
        session.query(Risk.risk_date).filter(Risk.risk_date == risk.risk_date,
                                             Risk.lat == risk.lat,
                                             Risk.long == risk.long) \
            .one()
Example #2
0
    def test_close_time_only(self):
        """Close-in-time counts from the new per-cluster query must match the
        counts produced by the old per-point comparative query."""

        dycast_parameters = test_helper_functions.get_dycast_parameters()
        risk_service = risk_service_module.RiskService(dycast_parameters)

        comparative_test_service = comparative_test_service_module.ComparativeTestService(dycast_parameters)
        session = database_service.get_sqlalchemy_session()

        riskdate = datetime.date(2016, 3, 25)
        gridpoints = geography_service.generate_grid(dycast_parameters)

        clusters_per_point_query = risk_service.get_clusters_per_point_query(session, gridpoints, riskdate)
        clusters_per_point = risk_service.get_clusters_per_point_from_query(clusters_per_point_query)

        risk_service.enrich_clusters_per_point_with_close_space_and_time(clusters_per_point)

        # Compare to old query
        daily_cases_query = comparative_test_service.get_daily_cases_query(session,
                                                                           riskdate)
        for point in gridpoints:
            cases_in_cluster_query = comparative_test_service.get_cases_in_cluster_query(daily_cases_query,
                                                                                         point)
            count_old = comparative_test_service.get_close_time_only(cases_in_cluster_query)

            for cluster in clusters_per_point:
                if cluster.point.equals(geography_service.get_shape_from_sqlalch_element(point)):
                    # assertEquals is a deprecated alias of assertEqual.
                    self.assertEqual(cluster.close_in_time, count_old)
Example #3
0
    def test_get_clusters_per_point_query(self):
        """Case counts from the new clusters-per-point query must equal the
        counts from the old per-point comparative query."""
        params = test_helper_functions.get_dycast_parameters(large_dataset=True)
        new_service = risk_service_module.RiskService(params)
        old_service = comparative_test_service_module.ComparativeTestService(params)

        session = database_service.get_sqlalchemy_session()

        riskdate = datetime.date(2016, 3, 25)
        gridpoints = geography_service.generate_grid(params)

        cluster_query = new_service.get_clusters_per_point_query(session, gridpoints, riskdate)
        clusters = new_service.get_clusters_per_point_from_query(cluster_query)

        daily_cases_query = old_service.get_daily_cases_query(session, riskdate)

        for cluster in clusters:
            # Convert the cluster's shapely point back into a WKTElement so the
            # old-style query can consume it.
            wkt_element = geography_service.get_wktelement_from_wkt(cluster.point.wkt)
            old_cluster_query = old_service.get_cases_in_cluster_query(daily_cases_query,
                                                                       wkt_element)

            count_new = cluster.get_case_count()
            count_old = database_service.get_count_for_query(old_cluster_query)

            self.assertEqual(count_new, count_old)
Example #4
0
    def test_generate_grid(self):
        """generate_grid should return a non-empty grid for a fixed extent."""
        params = dycast_parameters.DycastParameters()

        # Hard-coded extent in SRID 29193.
        # NOTE(review): extent_min_y is larger than extent_max_y here —
        # presumably generate_grid tolerates an inverted extent; confirm.
        params.srid_of_extent = "29193"
        params.extent_min_x = 197457.283284349
        params.extent_min_y = 7639474.3256114
        params.extent_max_x = 198056.722079
        params.extent_max_y = 7639344.265401

        grid = geography_service.generate_grid(params)
        self.assertIsNotNone(grid)
        self.assertGreaterEqual(len(grid), 1)
Example #5
0
    def test_get_cases_in_cluster_query_old(self):
        """The old per-point cluster query should find at least one case at
        the first grid point of the test dataset."""
        params = test_helper_functions.get_dycast_parameters()
        old_service = comparative_test_service_module.ComparativeTestService(params)

        session = database_service.get_sqlalchemy_session()

        riskdate = datetime.date(2016, 3, 25)

        first_point = geography_service.generate_grid(params)[0]

        daily_cases_query = old_service.get_daily_cases_query(session, riskdate)
        cluster_query = old_service.get_cases_in_cluster_query(daily_cases_query,
                                                              first_point)
        case_count = database_service.get_count_for_query(cluster_query)

        self.assertGreater(case_count, 0)
Example #6
0
    def generate_risk(self):
        """Compute and persist daily Risk rows for every grid point between
        startdate and enddate (inclusive).

        For each day, case clusters are built per grid point; points whose
        case count reaches case_threshold get close-space/close-time stats
        and a cumulative probability, and are inserted as Risk rows. Commits
        once per day so a failure loses at most the current day's work.

        Raises:
            SQLAlchemyError: if the final commit fails (after rollback).
        """

        session = database_service.get_sqlalchemy_session()
        logging_service.display_current_parameter_set(self.dycast_parameters)

        case_threshold = self.dycast_parameters.case_threshold

        gridpoints = geography_service.generate_grid(self.dycast_parameters)

        day = self.dycast_parameters.startdate
        delta = datetime.timedelta(days=1)

        while day <= self.dycast_parameters.enddate:
            start_time = time.time()
            logging.info("Starting daily_risk for %s", day)
            points_above_threshold = 0

            clusters_per_point_query = self.get_clusters_per_point_query(
                session, gridpoints, day)
            clusters_per_point = self.get_clusters_per_point_from_query(
                clusters_per_point_query)

            for cluster in clusters_per_point:
                vector_count = cluster.get_case_count()
                if vector_count >= case_threshold:
                    points_above_threshold += 1
                    # Enrich the cluster in place before building the Risk row.
                    self.get_close_space_and_time_for_cluster(cluster)
                    self.get_cumulative_probability_for_cluster(
                        session, cluster)

                    risk = Risk(
                        risk_date=day,
                        number_of_cases=vector_count,
                        lat=cluster.point.y,
                        long=cluster.point.x,
                        close_pairs=cluster.close_space_and_time,
                        close_space=cluster.close_in_space,
                        close_time=cluster.close_in_time,
                        cumulative_probability=cluster.cumulative_probability)

                    self.insert_risk(session, risk)

            session.commit()

            logging.info("Finished daily_risk for %s: done %s points", day,
                         len(gridpoints))
            logging.info("Total points above threshold of %s: %s",
                         case_threshold, points_above_threshold)
            logging.info("Time elapsed: %.0f seconds",
                         time.time() - start_time)

            day += delta

        try:
            session.commit()
        except SQLAlchemyError as e:
            # Fixed Python-2-only 'except X, e' syntax; 'as' works on 2.6+ and 3.x.
            session.rollback()
            logging.exception(
                "There was a problem committing the risk data session")
            logging.exception(e)
            raise
Example #7
0
    def generate_risk(self):
        """Compute and persist daily Risk rows (older per-point implementation).

        Days whose total case count is below case_threshold are skipped
        entirely. For qualifying days, each grid point is queried individually
        for cases in its cluster; points at or above the threshold get
        close-pair statistics and a cumulative probability and are inserted
        as Risk rows. A single commit happens at the very end.

        Raises:
            SQLAlchemyError: if the final commit fails (after rollback).
        """

        session = database_service.get_sqlalchemy_session()
        logging_service.display_current_parameter_set(self.dycast_parameters)

        case_threshold = self.dycast_parameters.case_threshold

        gridpoints = geography_service.generate_grid(self.dycast_parameters)

        day = self.dycast_parameters.startdate
        delta = datetime.timedelta(days=1)

        while day <= self.dycast_parameters.enddate:

            daily_cases_query = self.get_daily_cases_query(session, day)
            daily_case_count = database_service.get_count_for_query(
                daily_cases_query)

            if daily_case_count >= case_threshold:
                start_time = time.time()
                logging.info("Starting daily_risk for %s", day)
                points_above_threshold = 0

                clusters_per_point = self.get_clusters_per_point_query(
                    session, gridpoints, day)

                # NOTE(review): this loop builds a Risk that is never inserted
                # and also increments points_above_threshold, which the loop
                # below increments again — looks like leftover migration code;
                # confirm before removing.
                for cluster in clusters_per_point:
                    vector_count = len(cluster.case_array)
                    if vector_count >= case_threshold:
                        points_above_threshold += 1
                        point = geography_service.get_shape_from_sqlalch_element(
                            cluster.point)
                        risk = Risk(risk_date=day,
                                    number_of_cases=vector_count,
                                    lat=point.x,
                                    long=point.y)

                for point in gridpoints:
                    cases_in_cluster_query = self.get_cases_in_cluster_query(
                        daily_cases_query, point)
                    vector_count = database_service.get_count_for_query(
                        cases_in_cluster_query)
                    if vector_count >= case_threshold:
                        points_above_threshold += 1
                        risk = Risk(risk_date=day,
                                    number_of_cases=vector_count,
                                    lat=point.x,
                                    long=point.y)

                        # close_space/close_time exclude the pairs already
                        # counted as close in both space and time.
                        risk.close_pairs = self.get_close_space_and_time(
                            cases_in_cluster_query)
                        risk.close_space = self.get_close_space_only_old(
                            cases_in_cluster_query) - risk.close_pairs
                        risk.close_time = self.get_close_time_only(
                            cases_in_cluster_query) - risk.close_pairs

                        risk.cumulative_probability = self.get_cumulative_probability(
                            session, risk.number_of_cases, risk.close_pairs,
                            risk.close_space, risk.close_time)
                        self.insert_risk(session, risk)

                logging.info("Finished daily_risk for %s: done %s points", day,
                             len(gridpoints))
                logging.info("Total points above threshold of %s: %s",
                             case_threshold, points_above_threshold)
                logging.info("Time elapsed: %.0f seconds",
                             time.time() - start_time)
            else:
                logging.info(
                    "Amount of cases for %s lower than threshold %s: %s, skipping.",
                    day, case_threshold, daily_case_count)

            day += delta

        try:
            session.commit()
        except SQLAlchemyError as e:
            # Fixed Python-2-only 'except X, e' syntax; 'as' works on 2.6+ and 3.x.
            session.rollback()
            logging.exception(
                "There was a problem committing the risk data session")
            logging.exception(e)
            raise