Example 1
def compare_triangles(self, expected, actual):
    self.assertEqual(expected.vert_ids, actual.vert_ids)

    almost_equal(self, expected.normal[0], actual.normal[0], 0.2)
    almost_equal(self, expected.normal[1], actual.normal[1], 0.2)
    almost_equal(self, expected.normal[2], actual.normal[2], 0.2)

    almost_equal(self, expected.distance, actual.distance, 0.2)
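In Examples 1, 7, 8 and 9, almost_equal receives the test case itself as its first argument, which suggests a thin wrapper around unittest's delta-based comparison. A minimal sketch under that assumption (only the call shape comes from the examples; the default tolerance is a guess):

import unittest


def almost_equal(test_case, expected, actual, tolerance=1e-7):
    # Delegate to unittest's assertAlmostEqual so a failure reports
    # both values and the permitted delta.
    test_case.assertAlmostEqual(expected, actual, delta=tolerance)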
Example 2
def test_calc_risk_percentage():
    GIVEN("a handler, a probability and price")
    mediator = MockMediator()
    handler = OrdersHandler(mediator=mediator, bank=1)
    probability = 0.6
    price = 2

    WHEN("we calculate the uncapped risk percentage")
    risk = handler._calc_risk_percentage(probability=probability,
                                         price=price,
                                         kelly_fraction=1,
                                         cap=1)
    THEN("the correct risk percentage is returned")
    assert almost_equal(risk,
                        ((probability * price) - (1 - probability)) / price)

    WHEN("we calculate cap the risk percentage at 10%")
    capped_risk = handler._calc_risk_percentage(probability=probability,
                                                price=price,
                                                kelly_fraction=1,
                                                cap=0.1)
    THEN("the correct risk percentage is returned")
    assert capped_risk == 0.1

    WHEN("we calculate the risk percentage based on a reduced kelly fraction")
    fraction = 0.1
    reduced_risk = handler._calc_risk_percentage(probability=probability,
                                                 price=price,
                                                 kelly_fraction=fraction,
                                                 cap=1)
    THEN("the reduced risk is less than the original risk")
    assert reduced_risk < risk
    THEN("the correct risk percentage is returned")
    assert almost_equal(
        reduced_risk,
        (((probability * price)**fraction - (1 - probability)**fraction) /
         ((price * probability)**fraction + (price *
                                             (1 - probability))**fraction)),
    )
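Examples 2 to 6 and 10 use almost_equal as a bare predicate under assert, so that form must return a boolean. A minimal sketch assuming a math.isclose-based comparison (the tolerance is an assumption), together with the value the first assert above expects:

import math


def almost_equal(a, b, tolerance=1e-9):
    # Predicate form: True when the two values agree within tolerance.
    return math.isclose(a, b, abs_tol=tolerance)


# With probability=0.6 and price=2 the uncapped Kelly risk is
# ((0.6 * 2) - (1 - 0.6)) / 2 = 0.8 / 2 = 0.4.
assert almost_equal(((0.6 * 2) - (1 - 0.6)) / 2, 0.4)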
Example 3
def test_log():
    GIVEN("a list of values")
    approx_e = 2.71828182846
    values = [
        approx_e**0,
        approx_e**1,
        approx_e**2,
        approx_e**3,
        approx_e**4,
        approx_e**5,
    ]
    WHEN("we calculate the log for each value in the list")
    for i, value in enumerate(values):
        assert almost_equal(calculate_log(value), i)
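calculate_log is not shown. Since the test feeds in powers of e and expects the exponents back, a natural-logarithm wrapper fits; only the name comes from the test, the body is an assumption:

import math


def calculate_log(value):
    # Natural log, so calculate_log(e ** n) returns (approximately) n.
    return math.log(value)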
Example 4
def test_real_comp_data():
    GIVEN("a set of real life prices and a data transform handler")
    correct_probability = 1
    items = [
        {
            "id": 123,
            "price": 28.0
        },
        {
            "id": 223,
            "price": 120.0
        },
        {
            "id": 323,
            "price": 30.0
        },
        {
            "id": 423,
            "price": 9.1
        },
        {
            "id": 523,
            "price": 17.2
        },
        {
            "id": 623,
            "price": 72.0
        },
        {
            "id": 723,
            "price": 38.5
        },
        {
            "id": 823,
            "price": 52.5
        },
        {
            "id": 923,
            "price": 6.0
        },
        {
            "id": 113,
            "price": 15.0
        },
        {
            "id": 213,
            "price": 4.5
        },
        {
            "id": 313,
            "price": 285.0
        },
        {
            "id": 413,
            "price": 5.6
        },
        {
            "id": 513,
            "price": 34.5
        },
    ]

    handler = TransformHandler(total_probability=correct_probability)
    handler._set_items(items=items)

    expected_data = __calc_compositional_data(
        items=items, correct_probability=correct_probability)

    WHEN("we calculate the compositional data")
    compositional_data = handler._get_compositional_data(price_name="price")

    THEN("a list of dictionaries with the correct values is returned")
    for idx, item in enumerate(compositional_data):
        for key in item.keys():
            assert almost_equal(item.get(key), expected_data[idx].get(key))
    assert almost_equal(
        sum(
            item.get("compositional_probability")
            for item in compositional_data),
        correct_probability,
    )
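The expected values come from the module-private helper __calc_compositional_data, which is not shown. A plausible sketch, assuming each compositional probability is the reciprocal of the item's price rescaled so the set sums to correct_probability (the real helper's key set may differ):

def __calc_compositional_data(items, correct_probability):
    # 1/price is the probability implied by each price; rescale the
    # implied probabilities so they total correct_probability.
    implied_total = sum(1 / item["price"] for item in items)
    return [
        {
            "id": item["id"],
            "price": item["price"],
            "compositional_probability":
            (1 / item["price"]) / implied_total * correct_probability,
        }
        for item in items
    ]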
Example 5
def test_items_excluded_comp_data():
    GIVEN(
        "a set of real-life prices, a data transform handler and two items to exclude from the list"
    )
    total_probability = 1
    items_to_exclude = [
        {
            "id": 123,
            "probability": 0.04
        },
        {
            "id": 223,
            "probability": 0.008
        },
    ]
    correct_probability = total_probability - sum(
        item.get("probability") for item in items_to_exclude)
    items = [
        {
            "id": 123,
            "price": 28.0
        },
        {
            "id": 223,
            "price": 120.0
        },
        {
            "id": 323,
            "price": 30.0
        },
        {
            "id": 423,
            "price": 9.1
        },
        {
            "id": 523,
            "price": 17.2
        },
        {
            "id": 623,
            "price": 72.0
        },
        {
            "id": 723,
            "price": 38.5
        },
        {
            "id": 823,
            "price": 52.5
        },
        {
            "id": 923,
            "price": 6.0
        },
        {
            "id": 113,
            "price": 15.0
        },
        {
            "id": 213,
            "price": 4.5
        },
        {
            "id": 313,
            "price": 285.0
        },
        {
            "id": 413,
            "price": 5.6
        },
        {
            "id": 513,
            "price": 34.5
        },
    ]

    handler = TransformHandler(total_probability=total_probability)
    WHEN("we exclude two of the items and calculate the compositional data")

    item_ids_to_exclude = []
    for item in items_to_exclude:
        runner_id = item.get("id")
        handler.set_probability(runner_id=runner_id,
                                probability=item.get("probability"))
        item_ids_to_exclude.append(runner_id)
    handler._set_items(items=items)
    handler._calc_remaining_probability()
    expected_data = __calc_compositional_data(
        items=list(
            filter(lambda item: item.get("id") not in item_ids_to_exclude,
                   items)),
        correct_probability=correct_probability,
    )

    compositional_data = handler._get_compositional_data(price_name="price")

    THEN("a list of dictionaries with the correct values is returned")
    for idx, item in enumerate(compositional_data):
        for key in item.keys():
            assert almost_equal(item.get(key), expected_data[idx].get(key))
    assert almost_equal(
        sum(
            item.get("compositional_probability")
            for item in compositional_data),
        correct_probability,
    )
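set_probability and _calc_remaining_probability are exercised here but not shown. A minimal sketch of just the bookkeeping this test relies on; the attribute names are assumptions and the real TransformHandler clearly does more:

class TransformHandler:
    def __init__(self, total_probability):
        self._total_probability = total_probability
        self._fixed = {}  # runner_id -> externally fixed probability

    def set_probability(self, runner_id, probability):
        # Fix an item's probability ahead of the transform.
        self._fixed[runner_id] = probability

    def _calc_remaining_probability(self):
        # Whatever is not fixed is left for the remaining items.
        self._remaining_probability = self._total_probability - sum(
            self._fixed.values())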
Example 6
def test_real_comp_data():
    GIVEN("a set of real life prices and a probability handler")
    correct_probability = 1
    items = [
        {
            "id": 123,
            "probability": 0.035714285714286,
        },
        {
            "id": 223,
            "probability": 0.008333333333333
        },
        {
            "id": 323,
            "probability": 0.033333333333333
        },
        {
            "id": 423,
            "probability": 0.10989010989011
        },
        {
            "id": 523,
            "probability": 0.058126746109399
        },
        {
            "id": 623,
            "probability": 0.013888888888889
        },
        {
            "id": 723,
            "probability": 0.025974025974026
        },
        {
            "id": 823,
            "probability": 0.019047619047619
        },
        {
            "id": 923,
            "probability": 0.168067226890756
        },
        {
            "id": 113,
            "probability": 0.066777284297006
        },
        {
            "id": 213,
            "probability": 0.222222222222222
        },
        {
            "id": 313,
            "probability": 0.003508771929825
        },
        {
            "id": 413,
            "probability": 0.18018018018018
        },
        {
            "id": 513,
            "probability": 0.028985507246377
        },
    ]

    handler = ProbabilityHandler(items=items,
                                 name="probability",
                                 correct_probability=correct_probability)

    expected_items = __calc_compositional_probabilities(
        items=items, correct_probability=correct_probability)

    WHEN("we calculate the compositional probabilities")
    compositional_items = handler.calc_compositional_probabilities()
    THEN("a list of dictionaries with the correct values is returned")
    for idx, item in enumerate(compositional_items):
        for key in item.keys():
            assert almost_equal(item.get(key), expected_items[idx].get(key))
    assert almost_equal(
        sum(
            item.get("compositional_probability")
            for item in compositional_items),
        correct_probability,
    )
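__calc_compositional_probabilities plays the same role as the helper in Examples 4 and 5 but starts from probabilities rather than prices. A sketch under the assumption that it simply rescales the raw probabilities to sum to correct_probability:

def __calc_compositional_probabilities(items, correct_probability):
    # Rescale so the probabilities total correct_probability exactly.
    raw_total = sum(item["probability"] for item in items)
    return [
        {
            "id": item["id"],
            "probability": item["probability"],
            "compositional_probability":
            item["probability"] / raw_total * correct_probability,
        }
        for item in items
    ]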
Example 7
def compare_bounding_spheres(self, expected, actual):
    almost_equal(self, expected.radius, actual.radius)

    almost_equal(self, expected.center[0], actual.center[0], 0.2)
    almost_equal(self, expected.center[1], actual.center[1], 0.2)
    almost_equal(self, expected.center[2], actual.center[2], 0.2)
Example 8
def compare_bounding_boxes(self, expected, actual):
    almost_equal(self, expected.min[0], actual.min[0], 0.2)
    almost_equal(self, expected.min[1], actual.min[1], 0.2)
    almost_equal(self, expected.min[2], actual.min[2], 0.2)

    almost_equal(self, expected.max[0], actual.max[0], 0.2)
    almost_equal(self, expected.max[1], actual.max[1], 0.2)
    almost_equal(self, expected.max[2], actual.max[2], 0.2)
Example 9
def compare_nodes(self, expected, actual):
    almost_equal(self, expected.min[0], actual.min[0], 0.2)
    almost_equal(self, expected.min[1], actual.min[1], 0.2)
    almost_equal(self, expected.min[2], actual.min[2], 0.2)

    almost_equal(self, expected.max[0], actual.max[0], 0.2)
    almost_equal(self, expected.max[1], actual.max[1], 0.2)
    almost_equal(self, expected.max[2], actual.max[2], 0.2)

    compare_polys(self, expected.polys, actual.polys)
    compare_childrens(self, expected.children, actual.children)
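compare_polys and compare_childrens are not shown. The node comparator implies a length check followed by an element-wise recursive comparison; a minimal sketch of the children half under that assumption:

def compare_childrens(self, expected, actual):
    # Same number of children, then compare each pair recursively.
    self.assertEqual(len(expected), len(actual))
    for expected_child, actual_child in zip(expected, actual):
        compare_nodes(self, expected_child, actual_child)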
Example 10
def test_fixed_probability(mock_notify):
    GIVEN("a data handler and the directory and file name of a test file")

    directory = "./data/29451865"
    file_name = "1.162069495.txt"
    file = HistoricalExternalAPIFileHander(directory=directory, file=file_name)
    file_data = file.get_file_as_list()
    market_start_time = file.get_market_start_time()

    number_runners = __get_number_runners(data=file_data)
    unfixed_items = number_runners
    fixed_items = 0
    adapter = ExternalAPIMarketRecordAdapter(
        market_start_time=market_start_time)
    pricer = PriceHandler()
    metadata = MetadataHandler()
    mediator = MockMediator()
    correct_probability = 1

    number_records_processed = 0

    WHEN("we feed the data into a handler one record at a time")
    handler = DataHandler(
        mediator=mediator,
        adapter=adapter,
        container=DataContainer(),
    )
    for i, record in enumerate(file_data):
        number_records_processed = i + 1
        if number_records_processed % 10 == 0:
            WHEN("we randomly fix the probability of an item")
            id_to_fix = handler._get_ids_for_model_data()[0]
            fixed_probability = round(
                handler._container.get_last_column_entry(
                    name=("compositional_sp_probability", id_to_fix)),
                4,
            )
            handler._set_probability(runner_id=id_to_fix,
                                     probability=fixed_probability)
            correct_probability -= fixed_probability
            unfixed_items -= 1
            fixed_items += 1

        fixed_probability_ids = handler._get_fixed_probability_ids()
        THEN("the list of fixed probability ids is the correct length")
        assert len(fixed_probability_ids) == fixed_items

        handler.process_data(record)

        THEN("the handler's data has the correct number of records")
        assert handler._container.get_row_count() == number_records_processed

        THEN(
            "the mediator's notify method was called with the correct parameters"
        )
        model_data = handler._get_model_data()
        args, kwargs = mock_notify.call_args
        assert args == ()
        assert kwargs.get("data") == model_data
        assert kwargs.get("event") == "data added to container"

        THEN(
            "there is a record in the model data for each of the unfixed items"
        )
        assert len(model_data) == unfixed_items

        test_record = {
            each.get("id"): each
            for each in adapter.convert(record).get("items")
        }
        total_sp_probability = 0
        total_ex_probability = 0

        for data in model_data:
            THEN("each of the items in the model data has an non-zero id")
            runner_id = data.get("id")
            assert isinstance(runner_id, int)
            assert runner_id > 0

            THEN("the items probability has not been fixed")
            assert runner_id not in fixed_probability_ids

            test_item = test_record.get(runner_id)

            THEN("the data has the correct combined_back_size")
            combined_back_size = data.get("combined_back_size" +
                                          metadata.get_point_in_time_suffix())
            assert combined_back_size == (test_item.get("sp_back_size") +
                                          test_item.get("ex_back_size"))

            THEN(
                "the data contains the compositional sp probability which is between 0 and 1"
            )
            compositional_sp_probability = data.get(
                "compositional_sp_probability" +
                metadata.get_point_in_time_suffix())
            total_sp_probability += compositional_sp_probability
            assert 1 > compositional_sp_probability > 0

            THEN(
                "the data contains the compositional ex probability which is between 0 and 1"
            )
            compositional_ex_average_probability = data.get(
                "compositional_ex_average_probability" +
                metadata.get_point_in_time_suffix())
            total_ex_probability += compositional_ex_average_probability
            assert 1 > compositional_ex_average_probability > 0

            THEN("the data contains the correct offered price")
            offered_price = data.get("ex_offered_back_price" +
                                     metadata.get_point_in_time_suffix())
            assert offered_price > 0
            assert offered_price == test_item.get("ex_offered_back_price")

            THEN("the data contains the correct returns price")
            returns_price = data.get("ex_offered_back_price_mc" +
                                     metadata.get_point_in_time_suffix())
            assert returns_price > 0
            assert returns_price == pricer.remove_commission(
                test_item.get("ex_offered_back_price"))

            THEN(
                "the sp back price time series data returned is of the correct length"
            )
            compositional_sp_back_price_ts = (
                data.get("compositional_sp_back_price" +
                         metadata.get_time_series_suffix()) or [])
            assert len(
                compositional_sp_back_price_ts) == number_records_processed
            THEN(
                "the last record of the time series data matches the probability"
            )
            assert almost_equal(compositional_sp_back_price_ts[-1],
                                1 / compositional_sp_probability)

            THEN(
                "the extract_time time series data returned is of the correct length"
            )
            extract_time_ts = (data.get("extract_time" +
                                        metadata.get_time_series_suffix())
                               or [])
            assert len(extract_time_ts) == number_records_processed
            for j, extract_time in enumerate(extract_time_ts):
                if j > 0:
                    THEN("the times in the series are ascending")
                    assert extract_time > extract_time_ts[j - 1]

            THEN(
                "the combined back size time series data returned is of the correct length"
            )
            combined_back_size_ts = (
                data.get("combined_back_size" +
                         metadata.get_time_series_suffix()) or [])
            assert len(combined_back_size_ts) == number_records_processed
            THEN(
                "the last entry in the time series is the same as point in time combined_back_size"
            )
            assert combined_back_size_ts[-1] == combined_back_size
            for j, size in enumerate(combined_back_size_ts):
                if j > 0:
                    THEN("the sizes in the series are ascending")
                    assert size >= combined_back_size_ts[j - 1]

        THEN("the total ex and sp probabilities from the model_data sum to 1")
        assert almost_equal(total_sp_probability, correct_probability)
        assert almost_equal(total_ex_probability, correct_probability)

    WHEN("we have finished")
    THEN("the data container has the correct number of columns")
    assert handler._container.get_column_count() == __get_number_columns(
        number_runners)
    THEN("the data container has the same number of records as the raw data")
    assert handler._container.get_row_count() == len(file_data)
    THEN("the correct number of runners are contained in the object")
    assert len(handler.get_unique_ids()) == number_runners
    THEN(
        "the correct number of fixed probabilities are contained in the object"
    )
    assert len(handler._get_fixed_probability_ids()) == round_down(
        number_records_processed / 10)
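round_down is used to predict how many probabilities were fixed: one for every full block of ten records processed. A minimal sketch, assuming plain floor semantics:

import math


def round_down(value):
    # Floor to the nearest whole number, e.g. round_down(4.9) == 4.
    return math.floor(value)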