def main():
    """Rank actions three times against one context, then report an outcome.

    Demonstrates choose_rank with an explicit event id, with an
    auto-generated one, and again with a fresh explicit id that is then
    used for report_outcome.
    """
    config = load_config_from_json("client.json")
    model = rl_client.LiveModel(config, on_error)

    def _show(response):
        # Echo every field of a ranking response.
        print("event_id: " + response.event_id)
        print("model_id: " + response.model_id)
        print("chosen action id: " + str(response.chosen_action_id))
        print("all action probabilities " + str(response.actions_probabilities))

    context = '{"User":{"id":"a","major":"eng","hobby":"hiking"},"_multi":[{"N1":{"F1":"V1"},"N2":{"F2":"V2"}},{"N3":{"F1":"V3"}}]}'

    # Caller-supplied event id.
    event_id = str(uuid.uuid4())
    _show(model.choose_rank(context, event_id=event_id))

    # Library-generated event id.
    _show(model.choose_rank(context))

    # Fresh explicit event id, kept so the outcome can reference it.
    event_id = str(uuid.uuid4())
    _show(model.choose_rank(context, event_id=event_id))

    model.report_outcome(event_id, 1.0)
Example #2
0
    def test_choose_rank_invalid_event_id(self):
        """choose_rank must raise RLException when given an empty event id."""
        model = rl_client.LiveModel(self.config)

        context = '{"_multi":[{},{}]}'
        invalid_event_id = ""
        with self.assertRaises(rl_client.RLException):
            model.choose_rank(invalid_event_id, context)
Example #3
0
    def test_choose_rank_invalid_context(self):
        """choose_rank must raise RLException when the context is empty."""
        model = rl_client.LiveModel(self.config)

        invalid_context = ""
        with self.assertRaises(rl_client.RLException):
            model.choose_rank("event_id", invalid_context)
Example #4
0
    def test_async_error_callback(self):
        """LiveModel construction accepts a background-error callback."""

        def on_error(self, error_code, error_message):
            # Surface any asynchronous failure reported by the client.
            print("Background error:")
            print(error_message)

        model = rl_client.LiveModel(self.config, on_error)
        # Faking a background failure would need dependency injection; for
        # now this only verifies the callback is loaded at construction.
Example #5
0
    def test_report_outcome(self):
        """report_outcome accepts both numeric and JSON-string outcomes."""
        model = rl_client.LiveModel(self.config)

        context = '{"_multi":[{},{}]}'
        event_id = "event_id"
        model.choose_rank(context, event_id=event_id)
        # Report the same event twice: once numeric, once as a string payload.
        model.report_outcome(event_id, 1.0)
        model.report_outcome(event_id, "{'result':'res'}")
Example #6
0
    def test_exception_contains_code(self):
        """RLException raised by a bad config exposes `code` and `__str__`."""
        try:
            # Constructing LiveModel with an empty config is expected to fail.
            model = rl_client.LiveModel(rl_client.Configuration())
        except rl_client.RLException as err:
            self.assertTrue(hasattr(err, "code"))
            self.assertTrue(hasattr(err, "__str__"))
        else:
            # No exception surfaced: the test must fail.
            self.fail("rl_client.RLException was not raised")
Example #7
0
    def __init__(self, args):
        """Store CLI options, build the rl_client model, and seed test users.

        Args:
            args: parsed options object; must expose `json_config`.
        """
        self._options = args

        self.config = load_config_from_json(self._options.json_config)
        self._rl_client = rl_client.LiveModel(self.config, on_error)

        # Per-person true preference weights over the two actions.
        prefs_rnc = {"HerbGarden": 0.3, "MachineLearning": 0.2}
        prefs_mk = {"HerbGarden": 0.1, "MachineLearning": 0.4}

        self._actions = ["HerbGarden", "MachineLearning"]
        self._people = [
            person("rnc", "engineering", "hiking", "spock", prefs_rnc),
            person("mk", "psychology", "kids", "7of9", prefs_mk),
        ]
Example #8
0
    def __init__(self, args):
        """Store CLI options, build the rl_client model, and seed test users.

        Args:
            args: parsed options object; must expose `json_config`.
        """
        self._options = args

        self.config = load_config_from_json(self._options.json_config)
        self._rl_client = rl_client.LiveModel(self.config, on_error)

        # Per-person true preference weights over the two actions.
        prefs_a = {"HerbGarden": 0.3, "MachineLearning": 0.2}
        prefs_b = {"HerbGarden": 0.1, "MachineLearning": 0.4}

        self._actions = ["HerbGarden", "MachineLearning"]
        self._people = [
            person("rnc", "engineering", "hiking", "spock", prefs_a),
            person("mk", "psychology", "kids", "7of9", prefs_b),
        ]
def basic_usage_multistep():
    """Run two interleaved episodes of episodic decisions and report outcomes.

    Each episode gets two chained events ("event1" -> "event2"); the second
    event names the first as its predecessor.
    """
    config = load_config_from_json("client.json")

    model = rl_client.LiveModel(config)

    episode1 = rl_client.EpisodeState("episode1")
    episode2 = rl_client.EpisodeState("episode2")

    def _decide(event_id, previous_id, context, episode):
        # Request one in-episode decision and echo its identifiers.
        response = model.request_episodic_decision(event_id, previous_id,
                                                   context, episode)
        print("episode id:", episode.episode_id)
        print("event id:", response.event_id)
        print("chosen action:", response.chosen_action_id)

    # First event of each episode has no predecessor.
    _decide("event1", None,
            '{"shared":{"F1": 1.0}, "_multi": [{"AF1": 2.0}, {"AF1": 3.0}]}',
            episode1)
    _decide("event1", None,
            '{"shared":{"F2": 1.0}, "_multi": [{"AF2": 2.0}, {"AF2": 3.0}]}',
            episode2)

    # Second events chain back to "event1" in their own episode.
    _decide("event2", "event1",
            '{"shared":{"F1": 4.0}, "_multi": [{"AF1": 2.0}, {"AF1": 3.0}]}',
            episode1)
    _decide("event2", "event1",
            '{"shared":{"F2": 4.0}, "_multi": [{"AF2": 2.0}, {"AF2": 3.0}]}',
            episode2)

    model.report_outcome(episode1.episode_id, "event1", 1.0)
    model.report_outcome(episode2.episode_id, "event2", 1.0)
def main(args):
    """Replay a decision log: re-rank each context and re-report outcomes.

    Args:
        args: raw command-line arguments for process_cmd_line; must yield
            `json_config` and `log_file` options.
    """
    options = process_cmd_line(args)

    config = load_config_from_json(options.json_config)
    model = rl_client.LiveModel(config, on_error)
    model.init()

    with open(options.log_file) as log:
        for line in log:
            example = json.loads(line)

            # Re-rank the logged context under its original event id.
            model.choose_rank(json.dumps(example["c"]),
                              event_id=example["EventId"])

            # Re-report each logged observation, if any.
            for observation in example.get("o", []):
                value = observation["v"]
                # Numbers pass through as-is; everything else is re-encoded
                # as a JSON string outcome.
                if not isinstance(value, numbers.Real):
                    value = json.dumps(value)
                model.report_outcome(observation["EventId"], value)
Example #11
0
    def test_choose_rank(self):
        """choose_rank succeeds on a minimal two-action context."""
        model = rl_client.LiveModel(self.config)

        context = '{"_multi":[{},{}]}'
        model.choose_rank(context, event_id="event_id")