Example #1
    def train(
        self,
        train_dataset: Optional[Dataset],
        eval_dataset: Optional[Dataset],
        data_module: Optional[ReAgentDataModule],
        num_epochs: int,
        reader_options: ReaderOptions,
        resource_options: Optional[ResourceOptions],
    ) -> RLTrainingOutput:

        batch_preprocessor = self.build_batch_preprocessor()
        reporter = self.get_reporter()
        # pyre-fixme[16]: `Trainer` has no attribute `set_reporter`.
        self.trainer.set_reporter(reporter)

        # assert eval_dataset is None

        self._lightning_trainer = train_eval_lightning(
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            trainer_module=self.trainer,
            data_module=data_module,
            num_epochs=num_epochs,
            use_gpu=self.use_gpu,
            batch_preprocessor=batch_preprocessor,
            reader_options=self.reader_options,
            checkpoint_path=self._lightning_checkpoint_path,
            resource_options=resource_options or ResourceOptions(),
        )
        # pyre-fixme[16]: `RLTrainingReport` has no attribute `make_union_instance`.
        training_report = RLTrainingReport.make_union_instance(
            reporter.generate_training_report())
        return RLTrainingOutput(training_report=training_report)
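A minimal invocation sketch for the method above; `manager`, `train_data`, and `eval_data` are illustrative names (an instance of the defining class and its already-queried datasets), and the sketch assumes the same imports as the example itself:

    # Hypothetical caller of the train() method shown above.
    output = manager.train(
        train_dataset=train_data,
        eval_dataset=eval_data,
        data_module=None,
        num_epochs=10,
        reader_options=ReaderOptions(),
        resource_options=None,  # train() substitutes ResourceOptions() when None is passed
    )
    print(output.training_report)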
Example #2
    def train(
        self,
        train_dataset: Optional[Dataset],
        eval_dataset: Optional[Dataset],
        test_dataset: Optional[Dataset],
        data_module: Optional[ReAgentDataModule],
        num_epochs: int,
        reader_options: ReaderOptions,
        resource_options: ResourceOptions,
    ) -> RLTrainingOutput:
        batch_preprocessor = self.build_batch_preprocessor(
            resource_options.use_gpu)
        reporter = self.get_reporter()
        # pyre-fixme[16]: `Trainer` has no attribute `set_reporter`.
        self.trainer.set_reporter(reporter)

        # assert eval_dataset is None

        # pyre-fixme[16]: `ActorCriticBase` has no attribute `_lightning_trainer`.
        self._lightning_trainer = train_eval_lightning(
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            test_dataset=test_dataset,
            trainer_module=self.trainer,
            data_module=data_module,
            num_epochs=num_epochs,
            logger_name="ActorCritic",
            batch_preprocessor=batch_preprocessor,
            reader_options=self.reader_options,
            checkpoint_path=self._lightning_checkpoint_path,
            resource_options=resource_options or ResourceOptions(),
        )
        if reporter is None:
            training_report = None
        else:
            # pyre-fixme[16]: `RLTrainingReport` has no attribute `make_union_instance`.
            training_report = RLTrainingReport.make_union_instance(
                reporter.generate_training_report())
        logger_data = self._lightning_trainer.logger.line_plot_aggregated
        self._lightning_trainer.logger.clear_local_data()
        return RLTrainingOutput(training_report=training_report,
                                logger_data=logger_data)
Example #3
    def __init__(
        self,
        *,
        input_table_spec: Optional[TableSpec] = None,
        reward_options: Optional[RewardOptions] = None,
        setup_data: Optional[Dict[str, bytes]] = None,
        saved_setup_data: Optional[Dict[str, bytes]] = None,
        reader_options: Optional[ReaderOptions] = None,
        resource_options: Optional[ResourceOptions] = None,
        model_manager=None,
    ):
        super().__init__()
        self.input_table_spec = input_table_spec
        self.reward_options = reward_options or RewardOptions()
        self.reader_options = reader_options or ReaderOptions()
        self.resource_options = resource_options or ResourceOptions(gpu=0)
        self._model_manager = model_manager
        self.setup_data = setup_data
        self.saved_setup_data = saved_setup_data or {}

        self._setup_done = False
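A construction sketch for this initializer; `WorkflowManager` is a hypothetical stand-in for whichever class this __init__ belongs to, and the argument values are illustrative:

    # Hypothetical instantiation; every argument is keyword-only and optional, so
    # omitted ones fall back to RewardOptions(), ReaderOptions(), and ResourceOptions(gpu=0).
    runner = WorkflowManager(
        input_table_spec=None,
        reader_options=ReaderOptions(),
    )
    assert runner.saved_setup_data == {} and runner._setup_done is False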
Example #4
def query_and_train(
    input_table_spec: TableSpec,
    model: ModelManager__Union,
    num_epochs: int,
    use_gpu: bool,
    *,
    setup_data: Optional[Dict[str, bytes]] = None,
    saved_setup_data: Optional[Dict[str, bytes]] = None,
    normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
    reward_options: Optional[RewardOptions] = None,
    reader_options: Optional[ReaderOptions] = None,
    resource_options: Optional[ResourceOptions] = None,
    warmstart_path: Optional[str] = None,
    validator: Optional[ModelValidator__Union] = None,
    publisher: Optional[ModelPublisher__Union] = None,
    named_model_ids: Optional[ModuleNameToEntityId] = None,
    recurring_period: Optional[RecurringPeriod] = None,
) -> RLTrainingOutput:
    child_workflow_id = get_workflow_id()
    if named_model_ids is None:
        named_model_ids = get_new_named_entity_ids(model.value.serving_module_names())

    logger.info("Starting query")

    reward_options = reward_options or RewardOptions()
    reader_options = reader_options or ReaderOptions()
    resource_options = resource_options or ResourceOptions()
    manager = model.value

    if saved_setup_data is not None:

        def _maybe_get_bytes(v) -> bytes:
            if isinstance(v, bytes):
                return v

            # HACK: FBLearner sometimes packs bytes into a Blob
            return v.data

        saved_setup_data = {k: _maybe_get_bytes(v) for k, v in saved_setup_data.items()}

    if setup_data is None:
        data_module = manager.get_data_module(
            input_table_spec=input_table_spec,
            reward_options=reward_options,
            reader_options=reader_options,
            saved_setup_data=saved_setup_data,
        )
        if data_module is not None:
            setup_data = data_module.prepare_data()
            # Throw away existing normalization data map
            normalization_data_map = None

    if sum([int(setup_data is not None), int(normalization_data_map is not None)]) != 1:
        raise ValueError(
            "Exactly one of setup_data and normalization_data_map must be provided"
        )

    train_dataset = None
    eval_dataset = None
    if normalization_data_map is not None:
        calc_cpe_in_training = manager.should_generate_eval_dataset
        sample_range_output = get_sample_range(input_table_spec, calc_cpe_in_training)
        train_dataset = manager.query_data(
            input_table_spec=input_table_spec,
            sample_range=sample_range_output.train_sample_range,
            reward_options=reward_options,
        )
        eval_dataset = None
        if calc_cpe_in_training:
            eval_dataset = manager.query_data(
                input_table_spec=input_table_spec,
                sample_range=sample_range_output.eval_sample_range,
                reward_options=reward_options,
            )

    logger.info("Starting training")
    results = manager.train_workflow(
        train_dataset,
        eval_dataset,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        setup_data=setup_data,
        normalization_data_map=normalization_data_map,
        named_model_ids=named_model_ids,
        child_workflow_id=child_workflow_id,
        reward_options=reward_options,
        reader_options=reader_options,
        resource_options=resource_options,
        warmstart_path=warmstart_path,
    )

    if validator is not None:
        results = run_validator(validator, results)

    if publisher is not None:
        results = run_publisher(
            publisher,
            model,
            results,
            named_model_ids,
            child_workflow_id,
            recurring_period,
        )

    return results
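The setup_data / normalization_data_map check above requires that exactly one of the two is populated before training starts; a condensed sketch of the same rule, equivalent to the sum-based test:

    # Equivalent formulation of the exclusivity check in query_and_train():
    if (setup_data is None) == (normalization_data_map is None):
        raise ValueError(
            "Exactly one of setup_data and normalization_data_map must be provided"
        )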
Example #5
def train_workflow(
    model_manager: ModelManager,
    train_dataset: Optional[Dataset],
    eval_dataset: Optional[Dataset],
    *,
    num_epochs: int,
    use_gpu: bool,
    named_model_ids: ModuleNameToEntityId,
    child_workflow_id: int,
    setup_data: Optional[Dict[str, bytes]] = None,
    normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
    reward_options: Optional[RewardOptions] = None,
    reader_options: Optional[ReaderOptions] = None,
    resource_options: Optional[ResourceOptions] = None,
    warmstart_path: Optional[str] = None,
) -> RLTrainingOutput:
    writer = SummaryWriter()
    logger.info("TensorBoard logging location is: {}".format(writer.log_dir))

    if setup_data is not None:
        data_module = model_manager.get_data_module(
            setup_data=setup_data,
            reward_options=reward_options,
            reader_options=reader_options,
            resource_options=resource_options,
        )
        assert data_module is not None
        data_module.setup()
    else:
        data_module = None

    if normalization_data_map is None:
        assert data_module is not None
        normalization_data_map = data_module.get_normalization_data_map()

    warmstart_input_path = warmstart_path or None
    trainer_module = model_manager.build_trainer(
        use_gpu=use_gpu,
        reward_options=reward_options,
        normalization_data_map=normalization_data_map,
    )

    if not reader_options:
        reader_options = ReaderOptions()

    if not resource_options:
        resource_options = ResourceOptions()

    with summary_writer_context(writer):
        train_output, lightning_trainer = model_manager.train(
            trainer_module,
            train_dataset,
            eval_dataset,
            None,
            data_module,
            num_epochs,
            reader_options,
            resource_options,
            checkpoint_path=warmstart_input_path,
        )

    output_paths = {}
    for module_name, serving_module in model_manager.build_serving_modules(
            trainer_module, normalization_data_map).items():
        torchscript_output_path = f"{model_manager.__class__.__name__}_{module_name}_{round(time.time())}.torchscript"
        torch.jit.save(serving_module, torchscript_output_path)
        logger.info(f"Saved {module_name} to {torchscript_output_path}")
        output_paths[module_name] = torchscript_output_path
    return dataclasses.replace(train_output, output_paths=output_paths)
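The export loop above writes each serving module to a TorchScript file whose name combines the manager class, the module name, and a timestamp; reading one back uses the standard torch.jit.load counterpart (the dictionary key below is illustrative, not guaranteed by the example):

    # Hypothetical round-trip of one exported module; torch.jit.load pairs with torch.jit.save.
    import torch

    serving_module = torch.jit.load(output_paths["default_model"])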
Example #6
def query_and_train(
    input_table_spec: TableSpec,
    model: ModelManager__Union,
    normalization_data_map: Dict[str, NormalizationData],
    num_epochs: int,
    use_gpu: bool,
    reward_options: Optional[RewardOptions] = None,
    reader_options: Optional[ReaderOptions] = None,
    resource_options: Optional[ResourceOptions] = None,
    warmstart_path: Optional[str] = None,
    validator: Optional[ModelValidator__Union] = None,
    publisher: Optional[ModelPublisher__Union] = None,
    parent_workflow_id: Optional[int] = None,
    recurring_period: Optional[RecurringPeriod] = None,
) -> RLTrainingOutput:
    child_workflow_id = get_workflow_id()
    if parent_workflow_id is None:
        parent_workflow_id = child_workflow_id

    logger.info("Starting query")

    reward_options = reward_options or RewardOptions()
    reader_options = reader_options or ReaderOptions()
    resource_options = resource_options or ResourceOptions()
    manager = model.value

    calc_cpe_in_training = manager.should_generate_eval_dataset
    sample_range_output = get_sample_range(input_table_spec,
                                           calc_cpe_in_training)
    train_dataset = manager.query_data(
        input_table_spec=input_table_spec,
        sample_range=sample_range_output.train_sample_range,
        reward_options=reward_options,
    )
    eval_dataset = None
    if calc_cpe_in_training:
        eval_dataset = manager.query_data(
            input_table_spec=input_table_spec,
            sample_range=sample_range_output.eval_sample_range,
            reward_options=reward_options,
        )

    logger.info("Starting training")
    results = manager.train_workflow(
        train_dataset,
        eval_dataset,
        normalization_data_map,
        num_epochs,
        use_gpu,
        parent_workflow_id=parent_workflow_id,
        child_workflow_id=child_workflow_id,
        reward_options=reward_options,
        reader_options=reader_options,
        resource_options=resource_options,
        warmstart_path=warmstart_path,
    )

    if validator is not None:
        results = run_validator(validator, results)

    if publisher is not None:
        results = run_publisher(
            publisher,
            model,
            results,
            parent_workflow_id,
            child_workflow_id,
            recurring_period,
        )

    return results
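A hypothetical invocation of this variant; unlike Example #4 it requires the normalization map up front and always queries the train and eval datasets itself (all argument values below are placeholders):

    # Hypothetical call; my_table_spec, my_model_union, and my_norm_map stand in for a
    # TableSpec, a ModelManager__Union, and a Dict[str, NormalizationData], respectively.
    results = query_and_train(
        input_table_spec=my_table_spec,
        model=my_model_union,
        normalization_data_map=my_norm_map,
        num_epochs=20,
        use_gpu=True,
    )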
Example #7
    def train_workflow(
        self,
        train_dataset: Optional[Dataset],
        eval_dataset: Optional[Dataset],
        *,
        num_epochs: int,
        use_gpu: bool,
        named_model_ids: ModuleNameToEntityId,
        child_workflow_id: int,
        setup_data: Optional[Dict[str, bytes]] = None,
        normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
        reward_options: Optional[RewardOptions] = None,
        reader_options: Optional[ReaderOptions] = None,
        resource_options: Optional[ResourceOptions] = None,
        warmstart_path: Optional[str] = None,
    ) -> RLTrainingOutput:
        writer = SummaryWriter()
        logger.info("TensorBoard logging location is: {}".format(
            writer.log_dir))

        if setup_data is not None:
            data_module = self.get_data_module(setup_data=setup_data,
                                               reader_options=reader_options)
            assert data_module is not None
            data_module.setup()
        else:
            data_module = None

        if normalization_data_map is None:
            assert data_module is not None
            normalization_data_map = data_module.get_normalization_data_map(
                self.required_normalization_keys)

        warmstart_input_path = warmstart_path or None
        self.initialize_trainer(
            use_gpu=use_gpu,
            # pyre-fixme[6]: Expected `RewardOptions` for 2nd param but got
            #  `Optional[RewardOptions]`.
            reward_options=reward_options,
            normalization_data_map=normalization_data_map,
            warmstart_path=warmstart_input_path,
        )

        if not reader_options:
            reader_options = ReaderOptions()

        if not resource_options:
            resource_options = ResourceOptions()

        with summary_writer_context(writer):
            train_output = self.train(
                train_dataset,
                eval_dataset,
                data_module,
                num_epochs,
                reader_options,
                resource_options,
            )

        output_paths = {}
        for module_name, serving_module in self.build_serving_modules().items():
            # TODO: make this a parameter
            torchscript_output_path = f"model_{round(time.time())}.torchscript"
            torch.jit.save(serving_module, torchscript_output_path)
            logger.info(f"Saved {module_name} to {torchscript_output_path}")
            output_paths[module_name] = torchscript_output_path
        return dataclasses.replace(train_output, output_paths=output_paths)
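Because the path inside the loop above omits the module name, two serving modules exported in the same second would overwrite each other (the in-code TODO hints at making the path configurable); a hedged sketch of a collision-free alternative, not part of the original:

    # Hypothetical alternative naming that keys each file on its module name as well:
    torchscript_output_path = f"{module_name}_{round(time.time())}.torchscript"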