def test_summary_writer_setter(self):
    callbacks, _ = self._get_callbacks_and_incoming_nucleotides()
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    summary_writer = tf.summary.FileWriter(self.get_temp_dir())
    callbacks_handler.summary_writer = summary_writer
    for callback in callbacks:
        self.assertIs(summary_writer, callback.summary_writer)

def test_iteration_info_setter(self):
    iteration_info = RunIterationInfo(1, 100, 10)
    callbacks, _ = self._get_callbacks_and_incoming_nucleotides()
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    callbacks_handler.iteration_info = iteration_info
    for callback in callbacks:
        self.assertTupleEqual(tuple(iteration_info),
                              tuple(callback.iteration_info))

def test_end_call_every_callback(self):
    callbacks, _ = self._get_callbacks_and_incoming_nucleotides()
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    for callback in callbacks:
        callback.end = MagicMock(return_value=None)
    callbacks_handler.end()
    for callback in callbacks:
        self.assertEqual(1, callback.end.call_count)
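
The tests above all exercise the same broadcast pattern: assigning an attribute on the handler propagates the value to every managed callback. Below is a minimal standalone sketch of that pattern, assuming only that the handler keeps its callbacks in a plain list; the real CallbacksHandler property names and internals may differ.

class BroadcastHandlerSketch:
    """Hypothetical stand-in mirroring the setter behavior under test."""

    def __init__(self, callbacks):
        self.callbacks = callbacks
        self._summary_writer = None

    @property
    def summary_writer(self):
        return self._summary_writer

    @summary_writer.setter
    def summary_writer(self, writer):
        # keep a local reference and broadcast to every managed callback
        self._summary_writer = writer
        for callback in self.callbacks:
            callback.summary_writer = writer
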
Example 4
def _maybe_add_empty_callbacks_handler(self):
    if self.callbacks_handler is None:
        self.callbacks_handler = CallbacksHandler(
            callbacks=[]).build()
    elif isinstance(self.callbacks_handler, dict):
        for each_mode in self.callbacks_handler:
            if self.callbacks_handler[each_mode] is None:
                self.callbacks_handler[each_mode] = CallbacksHandler(
                    callbacks=[]).build()
def test_evaluator_callbacks_getter(self):
    (callbacks, incoming_nucleotides
     ) = self._get_callbacks_and_incoming_nucleotides()
    evaluator = KPIEvaluator([], []).build()
    kpi_callback = convert_evaluator_to_callback(evaluator)
    callbacks.append(kpi_callback)
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    kpi_callbacks = callbacks_handler.kpi_evaluators
    self.assertListEqual([evaluator], kpi_callbacks)
Example 6
    @staticmethod
    def _get_callbacks_handler(mode: str):
        callback1 = CoordinatorCallback(name='callback1',
                                        inbound_nodes=['dataset', 'mlp'])
        callback1.incoming_keys = ['image', 'predictions']
        callback1.generated_keys = ['result']

        callbacks = [callback1]
        if mode == 'train':
            callback2 = CoordinatorCallback(
                name='callback2', inbound_nodes=['dataset', 'callback1'])
            callback2.incoming_keys = ['image', 'result']
            callbacks.append(callback2)

        callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
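        # stub out the real DNA building and register the nucleotides manually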
        callbacks_handler.build_dna = MagicMock(return_value=None)
        callbacks_handler._all_nucleotides = {}
        for gene_name in callbacks_handler.gene_name_and_nucleotide_super_cls:
            callbacks_handler.all_nucleotides.update(
                getattr(callbacks_handler, gene_name))
        return callbacks_handler
    def test_call(self):
        (callbacks, incoming_nucleotides
         ) = self._get_callbacks_and_incoming_nucleotides()
        callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
        callbacks_handler.build_dna(incoming_nucleotides)
        data = {
            'input_node1': {
                'output1': 0,
                'output2': 1
            },
            'input_node2': {
                'output1': 3
            }
        }
        result = callbacks_handler(**data)
        result_must = {
            'callback1': {
                'output11': 'callback1_output11',
                'output12': 'callback1_output12'
            },
            'callback2': {
                'output21': 'callback2_output21',
                'output22': 'callback2_output22'
            },
            'callback3': {
                'output31': 'callback3_output31'
            }
        }
        self.assertDictEqual(result_must, result)

        callback1 = callbacks[0]
        callback2 = callbacks[1]
        callback3 = callbacks[2]

        callback1.on_iteration_end.assert_called_once_with(input11=0)
        callback2.on_iteration_end.assert_called_once_with(
            input21=3, input22='callback1_output11')
        callback3.on_iteration_end.assert_called_once_with(
            input31="callback1_output11", input32="callback2_output22")
def test_kpi_evaluators_dna_helices(self, evaluator_dna_helix,
                                    with_evaluator):
    evaluator_dna_helix.return_value = "evaluator_DNA_helix"
    (callbacks, incoming_nucleotides
     ) = self._get_callbacks_and_incoming_nucleotides()
    if with_evaluator:
        evaluator = KPIEvaluator([], []).build()
        kpi_callback = convert_evaluator_to_callback(evaluator)
        callbacks.append(kpi_callback)
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    kpi_dna_helices = callbacks_handler.kpi_evaluators_dna_helices
    if not with_evaluator:
        self.assertIsNone(kpi_dna_helices)
    else:
        self.assertDictEqual({"kpi_evaluator": "evaluator_DNA_helix"},
                             kpi_dna_helices)
Example 9
def build_train(project_dir: str, *,
                trainer_config: dict,
                datasets_config: dict,
                model_config: Optional[dict] = None,
                plugins_config: list,
                losses_config: list,
                postprocessors_config: Optional[List[dict]] = None,
                metrics_config: Optional[List[dict]] = None,
                kpi_config: Optional[List[dict]] = None,
                summaries_config: Optional[List[dict]] = None,
                callbacks_config: Optional[List[dict]] = None,
                callbacks_train_config: Optional[List[dict]] = None,
                callbacks_eval_config: Optional[List[dict]] = None,
                continue_training: bool = False) -> Trainer:
    """
    Build all components for training

    Parameters
    ----------
    project_dir
        project directory
    trainer_config
        trainer configuration
    datasets_config
        datasets configuration for each run mode
    model_config
        model configuration
    plugins_config
        configurations of single plugins inside the model
    losses_config
        configurations of single losses inside the model
    postprocessors_config
        configurations of single postprocessors inside the model
    metrics_config
        configurations of single metrics inside the model
    kpi_config
        configurations of single kpi plugins and accumulators inside the model
    summaries_config
        configurations of single summaries inside the model
    callbacks_config
        configurations of single callbacks inside the model that are shared
        between training and evaluation (the callback instances will still
        differ)
    callbacks_train_config
        configurations of single callbacks inside the model used for training
    callbacks_eval_config
        configurations of single callbacks inside the model used for
        evaluation
    continue_training
        whether training should be continued if a project already exists
        inside project_dir

    Returns
    -------
    trainer
        trainer ready to run

    """
    # pylint: disable=too-many-arguments,too-many-locals
    # train takes many arguments; splitting it further would be more confusing
    plugins = model_builder.build_model_nucleotides(
        plugins_config, base_cls=ModelPlugin)
    losses = model_builder.build_model_nucleotides(
        losses_config, base_cls=ModelLoss)
    summaries = model_builder.build_model_nucleotides(
        summaries_config, base_cls=ModelSummary)
    postprocessors = model_builder.build_model_nucleotides(
        postprocessors_config, base_cls=ModelPostProcessor)
    metrics = model_builder.build_model_nucleotides(
        metrics_config, base_cls=ModelMetric)
    mixed_precision_config = _build_mixed_precision_config(model_config)

    model = model_builder.build(model_config,
                                plugins=plugins,
                                losses=losses,
                                postprocessors=postprocessors,
                                metrics=metrics,
                                summaries=summaries,
                                mixed_precision_config=mixed_precision_config)

    datasets_config = _filter_datasets_for_run(datasets_config)
    datasets = _build_datasets_for_each_mode(datasets_config)

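    # the shared callback configs are prepended to the mode-specific ones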
    callbacks_train_config = ((callbacks_config or []) +
                              (callbacks_train_config or []))
    callbacks_eval_config = ((callbacks_config or []) +
                             (callbacks_eval_config or []))

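    # the KPI evaluator, if configured, becomes an additional eval callback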
    kpi_plugins_and_accumulators = kpi_builder.build_kpi_plugins(kpi_config)
    kpi_evaluator_callback = kpi_builder.build_kpi_evaluator_as_callback(
        kpi_plugins_and_accumulators)

    callbacks_train = callback_builder.build_callbacks_chain(
        callbacks_train_config)
    callbacks_eval = callback_builder.build_callbacks_chain(
        callbacks_eval_config)
    if kpi_evaluator_callback:
        callbacks_eval.append(kpi_evaluator_callback)
    callbacks_handler_train = CallbacksHandler(
        callbacks=callbacks_train).build()
    callbacks_handler_eval = CallbacksHandler(
        callbacks=callbacks_eval).build()

    trainer = trainer_builder.build(
        trainer_config=trainer_config,
        model=model,
        project_dir=project_dir,
        datasets=datasets,
        callbacks_handler_train=callbacks_handler_train,
        callbacks_handler_eval=callbacks_handler_eval,
        continue_training=continue_training)
    return trainer
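
A hedged usage sketch of the call shape: every path and config value below is a hypothetical placeholder, not taken from the project, and a real run needs fully populated configs for each component.

trainer = build_train(
    "/path/to/project_dir",                      # hypothetical project directory
    trainer_config={},                           # placeholder trainer settings
    datasets_config={"train": {}, "eval": {}},   # placeholder per-mode configs
    plugins_config=[],                           # placeholder plugin configs
    losses_config=[],                            # placeholder loss configs
    continue_training=False)
# the returned trainer is ready to run (see Returns above)
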
Example 10
def build_infer(project_dir: str, *,
                run_name: Optional[str] = None,
                datafeeder_config: dict,
                callbacks_config: list,
                kpi_config: Optional[List[dict]] = None,
                inferer_config: Optional[dict] = None,
                saved_model: Optional[str] = None,
                checkpoint: Optional[str] = None,
                batch_size: Optional[int],
                number_of_shards: int = 1,
                shard_index: int = 0,
                use_single_process: Optional[bool] = None,
                prefetch_buffer_size: Optional[int] = None,
                use_tensorrt: Optional[bool] = None,
                continue_last: bool = False) -> Inferer:
    """
    Build the inferer for an inference run based on its single components and
    configs

    Parameters
    ----------
    project_dir
        project directory
    run_name
        optional run name for the inference project
    datafeeder_config
        configuration of the data feeder
    callbacks_config
        configurations of single callbacks for inference
    kpi_config
        configurations of single kpi plugins and accumulators inside the model
    inferer_config
        configuration of the inferer
    saved_model
        path to the saved_model folder relative to project_dir/saved_models
    checkpoint
        path to the checkpoint file used to restore the variables, relative to
        project_dir/checkpoint; the meta graph must be in the same folder
    batch_size
        batch size to use for inference
    number_of_shards
        number of shards for the datafeeder file list, if a file list was
        provided
    shard_index
        shard index for the datafeeder file list, if a file list was provided
    use_single_process
        whether data prefetching, prediction and callbacks must be executed in
        one single process
    prefetch_buffer_size
        number of batches to prefetch; must be >= 1
    use_tensorrt
        whether TensorRT should be enabled
    continue_last
        whether the last project must be continued

    Returns
    -------
    inferer
        inferer ready to run

    """
    # pylint: disable=too-many-locals
    # all the variables are needed for now
    # TODO([email protected]): refactor and combine arguments
    inferer_config = inferer_config or {}
    if run_name:
        inferer_config["project_additional_kwargs"] = {"run_name": run_name}
    inferer_config = _update_inferer_run_config(
        inferer_config, batch_size, prefetch_buffer_size, use_single_process)
    inferer_config = _update_inferer_load_config(
        inferer_config, saved_model, checkpoint)
    inferer_config = _update_inferer_tensorrt_config(
        inferer_config, use_tensorrt)
    datafeeder_config = _update_datafeeder_shards(
        datafeeder_config, number_of_shards, shard_index)
    data_feeder = data_feeder_builder.build(datafeeder_config)
    callbacks = callback_builder.build_callbacks_chain(callbacks_config)
    kpi_plugins_and_accumulators = kpi_builder.build_kpi_plugins(kpi_config)
    kpi_evaluator_callback = kpi_builder.build_kpi_evaluator_as_callback(
        kpi_plugins_and_accumulators)
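    # attach the KPI evaluator, if configured, as an extra inference callback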
    if kpi_evaluator_callback:
        callbacks.append(kpi_evaluator_callback)
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    inferer = inferer_builder.build(project_dir=project_dir,
                                    inferer_config=inferer_config,
                                    data_feeder=data_feeder,
                                    callbacks_handler=callbacks_handler,
                                    continue_last=continue_last)
    return inferer
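
A hedged usage sketch for build_infer, mirroring the one above: all values are hypothetical placeholders. Note that batch_size has no default and must always be passed.

inferer = build_infer(
    "/path/to/project_dir",
    datafeeder_config={},              # placeholder data feeder settings
    callbacks_config=[],               # placeholder callback configs
    saved_model="exported_model",      # hypothetical, relative to project_dir/saved_models
    batch_size=8,                      # required keyword-only argument
    number_of_shards=1,
    shard_index=0)
# the returned inferer is ready to run (see Returns above)
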
def test_mode_step_setter(self):
    callbacks, _ = self._get_callbacks_and_incoming_nucleotides()
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    callbacks_handler.mode = 'TEST_MODE'
    for callback in callbacks:
        self.assertEqual('TEST_MODE', callback.mode)

def test_summary_step_setter(self):
    callbacks, _ = self._get_callbacks_and_incoming_nucleotides()
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    callbacks_handler.summary_step = 120
    for callback in callbacks:
        self.assertEqual(120, callback.summary_step)

def test_log_dir_setter(self):
    callbacks, _ = self._get_callbacks_and_incoming_nucleotides()
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    callbacks_handler.log_dir = "current/log_dir"
    for callback in callbacks:
        self.assertEqual("current/log_dir", callback.log_dir)

def test_number_iterations_per_epoch_setter(self):
    callbacks, _ = self._get_callbacks_and_incoming_nucleotides()
    callbacks_handler = CallbacksHandler(callbacks=callbacks).build()
    callbacks_handler.number_iterations_per_epoch = 123
    for callback in callbacks:
        self.assertEqual(123, callback.number_iterations_per_epoch)