def test_build_objective():
    cfg_model = {
        "outputs": [
            {
                "name": "out1",
                "loss": {"import": "categorical_crossentropy"},
                "loss_weight": 0.7,
                "metrics": [
                    {"import": "accuracy"},
                    {
                        "import": "tensorflow.keras.metrics.Precision",
                        "params": {"name": "p"},
                    },
                    {"import": "tf.keras.losses.CategoricalCrossentropy"},
                ],
                "sample_weight_mode": "temporal",
            },
            {
                "name": "out2",
                "loss": {"import": "tensorflow.keras.losses.CategoricalCrossentropy"},
                "loss_weight": 0.3,
            },
        ]
    }

    objective = model.build_objective(cfg_model)

    assert objective["loss"]["out1"] == "categorical_crossentropy"
    assert isinstance(
        objective["loss"]["out2"], tf.keras.losses.CategoricalCrossentropy
    )
    assert objective["metrics"]["out1"][0] == "accuracy"
    assert isinstance(objective["metrics"]["out1"][1], tf.keras.metrics.Precision)
    assert objective["metrics"]["out1"][1].name == "p"
    assert isinstance(
        objective["metrics"]["out1"][2], tf.keras.losses.CategoricalCrossentropy
    )
    assert objective["metrics"]["out2"] == []
    assert objective["loss_weights"] == {"out1": 0.7, "out2": 0.3}
    assert objective["sample_weight_mode"] == {"out1": "temporal", "out2": None}
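# A hypothetical sketch of the build_objective contract exercised by the test
# above, not the real implementation in the model module. Assumptions drawn
# from the assertions: bare names ("accuracy", "categorical_crossentropy")
# pass through as strings, dotted import paths (with "tf." treated as an
# alias for "tensorflow.") are resolved and instantiated with "params",
# "metrics" defaults to [], and "sample_weight_mode" defaults to None.
import importlib


def _import_obj_sketch(spec: dict):
    """Resolve an {"import": ..., "params": ...} spec (hypothetical helper)."""
    path = spec["import"]
    if "." not in path:
        return path  # bare Keras alias, handed to compile() as a string
    if path.startswith("tf."):
        path = "tensorflow." + path[len("tf."):]
    module_name, _, attr = path.rpartition(".")
    obj = getattr(importlib.import_module(module_name), attr)
    return obj(**spec.get("params", {}))


def build_objective_sketch(cfg_model: dict) -> dict:
    """Assemble per-output loss/metrics/loss_weights/sample_weight_mode."""
    objective = {
        "loss": {},
        "metrics": {},
        "loss_weights": {},
        "sample_weight_mode": {},
    }
    for output in cfg_model["outputs"]:
        name = output["name"]
        objective["loss"][name] = _import_obj_sketch(output["loss"])
        objective["metrics"][name] = [
            _import_obj_sketch(m) for m in output.get("metrics", [])
        ]
        objective["loss_weights"][name] = output["loss_weight"]
        objective["sample_weight_mode"][name] = output.get("sample_weight_mode")
    return objective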
def train(
    self,
    cfg: dict,
    records_train: api.InputRecords,
    records_validation: api.InputRecords,
) -> tf.keras.Model:
    """Train the network.

    Args:
        cfg: dict, config.
        records_train: InputRecords, training records.
        records_validation: InputRecords, validation records.

    Returns:
        tf.keras.Model, trained network.
    """
    logger.info("Starting training")
    tf_utils.reset()

    cfg = config.prepare_config(cfg)

    logger.info(f"Creating artifact directory: {self.artifact_dir}")
    services.make_artifact_dir(self.artifact_dir)
    io_utils.save_json(cfg, "config.json", self.artifact_dir)
    io_utils.save_pickle(cfg, "config.pkl", self.artifact_dir)

    logger.info("Creating datasets")
    ds_train = dataset.RecordDataset(
        artifact_dir=self.artifact_dir,
        cfg_dataset=cfg["dataset"],
        records=records_train,
        mode=api.RecordMode.TRAIN,
        batch_size=cfg["solver"]["batch_size"],
    )
    ds_validation = dataset.RecordDataset(
        artifact_dir=self.artifact_dir,
        cfg_dataset=cfg["dataset"],
        records=records_validation,
        mode=api.RecordMode.VALIDATION,
        batch_size=cfg["solver"]["batch_size"],
    )

    network_params = ds_train.transformer.network_params
    io_utils.save_json(network_params, "network_params.json", self.artifact_dir)
    io_utils.save_pickle(network_params, "network_params.pkl", self.artifact_dir)

    logger.info("Building network")
    net = model.build_network(cfg["model"], network_params)
    model.check_output_names(cfg["model"], net)

    logger.info("Compiling network")
    opt = solver.build_optimizer(cfg["solver"])
    objective = model.build_objective(cfg["model"])
    net.compile(optimizer=opt, **objective)

    logger.info("Creating services")
    callbacks = services.create_all_services(self.artifact_dir, cfg["services"])

    if "learning_rate_reducer" in cfg["solver"]:
        logger.info("Creating learning rate reducer")
        callbacks.append(solver.create_learning_rate_reducer(cfg["solver"]))

    logger.info("Training network")
    net.summary()
    net.fit(
        ds_train,
        validation_data=ds_validation,
        epochs=cfg["solver"]["epochs"],
        steps_per_epoch=cfg["solver"].get("steps"),
        callbacks=callbacks,
        verbose=1,
    )

    return net
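# For reference, a hypothetical minimal config illustrating the keys this
# train() method reads. Only the "solver" fields accessed above are grounded
# in the code; the contents of "dataset", "model", and "services" follow
# schemas defined elsewhere and are left empty here.
example_cfg = {
    "dataset": {},   # consumed by dataset.RecordDataset as cfg_dataset
    "model": {},     # consumed by model.build_network / model.build_objective
    "services": {},  # consumed by services.create_all_services
    "solver": {
        # also holds optimizer settings consumed by solver.build_optimizer
        "batch_size": 32,  # batching for both the train and validation datasets
        "epochs": 10,      # net.fit(epochs=...)
        "steps": None,     # optional steps_per_epoch; None means a full pass
        # "learning_rate_reducer": {...},  # optional, adds an extra callback
    },
}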
def train(
    self,
    cfg: dict,
    records_train: Union[pd.DataFrame, api.Records],
    records_validation: Union[pd.DataFrame, api.Records],
    workers: int = 10,
    max_queue_size: int = 10,
) -> tf.keras.Model:
    """Train the network.

    Args:
        cfg: dict, config.
        records_train: Union[pd.DataFrame, Records], training records.
        records_validation: Union[pd.DataFrame, Records], validation records.
        workers: int (OPTIONAL = 10), number of worker processes or threads
            for the sequence.
        max_queue_size: int (OPTIONAL = 10), queue size for the sequence.

    Returns:
        tf.keras.Model, trained network.
    """
    logger.info("Starting training")
    tf_utils.reset()

    logger.info("Validating config schema and applying defaults")
    cfg = config.prepare_config(cfg)

    logger.info(f"Making artifact directory: {self._artifact_dir}")
    services.make_artifact_dir(self._artifact_dir)

    logger.info("Saving config")
    io_utils.save_json(cfg, "config.json", self._artifact_dir)
    io_utils.save_pickle(cfg, "config.pkl", self._artifact_dir)

    logger.info("Building datasets")
    ds_train = dataset.RecordDataset(
        artifact_dir=self._artifact_dir,
        cfg_dataset=cfg["dataset"],
        records=records_train,
        mode=api.RecordMode.TRAIN,
        batch_size=cfg["solver"]["batch_size"],
    )
    ds_validation = dataset.RecordDataset(
        artifact_dir=self._artifact_dir,
        cfg_dataset=cfg["dataset"],
        records=records_validation,
        mode=api.RecordMode.VALIDATION,
        batch_size=cfg["solver"]["batch_size"],
    )

    network_params = ds_train.transformer.network_params
    io_utils.save_json(network_params, "network_params.json", self._artifact_dir)
    io_utils.save_pickle(network_params, "network_params.pkl", self._artifact_dir)

    logger.info("Building network")
    net = model.build_network(cfg["model"], network_params)

    logger.info("Checking network output names match config output names")
    model.check_output_names(cfg["model"], net)

    logger.info("Building optimizer")
    opt = solver.build_optimizer(cfg["solver"])

    logger.info("Building objective")
    objective = model.build_objective(cfg["model"])

    logger.info("Compiling network")
    net.compile(optimizer=opt, **objective)
    metrics_names = net.metrics_names

    logger.info("Creating services")
    callbacks = services.create_all_services(
        self._artifact_dir, cfg["services"], metrics_names
    )

    if "learning_rate_reducer" in cfg["solver"]:
        logger.info("Creating learning rate reducer")
        callbacks.append(
            solver.create_learning_rate_reducer(cfg["solver"], metrics_names)
        )

    logger.info("Training network")
    # summary() prints and returns None, so route its output through the logger
    # instead of logging the return value.
    net.summary(print_fn=logger.info)
    net.fit_generator(
        ds_train,
        validation_data=ds_validation,
        epochs=cfg["solver"]["epochs"],
        steps_per_epoch=cfg["solver"].get("steps"),
        callbacks=callbacks,
        use_multiprocessing=(workers > 1),
        max_queue_size=max_queue_size,
        workers=workers,
        verbose=1,
    )

    return net
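# Hypothetical usage of the generator-based train() above. The Trainer class
# name, its constructor, the CSV paths, and the save location are illustrative
# assumptions; only the train() signature itself comes from this module.
import pandas as pd

records_train = pd.read_csv("records_train.csv")            # assumed source
records_validation = pd.read_csv("records_validation.csv")  # assumed source

trainer = Trainer(artifact_dir="artifacts/run1")  # hypothetical constructor
net = trainer.train(
    cfg,  # assumed to be a config dict matching the schema prepare_config expects
    records_train,
    records_validation,
    workers=2,          # workers > 1 enables use_multiprocessing in fit_generator
    max_queue_size=10,  # bounds the prefetched batch queue
)
net.save("artifacts/run1/model.h5")  # standard tf.keras.Model save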