def _predict(self, batch):
    """ Run the plugin's ``predict`` on a single batch, translating known
    out-of-memory failures into a user-friendly :class:`FaceswapError`.

    Parameters
    ----------
    batch: dict
        The batch to feed through the plugin's predict function

    Returns
    -------
    dict
        The batch returned by :func:`predict`

    Raises
    ------
    FaceswapError
        If the GPU (TensorFlow or PlaidML/AMD) runs out of memory during
        prediction. Any other error is re-raised unchanged.
    """
    try:
        return self.predict(batch)
    except tf_errors.ResourceExhaustedError as oom_err:
        raise FaceswapError(
            "You do not have enough GPU memory available to run detection at the "
            "selected batch size. You can try a number of things:"
            "\n1) Close any other application that is using your GPU (web browsers are "
            "particularly bad for this)."
            "\n2) Lower the batchsize (the amount of images fed into the model) by "
            "editing the plugin settings (GUI: Settings > Configure extract settings, "
            "CLI: Edit the file faceswap/config/extract.ini)."
            "\n3) Enable 'Single Process' mode.") from oom_err
    except Exception as unknown_err:
        # Non-TF errors are only translated for the AMD (PlaidML) backend, where
        # OOM surfaces as a generic exception with recognisable text.
        if get_backend() != "amd":
            raise
        # pylint:disable=import-outside-toplevel
        from lib.plaidml_utils import is_plaidml_error
        plaidml_oom = is_plaidml_error(unknown_err) and (
            "CL_MEM_OBJECT_ALLOCATION_FAILURE" in str(unknown_err).upper()
            or "enough memory for the current schedule" in str(unknown_err).lower())
        if not plaidml_oom:
            raise
        raise FaceswapError(
            "You do not have enough GPU memory available to run detection at "
            "the selected batch size. You can try a number of things:"
            "\n1) Close any other application that is using your GPU (web "
            "browsers are particularly bad for this)."
            "\n2) Lower the batchsize (the amount of images fed into the "
            "model) by editing the plugin settings (GUI: Settings > Configure "
            "extract settings, CLI: Edit the file "
            "faceswap/config/extract.ini).") from unknown_err
def _predict(self, batch):
    """ Wrap the model's predict function in rotations.

    Runs detection once per configured rotation angle, keeping the first successful
    detection for each image, and stops early once every image in the batch has at
    least one detected face.

    Parameters
    ----------
    batch: dict
        The detection batch. Must contain a ``feed`` key (one entry per image) and a
        ``filename`` key. The ``rotmat`` and ``prediction`` keys are (re)populated by
        this method.

    Returns
    -------
    dict
        The batch with ``prediction`` holding the retained detections per image and
        ``rotmat`` holding the rotation matrices applied by :func:`_rotate_batch`.

    Raises
    ------
    FaceswapError
        If the GPU (TensorFlow or PlaidML/AMD) runs out of memory during detection.
        Any other error is re-raised unchanged.
    """
    # Idiom fixes vs previous version: iterate the sequence directly rather than
    # `range(len(...))`, and use generator expressions inside any()/all() so the
    # short-circuiting builtins don't materialize throwaway lists.
    batch["rotmat"] = [np.array([]) for _ in batch["feed"]]
    found_faces = [np.array([]) for _ in batch["feed"]]
    for angle in self.rotation:
        # Rotate the batch and insert placeholders for already found faces
        self._rotate_batch(batch, angle)
        try:
            batch = self.predict(batch)
        except tf_errors.ResourceExhaustedError as err:
            msg = ("You do not have enough GPU memory available to run detection at the "
                   "selected batch size. You can try a number of things:"
                   "\n1) Close any other application that is using your GPU (web browsers are "
                   "particularly bad for this)."
                   "\n2) Lower the batchsize (the amount of images fed into the model) by "
                   "editing the plugin settings (GUI: Settings > Configure extract settings, "
                   "CLI: Edit the file faceswap/config/extract.ini)."
                   "\n3) Enable 'Single Process' mode.")
            raise FaceswapError(msg) from err
        except Exception as err:
            if get_backend() == "amd":
                # pylint:disable=import-outside-toplevel
                from lib.plaidml_utils import is_plaidml_error
                if (is_plaidml_error(err) and (
                        "CL_MEM_OBJECT_ALLOCATION_FAILURE" in str(err).upper() or
                        "enough memory for the current schedule" in str(err).lower())):
                    msg = ("You do not have enough GPU memory available to run detection at "
                           "the selected batch size. You can try a number of things:"
                           "\n1) Close any other application that is using your GPU (web "
                           "browsers are particularly bad for this)."
                           "\n2) Lower the batchsize (the amount of images fed into the "
                           "model) by editing the plugin settings (GUI: Settings > Configure "
                           "extract settings, CLI: Edit the file "
                           "faceswap/config/extract.ini).")
                    raise FaceswapError(msg) from err
            raise
        if angle != 0 and any(face.any() for face in batch["prediction"]):
            logger.verbose("found face(s) by rotating image %s degrees", angle)
        # Keep the detection from the earliest angle that found a face for each image
        found_faces = [face if not found.any() else found
                       for face, found in zip(batch["prediction"], found_faces)]
        if all(face.any() for face in found_faces):
            logger.trace("Faces found for all images")
            break
    batch["prediction"] = found_faces
    logger.trace("detect_prediction output: (filenames: %s, prediction: %s, rotmat: %s)",
                 batch["filename"], batch["prediction"], batch["rotmat"])
    return batch
def train_one_step(self, viewer, timelapse_kwargs):
    """ Running training on a batch of images for each side.

    Triggered from the training cycle in :class:`scripts.train.Train`.

    * Runs a training batch through the model.
    * Outputs the iteration's loss values to the console
    * Logs loss to Tensorboard, if logging is requested.
    * If a preview or time-lapse has been requested, then pushes sample images through the \
    model to generate the previews
    * Creates a snapshot if the total iterations trained so far meet the requested snapshot \
    criteria

    Notes
    -----
    As every iteration is called explicitly, the Parameters defined should always be ``None``
    except on save iterations.

    Parameters
    ----------
    viewer: :func:`scripts.train.Train._show`
        The function that will display the preview image
    timelapse_kwargs: dict
        The keyword arguments for generating time-lapse previews. If a time-lapse preview is
        not required then this should be ``None``. Otherwise all values should be full paths
        the keys being `input_a`, `input_b`, `output`.

    Raises
    ------
    FaceswapError
        If the GPU (TensorFlow or PlaidML/AMD) runs out of memory during the training batch.
        Any other error is re-raised unchanged.
    """
    # Bump the iteration counter first: the snapshot predicate below deliberately
    # uses `iterations - 1` to compensate for this increment.
    self._model.state.increment_iterations()
    logger.trace("Training one step: (iteration: %s)", self._model.iterations)
    do_preview = viewer is not None
    do_timelapse = timelapse_kwargs is not None
    snapshot_interval = self._model.command_line_arguments.snapshot_interval
    # Snapshot on every `snapshot_interval`-th completed iteration; an interval of 0
    # disables snapshotting entirely.
    do_snapshot = (snapshot_interval != 0 and
                   self._model.iterations - 1 >= snapshot_interval and
                   (self._model.iterations - 1) % snapshot_interval == 0)

    model_inputs, model_targets = self._feeder.get_batch()
    try:
        loss = self._model.model.train_on_batch(model_inputs, y=model_targets)
    except tf_errors.ResourceExhaustedError as err:
        msg = ("You do not have enough GPU memory available to train the selected model at "
               "the selected settings. You can try a number of things:"
               "\n1) Close any other application that is using your GPU (web browsers are "
               "particularly bad for this)."
               "\n2) Lower the batchsize (the amount of images fed into the model each "
               "iteration)."
               "\n3) Try enabling 'Mixed Precision' training."
               "\n4) Use a more lightweight model, or select the model's 'LowMem' option "
               "(in config) if it has one.")
        raise FaceswapError(msg) from err
    except Exception as err:
        # On the AMD (PlaidML) backend, OOM surfaces as a generic exception with
        # recognisable text rather than a ResourceExhaustedError.
        if get_backend() == "amd":
            # pylint:disable=import-outside-toplevel
            from lib.plaidml_utils import is_plaidml_error
            if (is_plaidml_error(err) and (
                    "CL_MEM_OBJECT_ALLOCATION_FAILURE" in str(err).upper() or
                    "enough memory for the current schedule" in str(err).lower())):
                msg = ("You do not have enough GPU memory available to train the selected "
                       "model at the selected settings. You can try a number of things:"
                       "\n1) Close any other application that is using your GPU (web browsers "
                       "are particularly bad for this)."
                       "\n2) Lower the batchsize (the amount of images fed into the model "
                       "each iteration)."
                       "\n3) Use a more lightweight model, or select the model's 'LowMem' "
                       "option (in config) if it has one.")
                raise FaceswapError(msg) from err
        raise
    # Log the raw loss list before collation; the first element is dropped below
    # (presumably the combined total loss — TODO confirm against the model output).
    self._log_tensorboard(loss)
    loss = self._collate_and_store_loss(loss[1:])
    self._print_loss(loss)

    if do_snapshot:
        self._model.snapshot()
    if do_preview:
        self._feeder.generate_preview(do_preview)
        self._samples.images = self._feeder.compile_sample(None)
        samples = self._samples.show_sample()
        if samples is not None:
            viewer(samples,
                   "Training - 'S': Save Now. 'R': Refresh Preview. 'M': Toggle Mask. "
                   "'ENTER': Save and Quit")
    if do_timelapse:
        self._timelapse.output_timelapse(timelapse_kwargs)