class ModelRunner(object):

  # How many exception traceback tail characters to include in error results
  _MAX_TRACEBACK_TAIL = 400
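  # (i.e., traceback.format_exc()[-_MAX_TRACEBACK_TAIL:] keeps only the last
  # 400 characters of the formatted traceback in error results)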


  def __init__(self, modelID):
    """
    :param modelID: model ID; string
    """
    self._logger = _getLogger()

    self._modelID = modelID

    self._swapperAPI = ModelSwapperInterface()

    self._archiver = _ModelArchiver(self._modelID)

    # "deleteModel" command handler sets this flag to force our processing
    # loop to terminate
    self._done = False

    modelSwapperConfig = ModelSwapperConfig()

    self._targetMaxRequestsPerCheckpoint = modelSwapperConfig.getint(
      "model_runner", "target_requests_per_checkpoint")

    self._profiling = (
      modelSwapperConfig.getboolean("debugging", "profiling") or
      self._logger.isEnabledFor(logging.DEBUG))

    if self._profiling:
      self._logger.info("Profiling is turned on")

      self._modelLoadSec = 0


  @property
  def _model(self):
    """ An OPF Model object or None if not loaded yet """
    return self._archiver.model


  @property
  def _inputRowEncoder(self):
    """ An _InputRowEncoder object or None if model not loaded yet """
    return self._archiver.inputRowEncoder


  @property
  def _checkpointMgr(self):
    return self._archiver.checkpointMgr


  def __repr__(self):
    return "%s<modelID=%s>" % (self.__class__.__name__, self._modelID)


  def __enter__(self):
    """ Context Manager protocol method. Allows a ModelRunner instance
    to be used in a "with" statement for automatic clean-up

    Parameters:
    ------------------------------------------------------------------------
    retval:     self.
    """
    return self


  def __exit__(self, _excType, _excVal, _excTb):
    """ Context Manager protocol method. Allows a ModelRunner instance
    to be used in a "with" statement for automatic cleanup

    Returns: False so as not to suppress the exception, if any
    """
    self.close()
    return False
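
  # Typical usage (illustrative sketch; the modelID value is a placeholder):
  #
  #   with ModelRunner(modelID="abc123") as runner:
  #     runner.run()
  #
  # close() is then invoked automatically via __exit__, even if run() raises.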


  def close(self):
    """ Clean up """
    self._logger.debug("%r: Closing...", self)
    self._swapperAPI.close()


  @logExceptions(_getLogger())
  def run(self):
    startTime = time.time()

    self._logger.debug("%r: Starting run", self)

    totalBatches = 0
    totalRequests = 0
    totalDupBatches = 0

    # Retrieve the batch IDs that went into the current checkpoint since
    # previous checkpoint
    modelCheckpointBatchIDSet = self._archiver.modelCheckpointBatchIDSet

    try:
      while not self._done:
        currentRunBatchIDSet = set()
        currentRunInputSamples = []
        currentRunNumRequests = 0
        lastRequestBatch = None

        with self._swapperAPI.consumeRequests(
            modelID=self._modelID, blocking=False) as consumer:

          if self._profiling:
            batchStartTime = time.time()

          # Process the next run of batches until
          # self._targetMaxRequestsPerCheckpoint is reached or exceeded
          for candidateBatch in consumer:
            if (candidateBatch.batchID in currentRunBatchIDSet or
                candidateBatch.batchID in modelCheckpointBatchIDSet):
              self._logger.warn(
                "%r: Already processed this batch=%s, skipping it (we must "
                "have lost channel/connection before or during ack)",
                self, candidateBatch.batchID)

              # Make it go away for good
              candidateBatch.ack()
              totalDupBatches += 1
              continue

            # NOTE: lastRequestBatch is used after this loop to ack this and
            # preceding batches
            lastRequestBatch = candidateBatch

            currentRunBatchIDSet.add(lastRequestBatch.batchID)

            totalBatches += 1

            inputObjects = lastRequestBatch.objects
            numItems = len(inputObjects)
            currentRunNumRequests += numItems
            totalRequests += numItems

            self._logger.debug(
              "%r: Processing input batch #%s; batch=%s, numItems=%s...",
              self, totalBatches, lastRequestBatch.batchID, numItems)

            if self._profiling:
              procStartTime = time.time()
              self._modelLoadSec = 0

            # Process the input batch
            results = self._processInputBatch(inputObjects,
                                              currentRunInputSamples)

            # Send results
            if self._profiling:
              submitStartTime = time.time()

            self._swapperAPI.submitResults(modelID=self._modelID,
                                           results=results)

            if self._profiling:
              now = time.time()

              tailRowTimestampISO = tailRowID = None
              # Assumption: no empty batches
              if isinstance(inputObjects[-1], ModelInputRow):
                # Assumption: entire batch consists of ModelInputRow objects
                tailRowTimestampISO = inputObjects[-1].data[0].isoformat() + "Z"
                tailRowID = inputObjects[-1].rowID

              self._logger.info(
                "{TAG:SWAP.MR.BATCH.DONE} model=%s; batch=%s; numItems=%s; "
                "tailRowID=%s; tailRowTS=%s; duration=%.4fs; "
                "loadDuration=%.4fs; procDuration=%.4fs; submitDuration=%.4fs; "
                "totalBatches=%s; totalItems=%s", self._modelID,
                lastRequestBatch.batchID, len(results), tailRowID,
                tailRowTimestampISO, now - batchStartTime, self._modelLoadSec,
                submitStartTime - procStartTime - self._modelLoadSec,
                now - submitStartTime, totalBatches, totalRequests)

            if self._done:
              self._logger.debug("%r: command handler requested exit, leaving "
                                 "consumer loop", self)
              break

            if currentRunNumRequests >= self._targetMaxRequestsPerCheckpoint:
              self._logger.debug("End of current run: currentRunNumRequests=%s",
                                 currentRunNumRequests)
              break
          else:  # for
            self._done = True
            self._logger.debug(
              "%r: No more input batches, leaving consumer loop", self)

          # Checkpoint the model and ack all request batches processed in this
          # run
          if lastRequestBatch is not None:
            assert len(currentRunBatchIDSet) > 0

            modelCheckpointBatchIDSet = currentRunBatchIDSet

            # Checkpoint the model.
            if self._model is not None:

              if self._profiling:
                checkpointStartTime = time.time()

              self._archiver.saveModel(
                currentRunBatchIDSet=currentRunBatchIDSet,
                currentRunInputSamples=currentRunInputSamples)

              if self._profiling:
                self._logger.info(
                  "%r: {TAG:SWAP.MR.CHKPT.DONE} currentRunNumRequests=%s; "
                  "currentRunNumBatches=%s; duration=%.4fs",
                  self, currentRunNumRequests, len(currentRunBatchIDSet),
                  time.time() - checkpointStartTime)

            # Ack the last request batch and all unacked batches before it
            # consumed during this run
            lastRequestBatch.ack(multiple=True)

          if not self._done:
            # Check if SwapController wants to preempt us (it closes the other
            # end of our stdin to signal the intention)
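            # (zero timeout makes this a non-blocking poll; when the parent
            # closes its end of the pipe, stdin becomes "readable" at EOF)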
            readReadyList = select.select((sys.stdin,), (), (), 0)[0]
            if readReadyList:
              self._logger.debug("%r: SwapController wants to preempt us, "
                                 "leaving", self)
              self._done = True
    finally:
      if totalBatches == 0:
        self._logger.warn("%r: zero input batches were processed", self)

      self._logger.info(
        "%r: {TAG:SWAP.MR.FINAL.SUMMARY} totalBatches=%s; totalRequests=%s; "
        "totalDupBatches=%s; duration=%s", self, totalBatches, totalRequests,
        totalDupBatches, time.time() - startTime)


  def _processInputBatch(self, inputObjects, currentRunInputSamples):
    """ Process a batch of model commands and/or inference input data rows

    :param inputObjects: a sequence of ModelCommand or ModelInputRow objects

    :param currentRunInputSamples: a list; the data of input rows will be
      appended to this list for input rows that were processed successfully

    :returns: a sequence of the corresponding ModelCommandResult or
      ModelInferenceResult objects
    TODO: unit-test return value
    """
    results = []

    for request in inputObjects:
      if isinstance(request, ModelCommand):
        results.append(self._processModelCommand(request))

      elif isinstance(request, ModelInputRow):
        results.append(self._processInputRow(request, currentRunInputSamples))

      else:
        raise ValueError("Unexpected request: " + repr(request))

    return results
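
  # Illustration (hypothetical objects): results correspond to inputs in
  # order, e.g.
  #   _processInputBatch([ModelCommand(...), ModelInputRow(...)], samples)
  #     => [ModelCommandResult(...), ModelInferenceResult(...)]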


  def _processModelCommand(self, command):
    """
    command: ModelCommand instance

    Returns: a ModelCommandResult instance
    """
    self._logger.info("%r: Processing model command: %r", self, command)
    try:
      if command.method == "defineModel":
        return self._defineModel(command)
      elif command.method == "deleteModel":
        return self._deleteModel(command)
      elif command.method == "cloneModel":
        return self._cloneModel(command)
      else:
        raise _ModelRunnerError(
          errno=htmengineerrno.ERR_INVALID_ARG,
          msg="Unknown command method=%s" % (command.method,))

    except (Exception, _ModelRunnerError) as e:  # pylint: disable=W0703
      self._logger.exception("%r: %r failed;", self, command)
      return ModelCommandResult(
        commandID=command.commandID, method=command.method,
        status=(e.errno if isinstance(e, _ModelRunnerError) else
                htmengineerrno.ERR),
        errorMessage="%s - %r failed on modelID=%s: %s. (tb: ...%s)" % (
          datetime.utcnow().isoformat(), command, self._modelID,
          str(e) or repr(e),
          traceback.format_exc()[-self._MAX_TRACEBACK_TAIL:]))


  def _defineModel(self, command):
    """ Handle the "defineModel" command

    command: ModelCommand instance for the "defineModel" command

    retval: ModelCommandResult instance

    raises _ModelRunnerError
    """
    newModelDefinition = dict(
      modelParams=dict(modelConfig=command.args["modelConfig"],
                       inferenceArgs=command.args["inferenceArgs"]),
      inputSchema=command.args["inputRecordSchema"]
    )

    try:
      self._checkpointMgr.define(modelID=self._modelID,
                                 definition=newModelDefinition)
      assert self._model is None
    except model_checkpoint_mgr.ModelAlreadyExists:
      # Check if the existing model has the same definition
      existingModelDefinition = self._checkpointMgr.loadModelDefinition(
        self._modelID)

      if newModelDefinition != existingModelDefinition:
        self._logger.error(
          "defineModel: modelID=%s already exists with different "
          "creation meta data; existing=%r; new=%r", self._modelID,
          existingModelDefinition, newModelDefinition)

        raise _ModelRunnerError(
          errno=htmengineerrno.ERR_MODEL_ALREADY_EXISTS,
          msg="defineModel: modelID=%s already exists with different "
              "creation meta data" % (self._modelID,))
      else:
        # newModelDefinition is the same as existingModelDefinition; might be
        # side-effect of at-least-once delivery guarantee of the reliable data
        # path, so treat it as success
        self._logger.warn(
          "defineModel: modelID=%s already exists with same creation meta "
          "data (side-effect of at-least-once delivery guarantee?)",
          self._modelID)

    return ModelCommandResult(commandID=command.commandID,
                              method=command.method, status=0)


  def _cloneModel(self, command):
    """ Handle the "cloneModel" command

    command: ModelCommand instance for the "cloneModel" command

    retval: ModelCommandResult instance
    """
    try:
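      # self._modelID is the clone source; command.args["modelID"] names the
      # destination (per the error handling below)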
      self._checkpointMgr.clone(self._modelID, command.args["modelID"])
    except model_checkpoint_mgr.ModelNotFound as e:
      errorMessage = "%r: Cloning failed - source model not found (%r)" % (
        self, e,)
      self._logger.error(errorMessage)
      return ModelCommandResult(commandID=command.commandID,
                                method=command.method,
                                status=htmengineerrno.ERR_NO_SUCH_MODEL,
                                errorMessage=errorMessage)
    except model_checkpoint_mgr.ModelAlreadyExists as e:
      # Assuming it's the result of at-least-once delivery semantics
      self._logger.warn(
        "%r: Cloning destination already exists, suppressing (%r)", self, e)

    return ModelCommandResult(commandID=command.commandID,
                              method=command.method, status=0)


  def _deleteModel(self, command):
    """ Handle the "deleteModel" command

    command: ModelCommand instance for the "deleteModel" command

    retval: ModelCommandResult instance

    raises _ModelRunnerError
    """
    try:
      # Delete the model's checkpoint
      try:
        self._checkpointMgr.remove(modelID=self._modelID)
      except model_checkpoint_mgr.ModelNotFound:
        # Suppress it: could be side-effect of at-least-once delivery
        # guarantee, so treat it as success
        self._logger.warn("deleteModel: modelID=%s not found (side-effect "
                          "of at-least-once delivery guarantee?); "
                          "suppressing error", self._modelID)
      # Clean up model's resources in ModelSwapperInterface
      self._swapperAPI.cleanUpAfterModelDeletion(self._modelID)
    finally:
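      # Whether or not checkpoint removal succeeded, discard in-memory model
      # state and arrange for the processing loop to exit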
      self._archiver = _ModelArchiver(self._modelID)
      self._done = True

    return ModelCommandResult(commandID=command.commandID,
                              method=command.method, status=0)


  def _processInputRow(self, row, currentRunInputSamples):
    """
    :param row: ModelInputRow instance

    :param currentRunInputSamples: a list; the input row's data will be appended
      to this list if the row is processed successfully

    :returns: a ModelInferenceResult instance
    """
    try:
      if self._model is None:
        self._loadModel()

      # Convert a flat input row into a format that is consumable by an OPF
      # model
      self._inputRowEncoder.appendRecord(row.data)
      inputRecord = self._inputRowEncoder.getNextRecordDict()

      # Infer
      r = self._model.run(inputRecord)

      currentRunInputSamples.append(row.data)

      return ModelInferenceResult(
        rowID=row.rowID,
        status=0,
        anomalyScore=r.inferences["anomalyScore"],
        multiStepBestPredictions=r.inferences.get("multiStepBestPredictions"))

    except (Exception, _ModelRunnerError) as e:  # pylint: disable=W0703
      self._logger.exception("%r: Inference failed for row=%r", self, row)
      return ModelInferenceResult(
        rowID=row.rowID,
        status=(e.errno if isinstance(e, _ModelRunnerError) else
                htmengineerrno.ERR),
        errorMessage="%s - Inference failed for rowID=%s of modelID=%s (%r): "
                     "(tb: ...%s)" % (
                       datetime.utcnow().isoformat(),
                       row.rowID, self._modelID, e,
                       traceback.format_exc()[-self._MAX_TRACEBACK_TAIL:]))


  def _loadModel(self):
    """ Load the model and construct the input row encoder

    Side-effect: self._model and self._inputRowEncoder are loaded;
      self._modelLoadSec is set if profiling is turned on
    """
    if self._model is None:
      if self._profiling:
        startTime = time.time()

      # Load the model
      self._archiver.loadModel()

      if self._profiling:
        self._modelLoadSec = time.time() - startTime