Example #1
  def __init__(self, model, task, cmdOptions):
    """ Constructor

    Args:
      model: The OPF Model instance against which to run the task
      task: A dictionary conforming to opfTaskSchema.json
      cmdOptions: ParseCommandLineOptionsResult namedtuple
    """
    validateOpfJsonValue(task, "opfTaskSchema.json")

    # Set up our logger
    self.__logger = logging.getLogger(".".join(
      ['com.numenta', self.__class__.__module__, self.__class__.__name__]))
    #self.__logger.setLevel(logging.DEBUG)

    self.__logger.debug(("Instantiated %s(" + \
                      "model=%r, " + \
                      "task=%r, " + \
                      "cmdOptions=%r)") % \
                        (self.__class__.__name__,
                         model,
                         task,
                         cmdOptions))

    # Generate a new dataset from streamDef and create the dataset reader
    streamDef = task['dataset']
    datasetReader = opfbasicenvironment.BasicDatasetReader(streamDef)

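    # Keep references to the model, dataset reader, task, and command-line options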
    self.__model = model
    self.__datasetReader = datasetReader
    self.__task = task
    self.__cmdOptions = cmdOptions

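    # Create the logger that records the model's prediction output for this task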
    self.__predictionLogger = opfbasicenvironment.BasicPredictionLogger(
      fields=model.getFieldInfo(),
      experimentDir=cmdOptions.experimentDir,
      label=task['taskLabel'],
      inferenceType=self.__model.getInferenceType())

    taskControl = task['taskControl']

    # Create Task Driver
    self.__taskDriver = OPFTaskDriver(
      taskControl=taskControl,
      model=model)

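    # Resolve the loggedMetrics patterns into the concrete metric labels to log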
    loggedMetricPatterns = taskControl.get('loggedMetrics', None)
    loggedMetricLabels = matchPatterns(loggedMetricPatterns,
                                       self.__taskDriver.getMetricLabels())

    self.__predictionLogger.setLoggedMetrics(loggedMetricLabels)

    # Create a prediction metrics logger
    self.__metricsLogger = opfbasicenvironment.BasicPredictionMetricsLogger(
      experimentDir=cmdOptions.experimentDir,
      label=task['taskLabel'])