Example #1
  def __getListMetaInfo(self, inferenceElement):
    """ Get field metadata information for inferences that are of list type
    TODO: Right now we assume list inferences are associated with the input field
    metadata
    """
    fieldMetaInfo = []
    inferenceLabel = InferenceElement.getLabel(inferenceElement)

    for inputFieldMeta in self.__inputFieldsMeta:
      # Only emit the "<field>.actual" column when this inference element maps
      # back to an input element; otherwise outputFieldMeta would be unbound.
      if InferenceElement.getInputElement(inferenceElement):
        outputFieldMeta = FieldMetaInfo(
          name=inputFieldMeta.name + ".actual",
          type=inputFieldMeta.type,
          special=inputFieldMeta.special
        )
        fieldMetaInfo.append(outputFieldMeta)

      predictionField = FieldMetaInfo(
        name=inputFieldMeta.name + "." + inferenceLabel,
        type=inputFieldMeta.type,
        special=inputFieldMeta.special
      )
      fieldMetaInfo.append(predictionField)

    return fieldMetaInfo
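
To make the naming scheme concrete, here is a minimal, self-contained sketch of the same pattern. It uses a namedtuple stand-in rather than NuPIC's real FieldMetaInfo class, and the field names and the "prediction" label are illustrative only.

from collections import namedtuple

# Stand-in for NuPIC's FieldMetaInfo; purely illustrative.
FieldMeta = namedtuple("FieldMeta", ["name", "type", "special"])

def listMetaInfo(inputFieldsMeta, inferenceLabel):
  """Mirror the column naming above: one '<field>.actual' and one
  '<field>.<inferenceLabel>' entry per input field."""
  out = []
  for meta in inputFieldsMeta:
    out.append(FieldMeta(meta.name + ".actual", meta.type, meta.special))
    out.append(FieldMeta(meta.name + "." + inferenceLabel, meta.type, meta.special))
  return out

inputs = [FieldMeta("consumption", "float", ""),
          FieldMeta("timestamp", "datetime", "T")]
print([m.name for m in listMetaInfo(inputs, "prediction")])
# ['consumption.actual', 'consumption.prediction',
#  'timestamp.actual', 'timestamp.prediction']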
Example #2
  def __getDictMetaInfo(self, inferenceElement, inferenceDict):
    """Get field metadate information for inferences that are of dict type"""
    fieldMetaInfo = []
    inferenceLabel = InferenceElement.getLabel(inferenceElement)

    if InferenceElement.getInputElement(inferenceElement):
      fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+".actual",
                                         type=FieldMetaType.string,
                                         special = ''))

    keys = sorted(inferenceDict.keys())
    for key in keys:
      fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+"."+str(key),
                                         type=FieldMetaType.string,
                                         special=''))


    return fieldMetaInfo
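
As a rough illustration of the columns this yields, assume a dict-valued inference labeled multiStepBestPredictions with step keys 1 and 5 (label and keys are hypothetical here):

# Illustrative only: mimics the naming scheme above for a dict-valued inference.
inferenceLabel = "multiStepBestPredictions"
inferenceDict = {1: 5.3, 5: 5.9}

columns = [inferenceLabel + ".actual"]
columns += [inferenceLabel + "." + str(key) for key in sorted(inferenceDict.keys())]
print(columns)
# ['multiStepBestPredictions.actual',
#  'multiStepBestPredictions.1', 'multiStepBestPredictions.5']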
Example #3
  def _getGroundTruth(self, inferenceElement):
    """
    Get the actual value for this field

    Parameters:
    -----------------------------------------------------------------------
    inferenceElement:       The inference element (part of the inference) that
                            is being used for this metric
    """
    sensorInputElement = InferenceElement.getInputElement(inferenceElement)
    if sensorInputElement is None:
      return None
    return getattr(self.__currentGroundTruth.sensorInput, sensorInputElement)
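
A sketch of the lookup this performs, using a namedtuple as a stand-in for the real sensorInput object; the attribute name "dataRow" is just an example of what getInputElement() might resolve an inference element to:

from collections import namedtuple

# Hypothetical stand-in for the sensorInput attribute bundle.
SensorInput = namedtuple("SensorInput", ["dataRow", "category"])
sensorInput = SensorInput(dataRow=[21.5, 0.3], category=None)

# The method resolves the inference element to an attribute name and reads it
# with getattr(); None is returned earlier when no input element is associated.
sensorInputElement = "dataRow"
print(getattr(sensorInput, sensorInputElement))   # [21.5, 0.3]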
Example #4
  def __constructMetricsModules(self, metricSpecs):
    """
    Creates the required metrics modules

    Parameters:
    -----------------------------------------------------------------------
    metricSpecs:
      A sequence of MetricSpec objects that specify which metric modules to
      instantiate
    """
    if not metricSpecs:
      return

    self.__metricSpecs = metricSpecs
    for spec in metricSpecs:
      if not InferenceElement.validate(spec.inferenceElement):
        raise ValueError("Invalid inference element for metric spec: %r" % spec)

      self.__metrics.append(metrics.getModule(spec))
      self.__metricLabels.append(spec.getLabel())
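
For context, a metricSpecs sequence of the kind this method consumes might look like the sketch below, assuming the MetricSpec class from nupic.frameworks.opf.metrics; the field, metric, and parameter values are illustrative rather than canonical.

from nupic.frameworks.opf.metrics import MetricSpec

# One spec: average absolute error on 1-step-ahead predictions of a
# (hypothetical) 'consumption' field, over a 1000-record window.
metricSpecs = (
  MetricSpec(field='consumption',
             metric='multiStep',
             inferenceElement='multiStepBestPredictions',
             params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
)

The metrics manager that owns this method is constructed with such a sequence; each spec then becomes one entry in self.__metrics and one label in self.__metricLabels.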
Example #5
    def append(self, modelResult):
        """ [virtual method override] Emits a single prediction as input versus
    predicted.

    modelResult:    An opfutils.ModelResult object that contains the model input
                    and output for the current timestep.
    """

        #print "DEBUG: _BasicPredictionWriter: writing modelResult: %r" % (modelResult,)

        # If there are no inferences, don't write anything
        inferences = modelResult.inferences
        hasInferences = False
        if inferences is not None:
            for value in inferences.itervalues():
                hasInferences = hasInferences or (value is not None)

        if not hasInferences:
            return

        if self.__dataset is None:
            self.__openDatafile(modelResult)

        inputData = modelResult.sensorInput

        sequenceReset = int(bool(inputData.sequenceReset))
        outputRow = [sequenceReset]

        # -----------------------------------------------------------------------
        # Write out the raw inputs
        rawInput = modelResult.rawInput
        for field in self._rawInputNames:
            outputRow.append(str(rawInput[field]))

        # -----------------------------------------------------------------------
        # Write out the inference element info
        for inferenceElement, outputVal in inferences.iteritems():
            inputElement = InferenceElement.getInputElement(inferenceElement)
            if inputElement:
                inputVal = getattr(inputData, inputElement)
            else:
                inputVal = None

            if type(outputVal) in (list, tuple):
                # type(None) rather than None: None itself is not a type
                assert type(inputVal) in (list, tuple, type(None))

                for iv, ov in zip(inputVal, outputVal):
                    # Write actual
                    outputRow.append(str(iv))

                    # Write inferred
                    outputRow.append(str(ov))
            elif isinstance(outputVal, dict):
                if inputVal is not None:
                    # If we have a predicted field, include only that in the actuals
                    if modelResult.predictedFieldIdx is not None:
                        outputRow.append(
                            str(inputVal[modelResult.predictedFieldIdx]))
                    else:
                        outputRow.append(str(inputVal))
                for key in sorted(outputVal.keys()):
                    outputRow.append(str(outputVal[key]))
            else:
                if inputVal is not None:
                    outputRow.append(str(inputVal))
                outputRow.append(str(outputVal))

        metrics = modelResult.metrics
        for metricName in self.__metricNames:
            outputRow.append(metrics.get(metricName, 0.0))

        #print "DEBUG: _BasicPredictionWriter: writing outputRow: %r" % (outputRow,)

        self.__dataset.appendRecord(outputRow)

        self.__dataset.flush()

        return
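
The sketch below reconstructs, with made-up field names and values, how one output row is ordered for a dict-valued inference: reset bit, raw inputs, the actual value, the per-key predictions, then the metric values.

# Purely illustrative; field names and values are invented.
sequenceReset = 0
rawInputs = {"consumption": "21.2", "timestamp": "2010-07-02 00:00:00"}
dictInference = {1: 21.0, 5: 22.4}      # e.g. predictions keyed by step
actual = "21.2"
metricValues = [0.87]

outputRow = [sequenceReset]
outputRow += [rawInputs[name] for name in sorted(rawInputs)]   # raw input columns
outputRow.append(actual)                                       # '<label>.actual'
outputRow += [str(dictInference[k]) for k in sorted(dictInference)]
outputRow += metricValues
print(outputRow)
# [0, '21.2', '2010-07-02 00:00:00', '21.2', '21.0', '22.4', 0.87]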
Example #6
    def __openDatafile(self, modelResult):
        """Open the data file and write the header row"""

        # Write reset bit
        resetFieldMeta = FieldMetaInfo(name="reset",
                                       type=FieldMetaType.integer,
                                       special=FieldMetaSpecial.reset)

        self.__outputFieldsMeta.append(resetFieldMeta)

        # -----------------------------------------------------------------------
        # Write each of the raw inputs that go into the encoders
        rawInput = modelResult.rawInput
        rawFields = rawInput.keys()
        rawFields.sort()
        for field in rawFields:
            if field.startswith('_') or field == 'reset':
                continue
            value = rawInput[field]
            meta = FieldMetaInfo(name=field,
                                 type=FieldMetaType.string,
                                 special=FieldMetaSpecial.none)
            self.__outputFieldsMeta.append(meta)
            self._rawInputNames.append(field)

        # -----------------------------------------------------------------------
        # Handle each of the inference elements
        for inferenceElement, value in modelResult.inferences.iteritems():
            inferenceLabel = InferenceElement.getLabel(inferenceElement)

            # TODO: Right now we assume list inferences are associated with
            # The input field metadata
            if type(value) in (list, tuple):
                # Append input and prediction field meta-info
                self.__outputFieldsMeta.extend(
                    self.__getListMetaInfo(inferenceElement))

            elif isinstance(value, dict):
                self.__outputFieldsMeta.extend(
                    self.__getDictMetaInfo(inferenceElement, value))
            else:

                if InferenceElement.getInputElement(inferenceElement):
                    self.__outputFieldsMeta.append(
                        FieldMetaInfo(name=inferenceLabel + ".actual",
                                      type=FieldMetaType.string,
                                      special=''))
                self.__outputFieldsMeta.append(
                    FieldMetaInfo(name=inferenceLabel,
                                  type=FieldMetaType.string,
                                  special=''))

        if self.__metricNames:
            for metricName in self.__metricNames:
                metricField = FieldMetaInfo(name=metricName,
                                            type=FieldMetaType.float,
                                            special=FieldMetaSpecial.none)

                self.__outputFieldsMeta.append(metricField)

        # Create the inference directory for our experiment
        inferenceDir = _FileUtils.createExperimentInferenceDir(
            self.__experimentDir)

        # Construct the prediction dataset file path
        filename = (self.__label + "." +
                    opfutils.InferenceType.getLabel(self.__inferenceType) +
                    ".predictionLog.csv")
        self.__datasetPath = os.path.join(inferenceDir, filename)

        # Create the output dataset
        print "OPENING OUTPUT FOR PREDICTION WRITER AT: %r" % self.__datasetPath
        print "Prediction field-meta: %r" % (
            [tuple(i) for i in self.__outputFieldsMeta], )
        self.__dataset = FileRecordStream(streamID=self.__datasetPath,
                                          write=True,
                                          fields=self.__outputFieldsMeta)

        # Copy data from checkpoint cache
        if self.__checkpointCache is not None:
            self.__checkpointCache.seek(0)

            reader = csv.reader(self.__checkpointCache, dialect='excel')

            # Skip header row
            try:
                header = reader.next()
            except StopIteration:
                print "Empty record checkpoint initializer for %r" % (
                    self.__datasetPath, )
            else:
                assert tuple(self.__dataset.getFieldNames()) == tuple(header), \
                  "dataset.getFieldNames(): %r; predictionCheckpointFieldNames: %r" % (
                  tuple(self.__dataset.getFieldNames()), tuple(header))

            # Copy the rows from checkpoint
            numRowsCopied = 0
            while True:
                try:
                    row = reader.next()
                except StopIteration:
                    break

                #print "DEBUG: restoring row from checkpoint: %r" % (row,)

                self.__dataset.appendRecord(row)
                numRowsCopied += 1

            self.__dataset.flush()

            print "Restored %d rows from checkpoint for %r" % (
                numRowsCopied, self.__datasetPath)

            # Dispose of our checkpoint cache
            self.__checkpointCache.close()
            self.__checkpointCache = None

        return
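
Assuming a label of "DefaultTask" and an inference type of TemporalMultiStep (both placeholders), the resulting prediction-log path follows the pattern sketched here; the header row then starts with "reset", followed by the raw input fields, the per-inference columns, and one column per metric label.

import os

# Hypothetical values; the real ones come from the writer's label, inference
# type, and experiment directory.
inferenceDir = "experiment/inference"
label = "DefaultTask"
inferenceType = "TemporalMultiStep"

filename = label + "." + inferenceType + ".predictionLog.csv"
print(os.path.join(inferenceDir, filename))
# experiment/inference/DefaultTask.TemporalMultiStep.predictionLog.csv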
Example #7
  def append(self, modelResult):
    """ [virtual method override] Emits a single prediction as input versus
    predicted.

    modelResult:    An opfutils.ModelResult object that contains the model input
                    and output for the current timestep.
    """

    #print "DEBUG: _BasicPredictionWriter: writing modelResult: %r" % (modelResult,)

    # If there are no inferences, don't write anything
    inferences = modelResult.inferences
    hasInferences = False
    if inferences is not None:
      for value in inferences.itervalues():
        hasInferences = hasInferences or (value is not None)

    if not hasInferences:
      return

    if self.__dataset is None:
      self.__openDatafile(modelResult)

    inputData = modelResult.sensorInput

    sequenceReset = int(bool(inputData.sequenceReset))
    outputRow = [sequenceReset]


    # -----------------------------------------------------------------------
    # Write out the raw inputs
    rawInput = modelResult.rawInput
    for field in self._rawInputNames:
      outputRow.append(str(rawInput[field]))

    # -----------------------------------------------------------------------
    # Write out the inference element info
    for inferenceElement, outputVal in inferences.iteritems():
      inputElement = InferenceElement.getInputElement(inferenceElement)
      if inputElement:
        inputVal = getattr(inputData, inputElement)
      else:
        inputVal = None

      if type(outputVal) in (list, tuple):
        # type(None) rather than None: None itself is not a type
        assert type(inputVal) in (list, tuple, type(None))

        for iv, ov in zip(inputVal, outputVal):
          # Write actual
          outputRow.append(str(iv))

          # Write inferred
          outputRow.append(str(ov))
      elif isinstance(outputVal, dict):
        if inputVal is not None:
          # If we have a predicted field, include only that in the actuals
          if modelResult.predictedFieldName is not None:
            outputRow.append(str(inputVal[modelResult.predictedFieldName]))
          else:
            outputRow.append(str(inputVal))
        for key in sorted(outputVal.keys()):
          outputRow.append(str(outputVal[key]))
      else:
        if inputVal is not None:
          outputRow.append(str(inputVal))
        outputRow.append(str(outputVal))

    metrics = modelResult.metrics
    for metricName in self.__metricNames:
      outputRow.append(metrics.get(metricName, 0.0))

    #print "DEBUG: _BasicPredictionWriter: writing outputRow: %r" % (outputRow,)

    self.__dataset.appendRecord(outputRow)

    self.__dataset.flush()

    return
Example #8
  def __openDatafile(self, modelResult):
    """Open the data file and write the header row"""

    # Write reset bit
    resetFieldMeta = FieldMetaInfo(
      name="reset",
      type=FieldMetaType.integer,
      special = FieldMetaSpecial.reset)

    self.__outputFieldsMeta.append(resetFieldMeta)


    # -----------------------------------------------------------------------
    # Write each of the raw inputs that go into the encoders
    rawInput = modelResult.rawInput
    rawFields = rawInput.keys()
    rawFields.sort()
    for field in rawFields:
      if field.startswith('_') or field == 'reset':
        continue
      value = rawInput[field]
      meta = FieldMetaInfo(name=field, type=FieldMetaType.string,
                           special=FieldMetaSpecial.none)
      self.__outputFieldsMeta.append(meta)
      self._rawInputNames.append(field)


    # -----------------------------------------------------------------------
    # Handle each of the inference elements
    for inferenceElement, value in modelResult.inferences.iteritems():
      inferenceLabel = InferenceElement.getLabel(inferenceElement)

      # TODO: Right now we assume list inferences are associated with
      # The input field metadata
      if type(value) in (list, tuple):
        # Append input and prediction field meta-info
        self.__outputFieldsMeta.extend(self.__getListMetaInfo(inferenceElement))

      elif isinstance(value, dict):
        self.__outputFieldsMeta.extend(
          self.__getDictMetaInfo(inferenceElement, value))
      else:

        if InferenceElement.getInputElement(inferenceElement):
          self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel+".actual",
                type=FieldMetaType.string, special = ''))
        self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel,
                type=FieldMetaType.string, special = ''))

    if self.__metricNames:
      for metricName in self.__metricNames:
        metricField = FieldMetaInfo(
          name = metricName,
          type = FieldMetaType.float,
          special = FieldMetaSpecial.none)

        self.__outputFieldsMeta.append(metricField)

    # Create the inference directory for our experiment
    inferenceDir = _FileUtils.createExperimentInferenceDir(self.__experimentDir)

    # Construct the prediction dataset file path
    filename = (self.__label + "." +
               opfutils.InferenceType.getLabel(self.__inferenceType) +
               ".predictionLog.csv")
    self.__datasetPath = os.path.join(inferenceDir, filename)

    # Create the output dataset
    print "OPENING OUTPUT FOR PREDICTION WRITER AT: {0!r}".format(self.__datasetPath)
    print "Prediction field-meta: {0!r}".format([tuple(i) for i in self.__outputFieldsMeta])
    self.__dataset = FileRecordStream(streamID=self.__datasetPath, write=True,
                                     fields=self.__outputFieldsMeta)

    # Copy data from checkpoint cache
    if self.__checkpointCache is not None:
      self.__checkpointCache.seek(0)

      reader = csv.reader(self.__checkpointCache, dialect='excel')

      # Skip header row
      try:
        header = reader.next()
      except StopIteration:
        print "Empty record checkpoint initializer for {0!r}".format(self.__datasetPath)
      else:
        assert tuple(self.__dataset.getFieldNames()) == tuple(header), \
          "dataset.getFieldNames(): {0!r}; predictionCheckpointFieldNames: {1!r}".format(
          tuple(self.__dataset.getFieldNames()), tuple(header))

      # Copy the rows from checkpoint
      numRowsCopied = 0
      while True:
        try:
          row = reader.next()
        except StopIteration:
          break

        #print "DEBUG: restoring row from checkpoint: %r" % (row,)

        self.__dataset.appendRecord(row)
        numRowsCopied += 1

      self.__dataset.flush()

      print "Restored {0:d} rows from checkpoint for {1!r}".format(
        numRowsCopied, self.__datasetPath)

      # Dispose of our checkpoint cache
      self.__checkpointCache.close()
      self.__checkpointCache = None

    return
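
A small, hypothetical consumer of the prediction log written above might read it back as follows; the file name and column names are placeholders that depend on the experiment's fields and inferences.

import csv

with open("DefaultTask.TemporalMultiStep.predictionLog.csv") as logFile:
  reader = csv.reader(logFile, dialect="excel")
  header = next(reader)
  actualIdx = header.index("multiStepBestPredictions.actual")
  predictedIdx = header.index("multiStepBestPredictions.1")
  for row in reader:
    print((row[actualIdx], row[predictedIdx]))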