Example #1
def _createLPFNetwork(addSP = True, addTP = False):
  """Create an 'old-style' network ala LPF and return it."""

  # ==========================================================================
  # Create the encoder and data source stuff we need to configure the sensor
  sensorParams = dict(verbosity = _VERBOSITY)
  encoder = _createEncoder()
  trainFile = findDataset("extra/gym/gym.csv")
  dataSource = FileRecordStream(streamID=trainFile)
  dataSource.setAutoRewind(True)

  # Create all the stuff we need to configure the CLARegion
  g_claConfig['spEnable'] = addSP
  g_claConfig['tpEnable'] = addTP
  claParams = _getCLAParams(encoder = encoder, config= g_claConfig)
  claParams['spSeed'] = g_claConfig['spSeed']
  claParams['tpSeed'] = g_claConfig['tpSeed']

  # ==========================================================================
  # Now create the network itself
  n = Network()

  n.addRegion("sensor", "py.RecordSensor", json.dumps(sensorParams))

  sensor = n.regions['sensor'].getSelf()
  sensor.encoder = encoder
  sensor.dataSource = dataSource

  n.addRegion("level1", "py.CLARegion", json.dumps(claParams))

  n.link("sensor", "level1", "UniformLink", "")
  n.link("sensor", "level1", "UniformLink", "",
         srcOutput="resetOut", destInput="resetIn")

  return n
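
A minimal usage sketch for the network returned above, assuming CLARegion exposes 'learningMode' and 'inferenceMode' parameters (an assumption, not taken from this test): build the network, run it with learning on, then switch to inference.

net = _createLPFNetwork(addSP=True, addTP=True)
net.regions['level1'].setParameter('learningMode', 1)   # assumed CLARegion parameter
net.run(100)                                            # consume 100 gym records
net.regions['level1'].setParameter('learningMode', 0)
net.regions['level1'].setParameter('inferenceMode', 1)  # assumed CLARegion parameter
net.run(10)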
Example #2
  def __init__(self, inputFilePath, verbosity=1, numLabels=3, spTrainingSize=0,
               tmTrainingSize=0, clsTrainingSize=0, classifierType="KNN"):
    """
    @param inputFilePath      (str)       Path to data formatted for network
                                          API
    @param spTrainingSize     (int)       Number of samples the network has to
                                          be trained on before training the
                                          spatial pooler
    @param tmTrainingSize     (int)       Number of samples the network has to
                                          be trained on before training the
                                          temporal memory
    @param clsTrainingSize    (int)       Number of samples the network has to
                                          be trained on before training the
                                          classifier
    @param classifierType     (str)       Either "KNN" or "CLA"
    See ClassificationModel for remaining parameters
    """
    self.spTrainingSize = spTrainingSize
    self.tmTrainingSize = tmTrainingSize
    self.clsTrainingSize = clsTrainingSize

    super(ClassificationModelHTM, self).__init__(verbosity=verbosity,
      numLabels=numLabels)

    # Initialize Network
    self.classifierType = classifierType
    self.recordStream = FileRecordStream(streamID=inputFilePath)
    self.encoder = CioEncoder(cacheDir="./experiments/cache")
    self._initModel()
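
A hypothetical instantiation of this model; the CSV path and training sizes below are illustrative placeholders, not values from the original code.

model = ClassificationModelHTM("data/network_training_data.csv",  # hypothetical path
                               verbosity=0,
                               numLabels=3,
                               spTrainingSize=100,
                               tmTrainingSize=300,
                               clsTrainingSize=500,
                               classifierType="KNN")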
Example #3
 def testCopyOneRow(self):
     expectedOutput = ("Timestamp,Value\n"
                       "datetime,int\n"
                       "T,\n"
                       "2011-09-04 02:00:00.000000,1\n"
                       "2011-09-04 02:05:00.000000,2\n"
                       "2011-09-04 02:10:00.000000,2\n"
                       "2011-09-04 02:15:00.000000,3\n"
                       "2011-09-04 02:20:00.000000,4\n"
                       "2011-09-04 02:25:00.000000,5\n"
                       "2011-09-04 02:30:00.000000,6\n")
     mockInput = MagicMock(return_value=StringIO(self.sampleInput))
     output = StringIO()
     mockOutput = MagicMock(return_value=output)
     with patch("__builtin__.open", mockInput):
         inputFile = FileRecordStream("input_path")
         with patch("__builtin__.open", mockOutput):
             outputFile = FileRecordStream("output_path",
                                           fields=inputFile.getFields(),
                                           write=True)
             anomalyzer.copy(inputFile, outputFile, 1, 1, 1)
     result = output.getvalue()
     result = result.replace("\r\n", "\n")
     result = result.replace("\r", "\n")
     self.assertSequenceEqual(expectedOutput, result)
Example #4
def run(numRecords):
  '''
  Run the Bitcoin price prediction example.
  '''

  # Create a data source for the network.
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
  numRecords = min(numRecords, dataSource.getDataRowCount())
  network = createNetwork(dataSource)

  network.regions["sensor"].getSelf().predictedField = "price"

  # Set predicted field
  network.regions["sensor"].setParameter("predictedField", "price")

  # Enable learning for all regions.
  network.regions["SP"].setParameter("learningMode", 1)
  network.regions["TM"].setParameter("learningMode", 1)
  network.regions["classifier"].setParameter("learningMode", 1)

  # Enable inference for all regions.
  network.regions["SP"].setParameter("inferenceMode", 1)
  network.regions["TM"].setParameter("inferenceMode", 1)
  network.regions["classifier"].setParameter("inferenceMode", 1)

  results = []
  N = _RUN_EPOCH  # Run the network, N iterations at a time.
  graph = Graph({
    'title': 'Bitcoin Prediction',
    'y_label': 'price',
    'y_lim': 'auto',
    'prediction_num': 2,
    'line_labels': ['1-step', '5-step']
  })
  for iteration in range(0, numRecords, N):
    if iteration % _RUN_INTERVAL == 0:
      network.run(N)

      price = network.regions["sensor"].getOutputData("sourceOut")[0]

      predictionResults = getPredictionResults(network, "classifier")
      oneStep = predictionResults[1]["predictedValue"]
      oneStepConfidence = predictionResults[1]["predictionConfidence"]
      fiveStep = predictionResults[5]["predictedValue"]
      fiveStepConfidence = predictionResults[5]["predictionConfidence"]

      result = (oneStep, oneStepConfidence * 100,
                fiveStep, fiveStepConfidence * 100)
      
      if iteration % _PRINT_INTERVAL == 0:
        print "iteration: {}".format(iteration)
        print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
      
      results.append(result)

      graph.write(price, [oneStep, fiveStep])
  
  graph.close()

  return results
Example #5
 def testCopyOneRow(self):
   expectedOutput = ("Timestamp,Value\n"
                     "datetime,int\n"
                     "T,\n"
                     "2011-09-04 02:00:00.000000,1\n"
                     "2011-09-04 02:05:00.000000,2\n"
                     "2011-09-04 02:10:00.000000,2\n"
                     "2011-09-04 02:15:00.000000,3\n"
                     "2011-09-04 02:20:00.000000,4\n"
                     "2011-09-04 02:25:00.000000,5\n"
                     "2011-09-04 02:30:00.000000,6\n")
   mockInput = MagicMock(return_value=StringIO(self.sampleInput))
   output = StringIO()
   mockOutput = MagicMock(return_value=output)
   with patch("__builtin__.open", mockInput):
     inputFile = FileRecordStream("input_path")
     with patch("__builtin__.open", mockOutput):
       outputFile = FileRecordStream("output_path",
                                     fields=inputFile.getFields(),
                                     write=True)
       anomalyzer.copy(inputFile, outputFile, 1, 1, 1)
   result = output.getvalue()
   result = result.replace("\r\n", "\n")
   result = result.replace("\r", "\n")
   self.assertSequenceEqual(expectedOutput, result)
Example #6
def _createLPFNetwork(addSP=True, addTP=False):
    """Create an 'old-style' network ala LPF and return it."""

    # ==========================================================================
    # Create the encoder and data source stuff we need to configure the sensor
    sensorParams = dict(verbosity=_VERBOSITY)
    encoder = _createEncoder()
    trainFile = findDataset("extra/gym/gym.csv")
    dataSource = FileRecordStream(streamID=trainFile)
    dataSource.setAutoRewind(True)

    # Create all the stuff we need to configure the CLARegion
    g_claConfig["spEnable"] = addSP
    g_claConfig["tpEnable"] = addTP
    claParams = _getCLAParams(encoder=encoder, config=g_claConfig)
    claParams["spSeed"] = g_claConfig["spSeed"]
    claParams["tpSeed"] = g_claConfig["tpSeed"]

    # ==========================================================================
    # Now create the network itself
    n = Network()

    n.addRegion("sensor", "py.RecordSensor", json.dumps(sensorParams))

    sensor = n.regions["sensor"].getSelf()
    sensor.encoder = encoder
    sensor.dataSource = dataSource

    n.addRegion("level1", "py.CLARegion", json.dumps(claParams))

    n.link("sensor", "level1", "UniformLink", "")
    n.link("sensor", "level1", "UniformLink", "", srcOutput="resetOut", destInput="resetIn")

    return n
Example #7
def _createNetwork():
  """Create a network with a RecordSensor region and a SDRClassifier region"""

  network = Network()
  network.addRegion('sensor', 'py.RecordSensor', '{}')
  network.addRegion('classifier', 'py.SDRClassifierRegion', '{}')
  _createSensorToClassifierLinks(network, 'sensor', 'classifier')

  # Add encoder to sensor region.
  sensorRegion = network.regions['sensor'].getSelf()
  encoderParams = {'consumption': {'fieldname': 'consumption',
                                   'resolution': 0.88,
                                   'seed': 1,
                                   'name': 'consumption',
                                   'type': 'RandomDistributedScalarEncoder'}}

  encoder = MultiEncoder()
  encoder.addMultipleEncoders(encoderParams)
  sensorRegion.encoder = encoder

  # Add data source.
  testDir = os.path.dirname(os.path.abspath(__file__))
  inputFile = os.path.join(testDir, 'fixtures', 'gymdata-test.csv')
  dataSource = FileRecordStream(streamID=inputFile)
  sensorRegion.dataSource = dataSource

  # Get and set what field index we want to predict.
  predictedIdx = dataSource.getFieldNames().index('consumption')
  network.regions['sensor'].setParameter('predictedFieldIdx', predictedIdx)

  return network
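
A short usage sketch of the network built above, using only calls already shown on this page (Network.run and the RecordSensor's 'sourceOut' output):

net = _createNetwork()
net.initialize()
for _ in xrange(3):
  net.run(1)
  # 'sourceOut' carries the raw scalar the RecordSensor read from the CSV.
  print net.regions['sensor'].getOutputData('sourceOut')[0]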
Example #8
  def testBadDataset(self):

    filename = _getTempFileName()

    print 'Creating tempfile:', filename

    # Write bad dataset with records going backwards in time
    fields = [('timestamp', 'datetime', 'T')]
    o = FileRecordStream(streamID=filename, write=True, fields=fields)
    # Records
    records = (
      [datetime(day=3, month=3, year=2010)],
      [datetime(day=2, month=3, year=2010)])

    o.appendRecord(records[0])
    o.appendRecord(records[1])
    o.close()

    # Write bad dataset with broken sequences
    fields = [('sid', 'int', 'S')]
    o = FileRecordStream(streamID=filename, write=True, fields=fields)
    # Records
    records = ([1], [2], [1])

    o.appendRecord(records[0])
    o.appendRecord(records[1])
    self.assertRaises(Exception, o.appendRecord, (records[2],))
    o.close()
Example #9
    def test_WeightedMean(self):
        # Cleanup old files
        #for f in glob.glob('*.*'):
        #  if 'auto_specials' in f:
        #    os.remove(f)

        fields = [
            ('dummy1', 'int', ''),
            ('dummy2', 'int', ''),
            ('timestamp', 'datetime', 'T'),
        ]

        records = (
            [10, 1, datetime.datetime(2000, 3, 1)],
            [5, 2, datetime.datetime(2000, 3, 2)],
            [1, 100, datetime.datetime(2000, 3, 3)],
            [2, 4, datetime.datetime(2000, 3, 4)],
            [4, 1, datetime.datetime(2000, 3, 5)],
            [4, 0, datetime.datetime(2000, 3, 6)],
            [5, 0, datetime.datetime(2000, 3, 7)],
            [6, 0, datetime.datetime(2000, 3, 8)],
        )

        if not os.path.isdir('data'):
            os.makedirs('data')

        with FileRecordStream('data/weighted_mean.csv', write=True, fields=fields) \
              as o:
            for r in records:
                o.appendRecord(r)

        # Aggregate just the dummy field, all the specials should be added
        ai = dict(fields=[('dummy1', 'wmean:dummy2', None),
                          ('dummy2', 'mean', None)],
                  days=2)

        handle = \
          tempfile.NamedTemporaryFile(prefix='weighted_mean',
            suffix='.csv',
            dir='.')
        tempFile = handle.name
        handle.close()

        outputFile = generateDataset(ai, 'weighted_mean.csv', tempFile)

        result = []
        with FileRecordStream(outputFile) as f:
            print f.getFields()
            for r in f:
                result.append(r)

        self.assertEqual(result[0][0], 6.0)
        self.assertEqual(result[0][1], 1.0)
        self.assertEqual(result[1][0], 1.0)
        self.assertEqual(result[1][1], 52.0)
        self.assertEqual(result[2][0], 4.0)
        self.assertEqual(result[2][1], 0.0)
        self.assertEqual(result[3][0], None)
        self.assertEqual(result[3][1], 0.0)
        return
Example #10
    def initialize(self):
        """
        Initialize this node.
        """
        Node.initialize(self)

        # Initialize input bits
        self.bits = []
        for x in range(self.width):
            for y in range(self.height):
                bit = Bit()
                bit.x = x
                bit.y = y
                self.bits.append(bit)

        if self.data_source_type == DataSourceType.FILE:
            """
            Initialize this node by opening the file and placing the cursor on the first record.
            """

            # If the file name provided is a relative path, resolve it against the project file path
            if self.file_name != '' and os.path.dirname(self.file_name) == '':
                full_file_name = os.path.dirname(Global.project.file_name) + '/' + self.file_name
            else:
                full_file_name = self.file_name

            # Check if file really exists
            if not os.path.isfile(full_file_name):
                QtWidgets.QMessageBox.warning(None, "Warning", "Input stream file '" + full_file_name + "' was not found or specified.", QtWidgets.QMessageBox.Ok)
                return

            # Create a data source to read the file
            self.data_source = FileRecordStream(full_file_name)

        elif self.data_source_type == DataSourceType.DATABASE:
            pass

        self.encoder = MultiEncoder()
        for encoding in self.encodings:
            encoding.initialize()

            # Instantiate the encoder class from its module, class name, and constructor params
            encoding.encoder = getInstantiatedClass(encoding.encoder_module, encoding.encoder_class, encoding.encoder_params)

            # Take the first part of encoder field name as encoder name
            # Ex: timestamp_weekend.weekend => timestamp_weekend
            encoding.encoder.name = encoding.encoder_field_name.split('.')[0]

            # Add sub-encoder to multi-encoder list
            self.encoder.addEncoder(encoding.data_source_field_name, encoding.encoder)

        # If the encoder size exceeds the sensor size, warn and abort initialization
        encoder_size = self.encoder.getWidth()
        sensor_size = self.width * self.height
        if encoder_size > sensor_size:
            QtWidgets.QMessageBox.warning(None, "Warning", "'" + self.name + "': Encoder size (" + str(encoder_size) + ") is different from sensor size (" + str(self.width) + " x " + str(self.height) + " = " + str(sensor_size) + ").", QtWidgets.QMessageBox.Ok)
            return

        return True
Example #11
def _createOPFNetwork(addSP=True, addTP=False):
    """Create a 'new-style' network ala OPF and return it.
  If addSP is true, an SPRegion will be added named 'level1SP'.
  If addTP is true, a TPRegion will be added named 'level1TP'
  """

    # ==========================================================================
    # Create the encoder and data source stuff we need to configure the sensor
    sensorParams = dict(verbosity=_VERBOSITY)
    encoder = _createEncoder()
    trainFile = findDataset("extra/gym/gym.csv")
    dataSource = FileRecordStream(streamID=trainFile)
    dataSource.setAutoRewind(True)

    # ==========================================================================
    # Now create the network itself
    n = Network()
    n.addRegion("sensor", "py.RecordSensor", json.dumps(sensorParams))

    sensor = n.regions["sensor"].getSelf()
    sensor.encoder = encoder
    sensor.dataSource = dataSource

    # ==========================================================================
    # Add the SP if requested
    if addSP:
        print "Adding SPRegion"
        g_spRegionConfig["inputWidth"] = encoder.getWidth()
        n.addRegion("level1SP", "py.SPRegion", json.dumps(g_spRegionConfig))

        n.link("sensor", "level1SP", "UniformLink", "")
        n.link("sensor", "level1SP", "UniformLink", "", srcOutput="resetOut", destInput="resetIn")
        n.link("level1SP", "sensor", "UniformLink", "", srcOutput="spatialTopDownOut", destInput="spatialTopDownIn")
        n.link("level1SP", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut", destInput="temporalTopDownIn")

    # ==========================================================================
    if addTP and addSP:
        # Add the TP on top of SP if requested
        # The input width of the TP is set to the column count of the SP
        print "Adding TPRegion on top of SP"
        g_tpRegionConfig["inputWidth"] = g_spRegionConfig["columnCount"]
        n.addRegion("level1TP", "py.TPRegion", json.dumps(g_tpRegionConfig))
        n.link("level1SP", "level1TP", "UniformLink", "")
        n.link("level1TP", "level1SP", "UniformLink", "", srcOutput="topDownOut", destInput="topDownIn")
        n.link("sensor", "level1TP", "UniformLink", "", srcOutput="resetOut", destInput="resetIn")

    elif addTP:
        # Add a lone TPRegion if requested
        # The input width of the TP is set to the encoder width
        print "Adding TPRegion"
        g_tpRegionConfig["inputWidth"] = encoder.getWidth()
        n.addRegion("level1TP", "py.TPRegion", json.dumps(g_tpRegionConfig))

        n.link("sensor", "level1TP", "UniformLink", "")
        n.link("sensor", "level1TP", "UniformLink", "", srcOutput="resetOut", destInput="resetIn")

    return n
Example #12
  def train(self, training_file, num_records):
    """Create a network and training it on a CSV data source"""

    dataSource = FileRecordStream(streamID=training_file)
    dataSource.setAutoRewind(True)
    self._network = configureNetwork(dataSource, self.network_config)
    for i in xrange(num_records):  # Equivalent to: network.run(num_records) 
      self._network.run(1)
    self._network.save(self.trained_network_path)
Example #13
    def test_AutoSpecialFields(self):
        # Cleanup old files
        #for f in glob.glob('*.*'):
        #  if 'auto_specials' in f:
        #    os.remove(f)

        fields = [
            ('dummy', 'string', ''),
            ('timestamp', 'datetime', 'T'),
            ('reset', 'int', 'R'),
            ('sid', 'int', 'S'),
        ]

        records = (
            ['dummy-1', datetime.datetime(2000, 3, 1), 1, 1],
            ['dummy-2', datetime.datetime(2000, 3, 2), 0, 1],
            ['dummy-3', datetime.datetime(2000, 3, 3), 0, 1],
            ['dummy-4', datetime.datetime(2000, 3, 4), 1, 2],
            ['dummy-5', datetime.datetime(2000, 3, 5), 0, 2],
        )

        if not os.path.isdir('data'):
            os.makedirs('data')

        with FileRecordStream('data/auto_specials.csv', write=True, fields=fields) \
               as o:
            for r in records:
                o.appendRecord(r)

        # Aggregate just the dummy field, all the specials should be added
        ai = dict(fields=[('dummy', lambda x: x[0])], weeks=3)

        handle = \
          tempfile.NamedTemporaryFile(prefix='auto_specials',
            suffix='.csv',
            dir='.')
        tempFile = handle.name
        handle.close()

        outputFile = generateDataset(ai, 'auto_specials.csv', tempFile)

        result = []
        with FileRecordStream(outputFile) as f:
            print f.getFields()
            for r in f:
                result.append(r)

        self.assertEqual(result[0][2], 1)  # reset
        self.assertEqual(result[0][3], 1)  # seq id
        self.assertEqual(result[0][0], 'dummy-1')
        self.assertEqual(result[1][2], 1)  # reset
        self.assertEqual(result[1][3], 2)  # seq id
        self.assertEqual(result[1][0], 'dummy-4')

        return
Example #14
def createSensors(network, sensors):
    for sensor in sensors:
        dataSource = FileRecordStream(streamID=sensor["source"])
        dataSource.setAutoRewind(True)
        encoder = MultiEncoder()
        encoder.addMultipleEncoders(fieldEncodings=sensor["encodings"])
        s = createRegion(network, sensor)
        s = s.getSelf()
        s.dataSource = dataSource
        s.encoder = encoder
    return network
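
A hypothetical sensors specification for createSensors(); only the "source" and "encodings" keys are read in the snippet above, and any additional keys needed by createRegion() are assumptions.

sensors = [{
    "name": "consumptionSensor",                  # assumed key used by createRegion()
    "source": "data/gym.csv",                     # CSV consumed by FileRecordStream
    "encodings": {
        "consumption": {"fieldname": "consumption",
                        "name": "consumption",
                        "type": "RandomDistributedScalarEncoder",
                        "resolution": 0.88},
    },
}]
network = createSensors(network, sensors)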
Example #15
    def _openStream(self, dataUrl, isBlocking, maxTimeout, bookmark,
                    firstRecordIdx):
        """Open the underlying file stream.

    This only supports 'file://' prefixed paths.
    """
        self._recordStoreName = findDataset(dataUrl[len(FILE_PREF):])
        self._recordStore = FileRecordStream(streamID=self._recordStoreName,
                                             write=False,
                                             bookmark=bookmark,
                                             firstRecord=firstRecordIdx)
Example #16
def runDemo():
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
  numRecords = dataSource.getDataRowCount()
  print "Creating network"
  network = createNetwork(dataSource)
  outputPath = os.path.join(os.path.dirname(__file__), _OUTPUT_FILE_NAME)
  with open(outputPath, "w") as outputFile:
    writer = csv.writer(outputFile)
    print "Running network"
    print "Writing output to: %s" % outputPath
    runNetwork(network, numRecords, writer)
  print "Hierarchy demo finished"
Example #17
def runDemo():
    dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
    numRecords = dataSource.getDataRowCount()
    print "Creating network"
    network = createNetwork(dataSource)
    outputPath = os.path.join(os.path.dirname(__file__), _OUTPUT_FILE_NAME)
    with open(outputPath, "w") as outputFile:
        writer = csv.writer(outputFile)
        print "Running network"
        print "Writing output to: %s" % outputPath
        runNetwork(network, numRecords, writer)
    print "Hierarchy demo finished"
Example #18
    def _openStream(self, dataUrl, isBlocking, maxTimeout, bookmark,
                    firstRecordIdx):
        """Open the underlying file stream.

    This only supports 'file://' prefixed paths.
    """
        filePath = dataUrl[len(FILE_PREF):]
        if not os.path.isabs(filePath):
            filePath = os.path.join(os.getcwd(), filePath)
        self._recordStoreName = filePath
        self._recordStore = FileRecordStream(streamID=self._recordStoreName,
                                             write=False,
                                             bookmark=bookmark,
                                             firstRecord=firstRecordIdx)
Example #19
def run(numRecords):
    '''
    Run the sine prediction example.
    '''

    # Create a data source for the network.
    dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
    numRecords = min(numRecords, dataSource.getDataRowCount())
    network = createNetwork(dataSource)

    network.regions["sensor"].getSelf().predictedField = "sine"

    # Set predicted field
    network.regions["sensor"].setParameter("predictedField", "sine")

    # Enable learning for all regions.
    network.regions["SP"].setParameter("learningMode", 1)
    network.regions["TM"].setParameter("learningMode", 1)
    network.regions["classifier"].setParameter("learningMode", 1)

    # Enable inference for all regions.
    network.regions["SP"].setParameter("inferenceMode", 1)
    network.regions["TM"].setParameter("inferenceMode", 1)
    network.regions["classifier"].setParameter("inferenceMode", 1)

    results = []
    N = 1  # Run the network, N iterations at a time.
    output = nupic_output.NuPICPlotOutput("Sine", show_anomaly_score=True)
    for iteration in range(0, numRecords, N):
        network.run(N)

        sine = network.regions["sensor"].getOutputData("sourceOut")[0]

        predictionResults = getPredictionResults(network, "classifier")
        oneStep = predictionResults[1]["predictedValue"]
        oneStepConfidence = predictionResults[1]["predictionConfidence"]
        tenStep = predictionResults[10]["predictedValue"]
        tenStepConfidence = predictionResults[10]["predictionConfidence"]

        result = (oneStep, oneStepConfidence * 100, tenStep,
                  tenStepConfidence * 100)
        print "1-step: {:16} ({:4.4}%)\t 10-step: {:16} ({:4.4}%)".format(
            *result)
        results.append(result)

        output.write(sine, oneStep, 0)

    output.close()

    return results
Example #20
def main(args):
    inputPath, outputPath, action = args[:3]
    with FileRecordStream(inputPath) as reader:
        with FileRecordStream(outputPath, write=True,
                              fields=reader.fields) as writer:
            assert action in Actions.ACTIONS, USAGE
            if action == Actions.ADD:
                assert len(args) == 7, USAGE
                start = int(args[4])
                stop = int(args[5])
                column = int(args[3])
                valueType = eval(reader.fields[column][1])
                value = valueType(args[6])
                add(reader, writer, column, start, stop, value)
            elif action == Actions.SCALE:
                assert len(args) == 7, USAGE
                start = int(args[4])
                stop = int(args[5])
                column = int(args[3])
                valueType = eval(reader.fields[column][1])
                multiple = valueType(args[6])
                scale(reader, writer, column, start, stop, multiple)
            elif action == Actions.COPY:
                assert 5 <= len(args) <= 8, USAGE
                start = int(args[3])
                stop = int(args[4])
                if len(args) > 5:
                    insertLocation = int(args[5])
                else:
                    insertLocation = None
                if len(args) == 7:
                    tsCol = int(args[6])
                else:
                    tsCol = None
                copy(reader, writer, start, stop, insertLocation, tsCol)
            elif action == Actions.SAMPLE or action == Actions.SAMPLE2:
                assert 4 <= len(args) <= 7, USAGE
                n = int(args[3])
                start = None
                if len(args) > 4:
                    start = int(args[4])
                stop = None
                if len(args) > 5:
                    stop = int(args[5])
                tsCol = None
                if len(args) > 6:
                    tsCol = int(args[6])
                writeSampleOnly = action == Actions.SAMPLE
                sample(reader, writer, n, start, stop, tsCol, writeSampleOnly)
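
For reference, a hypothetical invocation matching the argument layout parsed above (file names are placeholders): add 5 to column 1 of rows 0 through 10.

# args = [inputPath, outputPath, action, column, start, stop, value]
main(["in.csv", "out.csv", Actions.ADD, "1", "0", "10", "5"])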
Example #21
def createAndRunNetwork(testRegionType,
                        testOutputName,
                        checkpointMidway=False,
                        temporalImp=None):
    dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)

    if temporalImp is None:
        network = createNetwork(dataSource)
    else:
        network = createNetwork(dataSource,
                                enableTP=True,
                                temporalImp=temporalImp)
    network.initialize()

    results = []

    for i in xrange(_NUM_RECORDS):
        if checkpointMidway and i == (_NUM_RECORDS / 2):
            network = saveAndLoadNetwork(network)

        # Run the network for a single iteration
        network.run(1)

        testRegion = network.getRegionsByType(testRegionType)[0]
        output = testRegion.getOutputData(testOutputName).copy()
        results.append(output)

    return results
Example #22
def aggregate(dataPath, outputPath, days=0, hours=0):
  with FileRecordStream(dataPath) as reader:
    aggregator = Aggregator({'fields': [('messages', 'sum')],
                             'days': days,
                             'hours': hours},
                            reader.getFields())

    with open(outputPath, 'w') as outfile:
      writer = csv.writer(outfile)

      writer.writerow(['timestamp', 'messages'])
      writer.writerow(['datetime', 'int'])
      writer.writerow(['T', ''])

      while True:
        inRecord = reader.getNextRecord()
        bookmark = reader.getBookmark()

        (aggRecord, aggBookmark) = aggregator.next(inRecord, bookmark)

        # reached EOF?
        if inRecord is None and aggRecord is None:
          break

        if aggRecord is not None:
          timestamp = aggRecord[0].strftime('%Y-%m-%d %H:%M:%S.0')
          writer.writerow([timestamp, aggRecord[1]])
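
A usage sketch with placeholder file names: roll a per-message log up into one row per day.

aggregate("messages.csv", "messages_daily.csv", days=1)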
Example #23
File: sorter.py, Project: ldd2816/nupic
def _sortChunk(records, key, chunkIndex, fields):
  """Sort in memory chunk of records

  records - a list of records read from the original dataset
  key - a list of indices to sort the records by
  chunkIndex - the index of the current chunk

  The records contain only the fields requested by the user.

  _sortChunk() will write the sorted records to a standard File
  named "chunk_<chunk index>.csv" (chunk_0.csv, chunk_1.csv,...).
  """
  title(additional='(key=%s, chunkIndex=%d)' % (str(key), chunkIndex))

  assert len(records) > 0

  # Sort the current records
  records.sort(key=itemgetter(*key))

  # Write to a chunk file
  if chunkIndex is not None:
    filename = 'chunk_%d.csv' % chunkIndex
    with FileRecordStream(filename, write=True, fields=fields) as o:
      for r in records:
        o.appendRecord(r)

    assert os.path.getsize(filename) > 0

  return records
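
A usage sketch with made-up records: sort ten two-field rows by both columns and write them to chunk_0.csv.

fields = [('a', 'int', ''), ('b', 'int', '')]          # hypothetical field list
records = [[i % 3, 10 - i] for i in range(10)]         # hypothetical data
_sortChunk(records, key=[0, 1], chunkIndex=0, fields=fields)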
Example #24
  def initialize(self):
    """
    Initialize this node.
    """

    Node.initialize(self)

    # Initialize input bits
    self.bits = []
    for x in range(self.width):
      for y in range(self.height):
        bit = Bit()
        bit.x = x
        bit.y = y
        self.bits.append(bit)

    if self.dataSourceType == DataSourceType.file:
      """
      Initialize this node by opening the file and placing the cursor on the first record.
      """

      # If the file name provided is a relative path, resolve it against the project file path
      if self.fileName != '' and os.path.dirname(self.fileName) == '':
        fullFileName = os.path.dirname(Global.project.fileName) + '/' + self.fileName
      else:
        fullFileName = self.fileName

      # Check if file really exists
      if not os.path.isfile(fullFileName):
        QtGui.QMessageBox.warning(None, "Warning", "Input stream file '" + fullFileName + "' was not found or specified.", QtGui.QMessageBox.Ok)
        return

      # Create a data source to read the file
      self.dataSource = FileRecordStream(fullFileName)

    elif self.dataSourceType == DataSourceType.database:
      pass

    self.encoder = MultiEncoder()
    for encoding in self.encodings:
      encoding.initialize()

      # Instantiate the encoder class from its module, class name, and constructor params
      encoding.encoder = getInstantiatedClass(encoding.encoderModule, encoding.encoderClass, encoding.encoderParams)

      # Take the first part of encoder field name as encoder name
      # Ex: timestamp_weekend.weekend => timestamp_weekend
      encoding.encoder.name = encoding.encoderFieldName.split('.')[0]

      # Add sub-encoder to multi-encoder list
      self.encoder.addEncoder(encoding.dataSourceFieldName, encoding.encoder)

    # If the encoder size exceeds the sensor size, warn and abort initialization
    encoderSize = self.encoder.getWidth()
    sensorSize = self.width * self.height
    if encoderSize > sensorSize:
      QtGui.QMessageBox.warning(None, "Warning", "'" + self.name + "': Encoder size (" + str(encoderSize) + ") is different from sensor size (" + str(self.width) + " x " + str(self.height) + " = " + str(sensorSize) + ").", QtGui.QMessageBox.Ok)
      return

    return True
Example #25
def _createNetwork():
  """Create network with one RecordSensor region."""
  network = Network()
  network.addRegion('sensor', 'py.RecordSensor', '{}')
  sensorRegion = network.regions['sensor'].getSelf()

  # Add an encoder.
  encoderParams = {'consumption': {'fieldname': 'consumption',
                                   'resolution': 0.88,
                                   'seed': 1,
                                   'name': 'consumption',
                                   'type': 'RandomDistributedScalarEncoder'}}

  encoder = MultiEncoder()
  encoder.addMultipleEncoders(encoderParams)
  sensorRegion.encoder = encoder

  # Add a data source.
  testDir = os.path.dirname(os.path.abspath(__file__))
  inputFile = os.path.join(testDir, 'fixtures', 'gymdata-test.csv')
  dataSource = FileRecordStream(streamID=inputFile)
  sensorRegion.dataSource = dataSource

  # Get and set what field index we want to predict.
  network.regions['sensor'].setParameter('predictedField', 'consumption')

  return network
Example #26
    def test_GenerateDataset(self):
        dataset = 'extra/gym/gym.csv'

        print "Using input dataset: ", dataset

        gymFields = None
        with FileRecordStream(findDataset(dataset)) as f:
            gymFields = f.getFieldNames()

        aggregationOptions = dict(timeField=gymFields.index('timestamp'),
                                  fields=[('attendeeCount', sum),
                                          ('consumption', sum),
                                          ('timestamp', lambda x: x[0])],
                                  hours=5)

        handle = \
          tempfile.NamedTemporaryFile(prefix='agg_gym_hours_5',
            suffix='.csv',
            dir=os.path.dirname(findDataset(dataset)))
        outputFile = handle.name
        handle.close()

        print "Expected outputFile path: ", outputFile

        print "Files in the destination folder before the test:"
        print os.listdir(os.path.abspath(os.path.dirname(
            findDataset(dataset))))

        if os.path.isfile(outputFile):
            print "Removing existing outputFile: ", outputFile
            os.remove(outputFile)

        self.assertFalse(os.path.exists(outputFile),
                         msg="Shouldn't exist, but does: " + str(outputFile))

        result = generateDataset(aggregationOptions, dataset, outputFile)
        print "generateDataset() returned: ", result

        f1 = os.path.abspath(os.path.normpath(result))
        print "normalized generateDataset() result path: ", f1
        f2 = os.path.normpath(outputFile)
        print "normalized outputFile path: ", f2
        self.assertEqual(f1, f2)

        print "Checking for presence of outputFile: ", outputFile
        self.assertTrue(
            os.path.isfile(outputFile),
            msg=
            "Missing outputFile: %r; normalized generateDataset() result: %r" %
            (outputFile, f1))

        print "Files in the destination folder after the test:"
        print os.listdir(os.path.abspath(os.path.dirname(
            findDataset(dataset))))

        print result
        print '-' * 30

        return
Example #27
    def initModel(self):
        """
    Initialize the network; self.networkDataPath must already be set.
    """
        recordStream = FileRecordStream(streamID=self.networkDataPath)
        encoder = CioEncoder(cacheDir="./experiments/cache")

        return configureNetwork(recordStream, self.networkConfig, encoder)
Example #28
def _generateScalar(filename="simple.csv", numSequences=2, elementsPerSeq=1, 
                    numRepeats=10, stepSize=0.1, resets=False):
  """ Generate a simple dataset. This contains a bunch of non-overlapping
  sequences of scalar values. 
  
  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the 
                  directory containing this script. 
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output
  stepSize:       how far apart each scalar is 
  resets:         if True, turn on reset at start of each sequence
  """
  
  # Create the output file
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  print "Creating %s..." % (pathname)
  fields = [('reset', 'int', 'R'), ('category', 'int', 'C'),
            ('field1', 'float', '')]  
  outFile = FileRecordStream(pathname, write=True, fields=fields)
  
  # Create the sequences
  sequences = []
  for i in range(numSequences):
    seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
    sequences.append(seq)
  
  # Write out the sequences in random order
  seqIdxs = []
  for i in range(numRepeats):
    seqIdxs += range(numSequences)
  random.shuffle(seqIdxs)
  
  for seqIdx in seqIdxs:
    reset = int(resets)
    seq = sequences[seqIdx]
    for x in seq:
      outFile.appendRecord([reset, str(seqIdx), x*stepSize])
      reset = 0

  outFile.close()
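
A usage sketch: generate a small scalar dataset (the file name is a placeholder) and read the first record back with FileRecordStream.

_generateScalar('scalar_demo.csv', numSequences=3, elementsPerSeq=2,
                numRepeats=5, stepSize=0.5, resets=True)
demoPath = os.path.join(os.path.dirname(__file__), 'datasets', 'scalar_demo.csv')
with FileRecordStream(demoPath) as f:
  print f.getFieldNames()
  print f.getNextRecord()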
Example #29
    def _openStream(self, dataUrl, isBlocking, maxTimeout, bookmark, firstRecordIdx):
        """Open the underlying file stream.

    This only supports 'file://' prefixed paths.
    """
        self._recordStoreName = findDataset(dataUrl[len(FILE_PREF) :])
        self._recordStore = FileRecordStream(
            streamID=self._recordStoreName, write=False, bookmark=bookmark, firstRecord=firstRecordIdx
        )
Example #30
def _generateScalar(filename="simple.csv",
                    numSequences=2,
                    elementsPerSeq=1,
                    numRepeats=10,
                    stepSize=0.1,
                    includeRandom=False):
    """ Generate a simple dataset. This contains a bunch of non-overlapping
  sequences of scalar values. 
  
  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the 
                  directory containing this script. 
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output
  stepSize:       how far apart each scalar is 
  includeRandom:  if true, include another random field
  """

    # Create the output file
    scriptDir = os.path.dirname(__file__)
    pathname = os.path.join(scriptDir, 'datasets', filename)
    print "Creating %s..." % (pathname)
    fields = [('classification', 'float', ''), ('field1', 'float', '')]
    if includeRandom:
        fields += [('randomData', 'float', '')]

    outFile = FileRecordStream(pathname, write=True, fields=fields)

    # Create the sequences
    sequences = []
    for i in range(numSequences):
        seq = [x for x in range(i * elementsPerSeq, (i + 1) * elementsPerSeq)]
        sequences.append(seq)

    random.seed(42)

    # Write out the sequences in random order
    seqIdxs = []
    for i in range(numRepeats):
        seqIdxs += range(numSequences)
    random.shuffle(seqIdxs)

    for seqIdx in seqIdxs:
        seq = sequences[seqIdx]
        for x in seq:
            if includeRandom:
                outFile.appendRecord([seqIdx, x * stepSize, random.random()])
            else:
                outFile.appendRecord([seqIdx, x * stepSize])

    outFile.close()
Example #31
  def test_GymAggregate(self):
    filename = resource_filename(
      "nupic.datafiles", "extra/gym/gym.csv"
    )

    input = []

    gymFields = None

    with FileRecordStream(filename) as f:
      gymFields = f.getFields()
      for i in range(10):
        input.append(f.getNextRecord())

    for h in (1,3):
      aggregationOptions = dict(
        fields=[
          ('timestamp', lambda x: x[0],),
          ('attendeeCount', sum),
          ('consumption', sum)],
        hours=h
      )


      handle = \
        tempfile.NamedTemporaryFile(prefix='test', 
          suffix='.bin')
      outputFile = handle.name
      handle.close()
      
      dataInput = DataInputList(input, gymFields)
      dataOutput = DataOutputMyFile(FileRecordStream(outputFile, write=True,
                                                     fields=gymFields))

      _aggregate(input=dataInput, options=aggregationOptions, 
                 timeFieldName='timestamp', output=dataOutput)

      dataOutput.close()

      for r in FileRecordStream(outputFile):
        print(r)
      print('-' * 30)

    return
Example #32
 def testSample(self):
     mockInput = MagicMock(return_value=StringIO(self.sampleInput))
     output = StringIO()
     mockOutput = MagicMock(return_value=output)
     with patch("__builtin__.open", mockInput):
         inputFile = FileRecordStream("input_path")
         with patch("__builtin__.open", mockOutput):
             outputFile = FileRecordStream("output_path",
                                           fields=inputFile.getFields(),
                                           write=True)
             anomalyzer.sample(inputFile, outputFile, 1)
     result = StringIO(output.getvalue())
     result.next()
     result.next()
     result.next()
     reader = csv.reader(result)
     _, value = reader.next()
     self.assertIn(int(value), (1, 2, 3, 4, 5, 6))
     self.assertRaises(StopIteration, result.next)
Example #33
File: sorter.py, Project: yangzxstar/nupic
def _mergeFiles(key, chunkCount, outputFile, fields):
    """Merge sorted chunk files into a sorted output file

  chunkCount - the number of available chunk files
  outputFile - the name of the sorted output file

  _mergeFiles()

  """
    title()

    # Open all chunk files
    files = [FileRecordStream('chunk_%d.csv' % i) for i in range(chunkCount)]

    # Open output file
    with FileRecordStream(outputFile, write=True, fields=fields) as o:
        records = [f.getNextRecord() for f in files]

        # This loop will run until all files are exhausted
        while not all(r is None for r in records):
            # Cleanup None values (files that were exhausted)
            indices = [i for i, r in enumerate(records) if r is not None]
            records = [records[i] for i in indices]
            files = [files[i] for i in indices]

            # Find the current record
            r = min(records, key=itemgetter(*key))
            # Write it to the file
            o.appendRecord(r)

            # Find the index of file that produced the current record
            index = records.index(r)
            # Read a new record from the file
            records[index] = files[index].getNextRecord()

    # Cleanup chunk files
    for i, f in enumerate(files):
        f.close()
        os.remove('chunk_%d.csv' % i)
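
A sketch of the external-sort flow these helpers implement together: sort each in-memory chunk with _sortChunk() (see Example #23), then merge the resulting chunk files; 'chunks' and 'fields' below are hypothetical.

for i, chunk in enumerate(chunks):
    _sortChunk(chunk, key=[0], chunkIndex=i, fields=fields)
_mergeFiles(key=[0], chunkCount=len(chunks), outputFile='sorted.csv',
            fields=fields)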
Example #34
 def testSample(self):
   mockInput = MagicMock(return_value=StringIO(self.sampleInput))
   output = StringIO()
   mockOutput = MagicMock(return_value=output)
   with patch("__builtin__.open", mockInput):
     inputFile = FileRecordStream("input_path")
     with patch("__builtin__.open", mockOutput):
       outputFile = FileRecordStream("output_path",
                                     fields=inputFile.getFields(),
                                     write=True)
       anomalyzer.sample(inputFile, outputFile, 1)
   result = StringIO(output.getvalue())
   result.next()
   result.next()
   result.next()
   reader = csv.reader(result)
   _, value = reader.next()
   self.assertIn(int(value), (1, 2, 3, 4, 5, 6))
   self.assertRaises(StopIteration, result.next)
Example #35
def run():
    """ Run classification network(s) on artificial sensor data """
    with open("network_config_template.json", "rb") as jsonFile:
        templateNetworkConfig = json.load(jsonFile)

    networkConfigurations = generateSampleNetworkConfig(
        templateNetworkConfig, NUM_CATEGORIES)

    for networkConfig in networkConfigurations:
        for noiseAmplitude in WHITE_NOISE_AMPLITUDES:
            for signalMean in SIGNAL_MEANS:
                for signalAmplitude in SIGNAL_AMPLITUDES:
                    for signalPeriod in SIGNAL_PERIODS:
                        sensorType = networkConfig["sensorRegionConfig"].get(
                            "regionType")
                        spEnabled = networkConfig["sensorRegionConfig"].get(
                            "regionEnabled")
                        tmEnabled = networkConfig["tmRegionConfig"].get(
                            "regionEnabled")
                        upEnabled = networkConfig["tpRegionConfig"].get(
                            "regionEnabled")
                        classifierType = networkConfig[
                            "classifierRegionConfig"].get("regionType")

                        expParams = (
                            "RUNNING EXPERIMENT WITH PARAMS:\n"
                            " * numRecords=%s\n"
                            " * signalAmplitude=%s\n"
                            " * signalMean=%s\n"
                            " * signalPeriod=%s\n"
                            " * noiseAmplitude=%s\n"
                            " * sensorType=%s\n"
                            " * spEnabled=%s\n"
                            " * tmEnabled=%s\n"
                            " * tpEnabled=%s\n"
                            " * classifierType=%s\n") % (
                                NUM_RECORDS, signalAmplitude, signalMean,
                                signalPeriod, noiseAmplitude,
                                sensorType.split(".")[1], spEnabled, tmEnabled,
                                upEnabled, classifierType.split(".")[1])
                        print expParams

                        inputFile = generateSensorData(
                            DATA_DIR, OUTFILE_NAME, signalMean, signalPeriod,
                            SEQUENCE_LENGTH, NUM_RECORDS, signalAmplitude,
                            NUM_CATEGORIES, noiseAmplitude)

                        dataSource = FileRecordStream(streamID=inputFile)
                        network = configureNetwork(dataSource, networkConfig)
                        partitions = generateNetworkPartitions(
                            networkConfig, NUM_RECORDS)

                        trainNetwork(network, networkConfig, partitions,
                                     NUM_RECORDS)
Example #36
def _generateCategory(filename="simple.csv", numSequences=2, elementsPerSeq=1, 
                    numRepeats=10):
  """ Generate a simple dataset. This contains a bunch of non-overlapping
  sequences. 
  
  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the 
                  directory containing this script. 
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output 
  """
  
  # Create the output file
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  print "Creating %s..." % (pathname)
  fields = [('classification', 'string', ''), 
            ('field1', 'string', '')]  
  outFile = FileRecordStream(pathname, write=True, fields=fields)
  
  # Create the sequences
  sequences = []
  for i in range(numSequences):
    seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
    sequences.append(seq)
  
  # Write out the sequences in random order
  seqIdxs = []
  for i in range(numRepeats):
    seqIdxs += range(numSequences)
  random.shuffle(seqIdxs)
  
  for seqIdx in seqIdxs:
    seq = sequences[seqIdx]
    for x in seq:
      outFile.appendRecord([str(seqIdx), str(x)])

  outFile.close()
Example #37
  def test_GymAggregateWithOldData(self):
    filename = resource_filename(
      "nupic.datafiles", "extra/gym/gym.csv"
    )

    input = []

    gymFields = None

    with FileRecordStream(filename) as f:
      gymFields = f.getFields()
      for i in range(10):
        input.append(f.getNextRecord())

    #Append the records from the beginning to the end of the dataset
    input.extend(input[0:3])
    for h in (1,3):
      aggregationOptions = dict(
        fields=[
          ('timestamp', lambda x: x[0],),
          ('attendeeCount', sum),
          ('consumption', sum)],
        hours=h
      )


      handle = \
        tempfile.NamedTemporaryFile(prefix='test', 
          suffix='.bin')
      outputFile = handle.name
      handle.close()
      
      dataInput = DataInputList(input, gymFields)
      dataOutput = DataOutputList(None)

      _aggregate(input=dataInput, options=aggregationOptions, 
                 timeFieldName='timestamp', output=dataOutput)
      dataOutput.close()

      outputRecords = dataOutput._store
      
      timeFieldIdx = [f[0] for f in gymFields].index('timestamp')
      diffs = []
      for i in range(1,len(outputRecords)):
        diffs.append(outputRecords[i][timeFieldIdx] - \
                     outputRecords[i-1][timeFieldIdx])
      positiveTimeFlow = list(map((lambda x: x < datetime.timedelta(seconds=0)), 
                            diffs))
      #Make sure that old records are in the aggregated output and at the same
      #time make sure that they are in consecutive order after being inserted
      self.assertEqual(sum(positiveTimeFlow), 1)
        
    return
Example #38
    def testFileRecordStreamReadData(self):
        ndg = NetworkDataGenerator()
        filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
        ndg.split(filename, 3, False)
        dataOutputFile = os.path.join(self.dirName,
                                      "test_data/multi_sample_split.csv")
        categoriesOutputFile = os.path.join(
            self.dirName, "test_data/multi_sample_categories.json")
        ndg.saveData(dataOutputFile, categoriesOutputFile)

        # If no error is raised, then the data is in the correct format
        frs = FileRecordStream(dataOutputFile)
Example #39
def runHotgym(numRecords):
    """Run the Hot Gym example."""

    # Create a data source for the network.
    dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
    numRecords = min(numRecords, dataSource.getDataRowCount())
    network = createNetwork(dataSource)

    # Set predicted field index. It needs to be the same index as the data source.
    predictedIdx = dataSource.getFieldNames().index("consumption")
    network.regions["sensor"].setParameter("predictedFieldIdx", predictedIdx)

    # Enable learning for all regions.
    network.regions["SP"].setParameter("learningMode", 1)
    network.regions["TM"].setParameter("learningMode", 1)
    network.regions["classifier"].setParameter("learningMode", 1)

    # Enable inference for all regions.
    network.regions["SP"].setParameter("inferenceMode", 1)
    network.regions["TM"].setParameter("inferenceMode", 1)
    network.regions["classifier"].setParameter("inferenceMode", 1)

    results = []
    N = 1  # Run the network, N iterations at a time.
    for iteration in range(0, numRecords, N):
        network.run(N)

        predictionResults = getPredictionResults(network, "classifier")
        oneStep = predictionResults[1]["predictedValue"]
        oneStepConfidence = predictionResults[1]["predictionConfidence"]
        fiveStep = predictionResults[5]["predictedValue"]
        fiveStepConfidence = predictionResults[5]["predictionConfidence"]

        result = (oneStep, oneStepConfidence * 100, fiveStep,
                  fiveStepConfidence * 100)
        print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(
            *result)
        results.append(result)

    return results
Example #40
def runHotgym(numRecords):
  """Run the Hot Gym example."""

  # Create a data source for the network.
  dataSource = FileRecordStream(streamID=_INPUT_FILE_PATH)
  numRecords = min(numRecords, dataSource.getDataRowCount())
  network = createNetwork(dataSource)

  # Set predicted field index. It needs to be the same index as the data source.
  predictedIdx = dataSource.getFieldNames().index("consumption")
  network.regions["sensor"].setParameter("predictedFieldIdx", predictedIdx)

  # Enable learning for all regions.
  network.regions["SP"].setParameter("learningMode", 1)
  network.regions["TM"].setParameter("learningMode", 1)
  network.regions["classifier"].setParameter("learningMode", 1)

  # Enable inference for all regions.
  network.regions["SP"].setParameter("inferenceMode", 1)
  network.regions["TM"].setParameter("inferenceMode", 1)
  network.regions["classifier"].setParameter("inferenceMode", 1)

  results = []
  N = 1  # Run the network, N iterations at a time.
  for iteration in range(0, numRecords, N):
    network.run(N)

    predictionResults = getPredictionResults(network, "classifier")
    oneStep = predictionResults[1]["predictedValue"]
    oneStepConfidence = predictionResults[1]["predictionConfidence"]
    fiveStep = predictionResults[5]["predictedValue"]
    fiveStepConfidence = predictionResults[5]["predictionConfidence"]

    result = (oneStep, oneStepConfidence * 100,
              fiveStep, fiveStepConfidence * 100)
    print "1-step: {:16} ({:4.4}%)\t 5-step: {:16} ({:4.4}%)".format(*result)
    results.append(result)

  return results
Example #41
  def _openStream(self, dataUrl, isBlocking, maxTimeout, bookmark,
                  firstRecordIdx):
    """Open the underlying file stream.

    This only supports 'file://' prefixed paths.
    """
    filePath = dataUrl[len(FILE_PREF):]
    if not os.path.isabs(filePath):
      filePath = os.path.join(os.getcwd(), filePath)
    self._recordStoreName = filePath 
    self._recordStore = FileRecordStream(streamID=self._recordStoreName,
                                         write=False,
                                         bookmark=bookmark,
                                         firstRecord=firstRecordIdx)
Example #42
def _generateScalar(
    filename="simple.csv", numSequences=2, elementsPerSeq=1, numRepeats=10, stepSize=0.1, includeRandom=False
):
    """ Generate a simple dataset. This contains a bunch of non-overlapping
  sequences of scalar values. 
  
  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the 
                  directory containing this script. 
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output
  stepSize:       how far apart each scalar is 
  includeRandom:  if true, include another random field
  """

    # Create the output file
    scriptDir = os.path.dirname(__file__)
    pathname = os.path.join(scriptDir, "datasets", filename)
    print "Creating %s..." % (pathname)
    fields = [("classification", "float", ""), ("field1", "float", "")]
    if includeRandom:
        fields += [("randomData", "float", "")]

    outFile = FileRecordStream(pathname, write=True, fields=fields)

    # Create the sequences
    sequences = []
    for i in range(numSequences):
        seq = [x for x in range(i * elementsPerSeq, (i + 1) * elementsPerSeq)]
        sequences.append(seq)

    random.seed(42)

    # Write out the sequences in random order
    seqIdxs = []
    for i in range(numRepeats):
        seqIdxs += range(numSequences)
    random.shuffle(seqIdxs)

    for seqIdx in seqIdxs:
        seq = sequences[seqIdx]
        for x in seq:
            if includeRandom:
                outFile.appendRecord([seqIdx, x * stepSize, random.random()])
            else:
                outFile.appendRecord([seqIdx, x * stepSize])

    outFile.close()
Example #43
File: sorter.py, Project: ldd2816/nupic
def writeTestFile(testFile, fields, big):
  if big:
    print 'Creating big test file (763MB)...'
    payload = 'x' * 10 ** 8
  else:
    print 'Creating a small test file...'
    payload = 'x' * 3
  with FileRecordStream(testFile, write=True, fields=fields) as o:
    print '.'; o.appendRecord([1,3,6, payload])
    print '.'; o.appendRecord([2,3,6, payload])
    print '.'; o.appendRecord([1,4,6, payload])
    print '.'; o.appendRecord([2,4,6, payload])
    print '.'; o.appendRecord([1,3,5, payload])
    print '.'; o.appendRecord([2,3,5, payload])
    print '.'; o.appendRecord([1,4,5, payload])
    print '.'; o.appendRecord([2,4,5, payload])
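
A hypothetical field list matching the four-element records appended above (three integer sort keys plus a string payload):

fields = [('key1', 'int', ''), ('key2', 'int', ''),
          ('key3', 'int', ''), ('payload', 'string', '')]
writeTestFile('test.csv', fields, big=False)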
Example #44
    def testSaveAndReload(self):
        """
    This function tests saving and loading. It will train a network for 500
    iterations, then save it and reload it as a second network instance. It will
    then run both networks for 100 iterations and ensure they return identical
    results.
    """

        print "Creating network..."

        netOPF = _createOPFNetwork()
        level1OPF = netOPF.regions['level1SP']

        # ==========================================================================
        print "Training network for 500 iterations"
        level1OPF.setParameter('learningMode', 1)
        level1OPF.setParameter('inferenceMode', 0)
        netOPF.run(500)
        level1OPF.setParameter('learningMode', 0)
        level1OPF.setParameter('inferenceMode', 1)

        # ==========================================================================
        # Save network and reload as a second instance. We need to reset the data
        # source for the unsaved network so that both instances start at the same
        # place
        print "Saving and reload network"
        _, tmpNetworkFilename = _setupTempDirectory("trained.nta")
        netOPF.save(tmpNetworkFilename)
        netOPF2 = Network(tmpNetworkFilename)
        level1OPF2 = netOPF2.regions['level1SP']

        sensor = netOPF.regions['sensor'].getSelf()
        trainFile = resource_filename("nupic.datafiles", "extra/gym/gym.csv")
        sensor.dataSource = FileRecordStream(streamID=trainFile)
        sensor.dataSource.setAutoRewind(True)

        # ==========================================================================
        print "Running inference on the two networks for 100 iterations"
        for _ in xrange(100):
            netOPF2.run(1)
            netOPF.run(1)
            l1outputOPF2 = level1OPF2.getOutputData("bottomUpOut")
            l1outputOPF = level1OPF.getOutputData("bottomUpOut")
            opfHash2 = l1outputOPF2.nonzero()[0].sum()
            opfHash = l1outputOPF.nonzero()[0].sum()

            self.assertEqual(opfHash2, opfHash)
Example #45
    def initModel(self):
        """
    Initialize the network; self.networkDataPath must already be set.
    """
        if self.networkDataPath is not None:
            recordStream = FileRecordStream(streamID=self.networkDataPath)
        else:
            recordStream = None

        root = os.path.dirname(os.path.realpath(__file__))
        encoder = CioEncoder(retinaScaling=self.retinaScaling,
                             cacheDir=os.path.join(root, "CioCache"),
                             retina=self.retina,
                             apiKey=self.apiKey)

        # This encoder specifies the LanguageSensor output width.
        return configureNetwork(recordStream, self.networkConfig, encoder)
Example #46
  def __init__(self, streamDef, bookmark=None, saveOutput=False,
               isBlocking=True, maxTimeout=0, eofOnTimeout=False):
    """ Base class constructor, performs common initialization

    Parameters:
    ----------------------------------------------------------------
    streamDef:  The stream definition, potentially containing multiple sources
                (not supported yet). See
                /nupic/frameworks/opf/jsonschema/stream_def.json for the format
                of this dict

    bookmark: Bookmark to start reading from. This overrides the first_record
                field of the streamDef if provided.

    saveOutput: If true, save the output to a csv file in a temp directory.
                The path to the generated file can be found in the log
                output.

    isBlocking: should the read operation block *forever* if the next row of
                data is not available but the stream is not yet marked as
                'completed'?

    maxTimeout: if isBlocking is False, max seconds to wait for more data before
                timing out; ignored when isBlocking is True.

    eofOnTimeout: If True and we get a read timeout (isBlocking must be False
                to get read timeouts), assume we've reached the end of the
                input and produce the last aggregated record, if one can be
                completed.

    """

    # Call superclass constructor
    super(StreamReader, self).__init__()

    loggerPrefix = 'com.numenta.nupic.data.StreamReader'
    self._logger = logging.getLogger(loggerPrefix)
    jsonhelpers.validate(streamDef,
                         schemaPath=pkg_resources.resource_filename(
                             jsonschema.__name__, "stream_def.json"))
    assert len(streamDef['streams']) == 1, "Only 1 source stream is supported"

    # Save constructor args
    sourceDict = streamDef['streams'][0]
    self._recordCount = 0
    self._eofOnTimeout = eofOnTimeout
    self._logger.debug('Reading stream with the def: %s', sourceDict)

    # Dictionary to store record statistics (min and max of scalars for now)
    self._stats = None

    # ---------------------------------------------------------------------
    # Get the stream definition params

    # Limiting window of the stream: no records are returned until the record
    # with ID 'first_record' (or the first record with a higher ID) is read.
    # The stream returns EOS once it reads a record with ID 'last_record' or
    # above (NOTE: the name is misleading because 'last_record' is NOT
    #  inclusive).
    firstRecordIdx = sourceDict.get('first_record', None)
    self._sourceLastRecordIdx = sourceDict.get('last_record', None)

    # If a bookmark was given, then override first_record from the stream
    #  definition.
    if bookmark is not None:
      firstRecordIdx = None


    # Column names must be provided in the streamdef json
    # Special case is ['*'], meaning all available names from the record stream
    self._streamFieldNames = sourceDict.get('columns', None)
    if self._streamFieldNames is not None and self._streamFieldNames[0] == '*':
      self._needFieldsFiltering = False
    else:
      self._needFieldsFiltering = True

    # Types must be specified in the streamdef json or, in the case of
    #  file_record_stream, they may be implicit from the file
    streamFieldTypes = sourceDict.get('types', None)
    self._logger.debug('Types from the def: %s', streamFieldTypes)
    # Validate that all types are valid
    if streamFieldTypes is not None:
      for dataType in streamFieldTypes:
        assert FieldMetaType.isValid(dataType)

    # Reset, sequence and time fields might be provided by streamdef json
    streamResetFieldName = streamDef.get('resetField', None)
    streamTimeFieldName = streamDef.get('timeField', None)
    streamSequenceFieldName = streamDef.get('sequenceIdField', None)
    self._logger.debug('r, t, s fields: %s, %s, %s', streamResetFieldName,
                                                      streamTimeFieldName,
                                                      streamSequenceFieldName)


    # =======================================================================
    # Open up the underlying record store
    dataUrl = sourceDict.get('source', None)
    assert dataUrl is not None
    self._recordStore = self._openStream(dataUrl, isBlocking, maxTimeout,
                                         bookmark, firstRecordIdx)
    assert self._recordStore is not None


    # =======================================================================
    # Prepare the data structures we need for returning just the fields
    #  the caller wants from each record
    recordStoreFields = self._recordStore.getFields()
    self._recordStoreFieldNames = self._recordStore.getFieldNames()

    if not self._needFieldsFiltering:
      self._streamFieldNames = self._recordStoreFieldNames

    # Build up the field definitions for each field. This is a list of tuples
    #  of (name, type, special)
    self._streamFields = []
    for dstIdx, name in enumerate(self._streamFieldNames):
      if name not in self._recordStoreFieldNames:
        raise RuntimeError("The column '%s' from the stream definition "
          "is not present in the underlying stream which has the following "
          "columns: %s" % (name, self._recordStoreFieldNames))

      fieldIdx = self._recordStoreFieldNames.index(name)
      fieldType = recordStoreFields[fieldIdx].type
      fieldSpecial = recordStoreFields[fieldIdx].special

      # If the types or specials were defined in the stream definition,
      #   then override what was found in the record store
      if streamFieldTypes is not None:
        fieldType = streamFieldTypes[dstIdx]

      if streamResetFieldName is not None and streamResetFieldName == name:
        fieldSpecial = FieldMetaSpecial.reset
      if streamTimeFieldName is not None and streamTimeFieldName == name:
        fieldSpecial = FieldMetaSpecial.timestamp
      if (streamSequenceFieldName is not None and
          streamSequenceFieldName == name):
        fieldSpecial = FieldMetaSpecial.sequence

      self._streamFields.append(FieldMetaInfo(name, fieldType, fieldSpecial))


    # ========================================================================
    # Create the aggregator which will handle aggregation of records before
    #  returning them.
    self._aggregator = Aggregator(
            aggregationInfo=streamDef.get('aggregation', None),
            inputFields=recordStoreFields,
            timeFieldName=streamDef.get('timeField', None),
            sequenceIdFieldName=streamDef.get('sequenceIdField', None),
            resetFieldName=streamDef.get('resetField', None))

    # We rely on the aggregator to tell us the bookmark of the last raw input
    #  that contributed to the aggregated record
    self._aggBookmark = None

    # Compute the aggregation period in terms of months and seconds
    if 'aggregation' in streamDef:
      self._aggMonthsAndSeconds = nupic.support.aggregationToMonthsSeconds(
                streamDef.get('aggregation'))
    else:
      self._aggMonthsAndSeconds = None


    # ========================================================================
    # Are we saving the generated output to a csv?
    if saveOutput:
      tmpDir = tempfile.mkdtemp()
      outFilename = os.path.join(tmpDir, "generated_output.csv")
      self._logger.info("StreamReader: Saving generated records to: '%s'" %
                        outFilename)
      self._writer = FileRecordStream(streamID=outFilename,
                                      write=True,
                                      fields=self._streamFields)
    else:
      self._writer = None
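For orientation, a minimal stream definition covering the keys this constructor actually reads ('streams' with a single 'source', 'columns', and the optional special-field names). The concrete values are illustrative assumptions; a real definition must also satisfy the stream_def.json schema referenced above.

# Illustrative streamDef (values are assumptions, not from a real experiment).
streamDef = {
  'version': 1,
  'info': 'hourly gym energy data',
  'streams': [
    {'source': 'file://extra/gym/gym.csv',   # handled by _openStream
     'info': 'gym.csv',
     'columns': ['*']},                      # take all columns as-is
  ],
  'timeField': 'timestamp',
}

reader = StreamReader(streamDef, isBlocking=False, maxTimeout=5)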
Example #47
class _BasicPredictionWriter(PredictionWriterIface):
  """ This class defines the basic (file-based) implementation of
  PredictionWriterIface, whose instances are returned by
  BasicPredictionWriterFactory
  """
  def __init__(self, experimentDir, label, inferenceType,
               fields, metricNames=None, checkpointSource=None):
    """ Constructor

    experimentDir:
                  experiment directory path that contains description.py

    label:        A label string to incorporate into the filename.

    inferenceType:
                  A constant from opfutils.InferenceType for the
                  requested prediction writer

    fields:       a non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo
                  representing fields that will be emitted to this prediction
                  writer

    metricNames:  OPTIONAL - A list of metric names that will be emitted by
                  this prediction writer

    checkpointSource:
                  If not None, a File-like object containing the
                  previously-checkpointed predictions for setting the initial
                  contents of this PredictionOutputStream.  Will be copied
                  before returning, if needed.
    """
    #assert len(fields) > 0

    self.__experimentDir = experimentDir

    # opfutils.InferenceType kind value
    self.__inferenceType = inferenceType

    # A tuple of nupic.data.fieldmeta.FieldMetaInfo
    self.__inputFieldsMeta = tuple(copy.deepcopy(fields))
    self.__numInputFields = len(self.__inputFieldsMeta)
    self.__label = label
    if metricNames is not None:
      metricNames.sort()
    self.__metricNames = metricNames

    # Define our output field meta info
    self.__outputFieldsMeta = []

    # The list of inputs that we include in the prediction output
    self._rawInputNames = []

    # Output dataset
    self.__datasetPath = None
    self.__dataset = None

    # Save checkpoint data until we're ready to create the output dataset
    self.__checkpointCache = None
    if checkpointSource is not None:
      checkpointSource.seek(0)
      self.__checkpointCache = StringIO.StringIO()
      shutil.copyfileobj(checkpointSource, self.__checkpointCache)

    return


  def __openDatafile(self, modelResult):
    """Open the data file and write the header row"""

    # Write reset bit
    resetFieldMeta = FieldMetaInfo(
      name="reset",
      type=FieldMetaType.integer,
      special = FieldMetaSpecial.reset)

    self.__outputFieldsMeta.append(resetFieldMeta)


    # -----------------------------------------------------------------------
    # Write each of the raw inputs that go into the encoders
    rawInput = modelResult.rawInput
    rawFields = rawInput.keys()
    rawFields.sort()
    for field in rawFields:
      if field.startswith('_') or field == 'reset':
        continue
      value = rawInput[field]
      meta = FieldMetaInfo(name=field, type=FieldMetaType.string,
                           special=FieldMetaSpecial.none)
      self.__outputFieldsMeta.append(meta)
      self._rawInputNames.append(field)


    # -----------------------------------------------------------------------
    # Handle each of the inference elements
    for inferenceElement, value in modelResult.inferences.iteritems():
      inferenceLabel = InferenceElement.getLabel(inferenceElement)

      # TODO: Right now we assume list inferences are associated with
      # the input field metadata
      if type(value) in (list, tuple):
        # Append input and prediction field meta-info
        self.__outputFieldsMeta.extend(self.__getListMetaInfo(inferenceElement))

      elif isinstance(value, dict):
          self.__outputFieldsMeta.extend(self.__getDictMetaInfo(inferenceElement,
                                                                value))
      else:

        if InferenceElement.getInputElement(inferenceElement):
          self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel+".actual",
                type=FieldMetaType.string, special = ''))
        self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel,
                type=FieldMetaType.string, special = ''))

    if self.__metricNames:
      for metricName in self.__metricNames:
        metricField = FieldMetaInfo(
          name = metricName,
          type = FieldMetaType.float,
          special = FieldMetaSpecial.none)

        self.__outputFieldsMeta.append(metricField)

    # Create the inference directory for our experiment
    inferenceDir = _FileUtils.createExperimentInferenceDir(self.__experimentDir)

    # Construct the prediction dataset file path
    filename = (self.__label + "." +
               opfutils.InferenceType.getLabel(self.__inferenceType) +
               ".predictionLog.csv")
    self.__datasetPath = os.path.join(inferenceDir, filename)

    # Create the output dataset
    print "OPENING OUTPUT FOR PREDICTION WRITER AT: {0!r}".format(self.__datasetPath)
    print "Prediction field-meta: {0!r}".format([tuple(i) for i in self.__outputFieldsMeta])
    self.__dataset = FileRecordStream(streamID=self.__datasetPath, write=True,
                                     fields=self.__outputFieldsMeta)

    # Copy data from checkpoint cache
    if self.__checkpointCache is not None:
      self.__checkpointCache.seek(0)

      reader = csv.reader(self.__checkpointCache, dialect='excel')

      # Skip header row
      try:
        header = reader.next()
      except StopIteration:
        print "Empty record checkpoint initializer for {0!r}".format(self.__datasetPath)
      else:
        assert tuple(self.__dataset.getFieldNames()) == tuple(header), \
          "dataset.getFieldNames(): {0!r}; predictionCheckpointFieldNames: {1!r}".format(
          tuple(self.__dataset.getFieldNames()), tuple(header))

      # Copy the rows from checkpoint
      numRowsCopied = 0
      while True:
        try:
          row = reader.next()
        except StopIteration:
          break

        #print "DEBUG: restoring row from checkpoint: %r" % (row,)

        self.__dataset.appendRecord(row)
        numRowsCopied += 1

      self.__dataset.flush()

      print "Restored {0:d} rows from checkpoint for {1!r}".format(
        numRowsCopied, self.__datasetPath)

      # Dispose of our checkpoint cache
      self.__checkpointCache.close()
      self.__checkpointCache = None

    return


  def setLoggedMetrics(self, metricNames):
    """ Tell the writer which metrics should be written

    Parameters:
    -----------------------------------------------------------------------
    metricNames:  A list of metric labels to be written
    """
    if metricNames is None:
      self.__metricNames = set([])
    else:
      self.__metricNames = set(metricNames)


  def close(self):
    """ [virtual method override] Closes the writer (e.g., close the underlying
    file)
    """

    if self.__dataset:
      self.__dataset.close()
    self.__dataset = None

    return


  def __getListMetaInfo(self, inferenceElement):
    """ Get field metadata information for inferences that are of list type
    TODO: Right now we assume list inferences are associated with the input field
    metadata
    """
    fieldMetaInfo = []
    inferenceLabel = InferenceElement.getLabel(inferenceElement)

    for inputFieldMeta in self.__inputFieldsMeta:
      if InferenceElement.getInputElement(inferenceElement):
        outputFieldMeta = FieldMetaInfo(
          name=inputFieldMeta.name + ".actual",
          type=inputFieldMeta.type,
          special=inputFieldMeta.special
        )

      predictionField = FieldMetaInfo(
        name=inputFieldMeta.name + "." + inferenceLabel,
        type=inputFieldMeta.type,
        special=inputFieldMeta.special
      )

      fieldMetaInfo.append(outputFieldMeta)
      fieldMetaInfo.append(predictionField)

    return fieldMetaInfo


  def __getDictMetaInfo(self, inferenceElement, inferenceDict):
    """Get field metadate information for inferences that are of dict type"""
    fieldMetaInfo = []
    inferenceLabel = InferenceElement.getLabel(inferenceElement)

    if InferenceElement.getInputElement(inferenceElement):
      fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+".actual",
                                         type=FieldMetaType.string,
                                         special = ''))

    keys = sorted(inferenceDict.keys())
    for key in keys:
      fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+"."+str(key),
                                         type=FieldMetaType.string,
                                         special=''))


    return fieldMetaInfo


  def append(self, modelResult):
    """ [virtual method override] Emits a single prediction as input versus
    predicted.

    modelResult:    An opfutils.ModelResult object that contains the model input
                    and output for the current timestep.
    """

    #print "DEBUG: _BasicPredictionWriter: writing modelResult: %r" % (modelResult,)

    # If there are no inferences, don't write anything
    inferences = modelResult.inferences
    hasInferences = False
    if inferences is not None:
      for value in inferences.itervalues():
        hasInferences = hasInferences or (value is not None)

    if not hasInferences:
      return

    if self.__dataset is None:
      self.__openDatafile(modelResult)

    inputData = modelResult.sensorInput

    sequenceReset = int(bool(inputData.sequenceReset))
    outputRow = [sequenceReset]


    # -----------------------------------------------------------------------
    # Write out the raw inputs
    rawInput = modelResult.rawInput
    for field in self._rawInputNames:
      outputRow.append(str(rawInput[field]))

    # -----------------------------------------------------------------------
    # Write out the inference element info
    for inferenceElement, outputVal in inferences.iteritems():
      inputElement = InferenceElement.getInputElement(inferenceElement)
      if inputElement:
        inputVal = getattr(inputData, inputElement)
      else:
        inputVal = None

      if type(outputVal) in (list, tuple):
        assert type(inputVal) in (list, tuple, type(None))

        for iv, ov in zip(inputVal, outputVal):
          # Write actual
          outputRow.append(str(iv))

          # Write inferred
          outputRow.append(str(ov))
      elif isinstance(outputVal, dict):
        if inputVal is not None:
          # If we have a predicted field, include only that in the actuals
          if modelResult.predictedFieldName is not None:
            outputRow.append(str(inputVal[modelResult.predictedFieldName]))
          else:
            outputRow.append(str(inputVal))
        for key in sorted(outputVal.keys()):
          outputRow.append(str(outputVal[key]))
      else:
        if inputVal is not None:
          outputRow.append(str(inputVal))
        outputRow.append(str(outputVal))

    metrics = modelResult.metrics
    for metricName in self.__metricNames:
      outputRow.append(metrics.get(metricName, 0.0))

    #print "DEBUG: _BasicPredictionWriter: writing outputRow: %r" % (outputRow,)

    self.__dataset.appendRecord(outputRow)

    self.__dataset.flush()

    return

  def checkpoint(self, checkpointSink, maxRows):
    """ [virtual method override] Save a checkpoint of the prediction output
    stream. The checkpoint comprises up to maxRows of the most recent inference
    records.

    Parameters:
    ----------------------------------------------------------------------
    checkpointSink:     A File-like object where predictions checkpoint data, if
                        any, will be stored.
    maxRows:            Maximum number of most recent inference rows
                        to checkpoint.
    """

    checkpointSink.truncate()

    if self.__dataset is None:
      if self.__checkpointCache is not None:
        self.__checkpointCache.seek(0)
        shutil.copyfileobj(self.__checkpointCache, checkpointSink)
        checkpointSink.flush()
        return
      else:
        # Nothing to checkpoint
        return

    self.__dataset.flush()
    totalDataRows = self.__dataset.getDataRowCount()

    if totalDataRows == 0:
      # Nothing to checkpoint
      return

    # Open reader of prediction file (suppress missingValues conversion)
    reader = FileRecordStream(self.__datasetPath, missingValues=[])

    # Create CSV writer for writing checkpoint rows
    writer = csv.writer(checkpointSink)

    # Write the header row to checkpoint sink -- just field names
    writer.writerow(reader.getFieldNames())

    # Determine number of rows to checkpoint
    numToWrite = min(maxRows, totalDataRows)

    # Skip initial rows to get to the rows that we actually need to checkpoint
    numRowsToSkip = totalDataRows - numToWrite
    for i in xrange(numRowsToSkip):
      reader.next()

    # Write the data rows to checkpoint sink
    numWritten = 0
    while True:
      row = reader.getNextRecord()
      if row is None:
        break

      row = [str(element) for element in row]

      #print "DEBUG: _BasicPredictionWriter: checkpointing row: %r" % (row,)

      writer.writerow(row)

      numWritten += 1

    assert numWritten == numToWrite, \
      "numWritten ({0!s}) != numToWrite ({1!s})".format(numWritten, numToWrite)


    checkpointSink.flush()

    return
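The 'most recent maxRows' behaviour of checkpoint() can be shown without the surrounding OPF machinery. A self-contained sketch (plain csv and StringIO, not the writer class itself): keep the header row plus at most maxRows of the newest data rows.

# Stand-alone sketch of the tail-checkpoint behaviour described above.
import csv
import StringIO

maxRows = 2
source = StringIO.StringIO("a,b\n1,2\n3,4\n5,6\n7,8\n")
sink = StringIO.StringIO()

rows = list(csv.reader(source))
header, dataRows = rows[0], rows[1:]

writer = csv.writer(sink)
writer.writerow(header)
for row in dataRows[-maxRows:]:   # only the most recent maxRows rows
  writer.writerow(row)

print sink.getvalue()             # a,b / 5,6 / 7,8 (line-separated)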
Example #48
  def testExperimentResults(self):
    """Run specific experiments and verify that they are producing the correct
    results.

    opfDir is the examples/opf directory in the install path
    and is used to find run_opf_experiment.py

    The testdir is the directory that contains the experiments we will be
    running. When running in the auto-build setup, this will be a temporary
    directory that has had this script, as well as the specific experiments
    we will be running, copied into it by the qa/autotest/prediction_results.py
    script.
    When running stand-alone from the command line, this will point to the
    examples/prediction directory in the install tree (same as predictionDir)

    """

    nupic_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "..", "..", "..", "..")

    opfDir = os.path.join(nupic_dir, "examples", "opf")

    testDir = opfDir

    # The testdir is the directory that contains the experiments we will be
    #  running. When running in the auto-build setup, this will be a temporary
    #  directory that has had this script, as well as the specific experiments
    #  we will be running, copied into it by the
    #  qa/autotest/prediction_results.py script.
    # When running stand-alone from the command line, we can simply point to the
    #  examples/prediction directory in the install tree.
    if not os.path.exists(os.path.join(testDir, "experiments/classification")):
      testDir = opfDir

    # Generate any dynamically generated datasets now
    command = ['python', os.path.join(testDir, 'experiments', 'classification',
                                       'makeDatasets.py')]
    retval = call(command)
    self.assertEqual(retval, 0)


    # Generate any dynamically generated datasets now
    command = ['python', os.path.join(testDir, 'experiments', 'multistep',
                                       'make_datasets.py')]
    retval = call(command)
    self.assertEqual(retval, 0)


    # Generate any dynamically generated datasets now
    command = ['python', os.path.join(testDir, 'experiments',
                                'spatial_classification', 'make_datasets.py')]
    retval = call(command)
    self.assertEqual(retval, 0)


    # Run from the test directory so that we can find our experiments
    os.chdir(testDir)

    runExperiment = os.path.join(nupic_dir, "scripts", "run_opf_experiment.py")

    # A list of experiments to run.  Valid attributes:
    #   experimentDir - Required, path to the experiment directory containing
    #                       description.py
    #   args          - optional. List of arguments for run_opf_experiment
    #   results       - A dictionary of expected results. The keys are tuples
    #                    containing (predictionLogFileName, columnName). The
    #                    value is a (min, max) expected value from the last row
    #                    in the prediction log.
    multistepTests = [
      # For this one, in theory the error for 1 step should be < 0.20
      { 'experimentDir': 'experiments/multistep/simple_0',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
                    (0.0, 0.20),
        }
      },

      # For this one, in theory the error for 1 step should be < 0.50, but we
      #  get slightly higher because our sample size is smaller than ideal
      { 'experimentDir': 'experiments/multistep/simple_0_f2',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
                    (0.0, 0.66),
        }
      },

      # For this one, in theory the error for 1 step should be < 0.20
      { 'experimentDir': 'experiments/multistep/simple_1',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
                    (0.0, 0.20),
        }
      },

      # For this test, we haven't figured out the theoretical error, this
      #  error is determined empirically from actual results
      { 'experimentDir': 'experiments/multistep/simple_1_f2',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
                    (0.0, 3.76),
        }
      },

      # For this one, in theory the error for 1 step should be < 0.20, but we
      #  get slightly higher because our sample size is smaller than ideal
      { 'experimentDir': 'experiments/multistep/simple_2',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
                    (0.0, 0.31),
        }
      },

      # For this one, in theory the error for 1 step should be < 0.10 and for
      #  3 step < 0.30, but our actual results are better.
      { 'experimentDir': 'experiments/multistep/simple_3',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=200:field=field1"):
                    (0.0, 0.06),
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=3:window=200:field=field1"):
                    (0.0, 0.20),
        }
      },

      # For this test, we haven't figured out the theoretical error, this
      #  error is determined empirically from actual results
      { 'experimentDir': 'experiments/multistep/simple_3_f2',
        'results': {
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='aae':steps=1:window=200:field=field2"):
                    (0.0, 0.6),
          ('DefaultTask.TemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='aae':steps=3:window=200:field=field2"):
                    (0.0, 1.8),
        }
      },

      # Test missing record support.
      # Should have 0 error by the end of the dataset
      { 'experimentDir': 'experiments/missing_record/simple_0',
        'results': {
          ('DefaultTask.NontemporalMultiStep.predictionLog.csv',
           "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=1:window=25:field=field1"):
                    (1.0, 1.0),
        }
      },

    ] # end of multistepTests

    classificationTests = [
      # ----------------------------------------------------------------------
      # Classification Experiments
      { 'experimentDir': 'experiments/classification/category_hub_TP_0',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'): (0.0, 0.020),
            }
      },

      { 'experimentDir': 'experiments/classification/category_TM_0',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'): (0.0, 0.045),

            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classConfidences:neg_auc:computeEvery=10:window=200'): (-1.0, -0.98),
            }
      },

      { 'experimentDir': 'experiments/classification/category_TM_1',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'): (0.0, 0.005),
            }
      },

      { 'experimentDir': 'experiments/classification/scalar_TP_0',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'): (0.0, 0.155),

            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classConfidences:neg_auc:computeEvery=10:window=200'): (-1.0, -0.900),
            }
      },

      { 'experimentDir': 'experiments/classification/scalar_TP_1',
        'results': {
            ('OnlineLearning.TemporalClassification.predictionLog.csv',
             'classification:avg_err:window=200'):  (0.0, 0.03),
            }
      },

    ] # End of classification tests
    
    spatialClassificationTests = [
      { 'experimentDir': 'experiments/spatial_classification/category_0',
        'results': {
            ('DefaultTask.NontemporalClassification.predictionLog.csv',
             "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"): 
                    (0.0, 0.05),
            }

      },

      { 'experimentDir': 'experiments/spatial_classification/category_1',
        'results': {
            ('DefaultTask.NontemporalClassification.predictionLog.csv',
             "multiStepBestPredictions:multiStep:errorMetric='avg_err':steps=0:window=100:field=classification"): 
                    (0.0, 0.0),
            }
      },
      
      { 'experimentDir': 'experiments/spatial_classification/scalar_0',
        'results': {
            ('DefaultTask.NontemporalClassification.predictionLog.csv',
             "multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"): 
                    (0.0, 0.025),
            }
      },

      { 'experimentDir': 'experiments/spatial_classification/scalar_1',
        'results': {
            ('DefaultTask.NontemporalClassification.predictionLog.csv',
             "multiStepBestPredictions:multiStep:errorMetric='aae':steps=0:window=100:field=classification"): 
                    (-1e-10, 0.01),
            }
      },


    ]

    anomalyTests = [
      # ----------------------------------------------------------------------
      # Anomaly Experiments
      { 'experimentDir': 'experiments/anomaly/temporal/simple',
        'results': {
            ('DefaultTask.TemporalAnomaly.predictionLog.csv',
             'anomalyScore:passThruPrediction:window=1000:field=f'): (0.02,
                                                                      0.04),
          }
      },



    ] # End of anomaly tests

    tests = []
    tests += multistepTests
    tests += classificationTests
    tests += spatialClassificationTests
    tests += anomalyTests

    # Uncomment this to only run a specific experiment(s)
    #tests = tests[7:8]

    # This contains a list of tuples: (expDir, key, results)
    summaryOfResults = []
    startTime = time.time()

    testIdx = -1
    for test in tests:
      testIdx += 1
      expDirectory = test['experimentDir']

      # -------------------------------------------------------------------
      # Remove files/directories generated by previous tests:
      toDelete = []

      # Remove inference results
      path = os.path.join(expDirectory, "inference")
      toDelete.append(path)
      path = os.path.join(expDirectory, "savedmodels")
      toDelete.append(path)

      for path in toDelete:
        if not os.path.exists(path):
          continue
        print "Removing %s ..." % path
        if os.path.isfile(path):
          os.remove(path)
        else:
          shutil.rmtree(path)


      # ------------------------------------------------------------------------
      # Run the test.
      args = test.get('args', [])
      print "Running experiment %s ..." % (expDirectory)
      command = ['python', runExperiment, expDirectory] + args
      retVal = call(command)

      # If retVal is non-zero (and this was not a negative test), or retVal is
      # zero (and this is a negative test), something went wrong.
      if retVal:
        print "Details of failed test: %s" % test
        print("TestIdx %d, OPF experiment '%s' failed with return code %i." %
              (testIdx, expDirectory, retVal))
      self.assertFalse(retVal)


      # -----------------------------------------------------------------------
      # Check the results
      for (key, expValues) in test['results'].items():
        (logFilename, colName) = key

        # Open the prediction log file
        logFile = FileRecordStream(os.path.join(expDirectory, 'inference',
                                                logFilename))
        colNames = [x[0] for x in logFile.getFields()]
        if colName not in colNames:
          print "TestIdx %d: %s not one of the columns in " \
            "prediction log file. Available column names are: %s" % (testIdx,
                    colName, colNames)
        self.assertTrue(colName in colNames)
        colIndex = colNames.index(colName)

        # Read till we get to the last line
        while True:
          try:
            row = logFile.next()
          except StopIteration:
            break
        result = row[colIndex]

        # Save summary of results
        summaryOfResults.append((expDirectory, colName, result))

        print "Actual result for %s, %s:" % (expDirectory, colName), result
        print "Expected range:", expValues
        failed = (expValues[0] is not None and result < expValues[0]) \
            or (expValues[1] is not None and result > expValues[1])
        if failed:
          print ("TestIdx %d: Experiment %s failed. \nThe actual result"
             " for %s (%s) was outside the allowed range of %s" % (testIdx,
              expDirectory, colName, result, expValues))
        else:
          print "  Within expected range."
        self.assertFalse(failed)


    # =======================================================================
    # Print summary of results:
    print
    print "Summary of results in all experiments run:"
    print "========================================="
    prevExpDir = None
    for (expDir, key, results) in summaryOfResults:
      if expDir != prevExpDir:
        print
        print expDir
        prevExpDir = expDir
      print "  %s: %s" % (key, results)

    print "\nElapsed time: %.1f seconds" % (time.time() - startTime)
Example #49
  def testSimpleMulticlassNetworkPY(self):
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.0, 1, 0, "0"],
      [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=3, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=4, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=5, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=6, month=3, year=2010), 1.0, 0, 0, "1"],
      [datetime(day=7, month=3, year=2010), 0.0, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 1.0, 0, 0, "1"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))

    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier", "py.SDRClassifierRegion",
                  "{steps: '0', alpha: 0.001, implementation: 'py'}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="dataOut", destInput="bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput="categoryOut", destInput="categoryIn")
    sensor = net.regions["sensor"]
    classifier = net.regions["classifier"]

    # Setup sensor region encoder and data stream.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
      "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource

    # Get ready to run.
    net.initialize()

    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)

    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)

    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
                     "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
                     "Inference mode is not turned on.")

    # make sure we can access all the parameters with getParameter
    self.assertEqual(classifier.getParameter("maxCategoryCount"), 2000)
    self.assertAlmostEqual(float(classifier.getParameter("alpha")), 0.001)
    self.assertEqual(int(classifier.getParameter("steps")), 0)
    self.assertTrue(classifier.getParameter("implementation") == "py")
    self.assertEqual(classifier.getParameter("verbosity"), 0)


    expectedCats = ([0.0], [1.0], [0.0], [1.0], [0.0], [1.0], [0.0], [1.0],)
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      inferredCats = classifier.getOutputData("categoriesOut")
      self.assertSequenceEqual(expectedCats[i], inferredCats.tolist(),
                               "Classififer did not infer expected category "
                               "for record number {}.".format(i))
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
Example #51
def _generateOverlapping(filename="overlap.csv", numSequences=2, elementsPerSeq=3, 
                    numRepeats=10, hub=[0,1], hubOffset=1, resets=False):
  
  """ Generate a temporal dataset containing sequences that overlap one or more
  elements with other sequences. 
  
  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the 
                  directory containing this script. 
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output 
  hub:            sub-sequence to place within each other sequence 
  hubOffset:      where, within each sequence, to place the hub
  resets:         if True, turn on reset at start of each sequence
  """
  
  # Check for conflicts in arguments
  assert (hubOffset + len(hub) <= elementsPerSeq)
  
  # Create the output file
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  print "Creating %s..." % (pathname)
  fields = [('reset', 'int', 'R'), ('category', 'int', 'C'),
            ('field1', 'string', '')]  
  outFile = FileRecordStream(pathname, write=True, fields=fields)
  

  # Create the sequences with the hub in the middle
  sequences = []
  nextElemIdx = max(hub)+1
  
  for _ in range(numSequences):
    seq = []
    for j in range(hubOffset):
      seq.append(nextElemIdx)
      nextElemIdx += 1
    for j in hub:
      seq.append(j)
    j = hubOffset + len(hub)
    while j < elementsPerSeq:
      seq.append(nextElemIdx)
      nextElemIdx += 1
      j += 1
    sequences.append(seq)
  
  # Write out the sequences in random order
  seqIdxs = []
  for _ in range(numRepeats):
    seqIdxs += range(numSequences)
  random.shuffle(seqIdxs)
  
  for seqIdx in seqIdxs:
    reset = int(resets)
    seq = sequences[seqIdx]
    for x in seq:
      outFile.appendRecord([reset, str(seqIdx), str(x)])
      reset = 0

  outFile.close()
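A hedged usage sketch; the argument values are illustrative and simply satisfy the assertion above that the hub plus its offset fits inside each sequence.

# Hypothetical call: two 5-element sequences sharing the 2-element hub [0, 1]
# placed at offset 1, with a reset emitted at each sequence boundary.
_generateOverlapping(filename="overlap.csv", numSequences=2, elementsPerSeq=5,
                     numRepeats=10, hub=[0, 1], hubOffset=1, resets=True)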
Example #53
def _generateFile(filename, numRecords, categoryList, initProb, 
      firstOrderProb, secondOrderProb, seqLen, numNoise=0, resetsEvery=None):
  """ Generate a set of records reflecting a set of probabilities.
  
  Parameters:
  ----------------------------------------------------------------
  filename:         name of .csv file to generate
  numRecords:       number of records to generate
  categoryList:     list of category names
  initProb:         Initial probability for each category. This is a vector
                      of length len(categoryList).
  firstOrderProb:   A dictionary of the 1st order probabilities. The key
                      is the 1st element of the sequence, the value is
                      the probability of each 2nd element given the first. 
  secondOrderProb:  A dictionary of the 2nd order probabilities. The key
                      is the first 2 elements of the sequence, the value is
                      the probability of each possible 3rd element given the 
                      first two. 
  seqLen:           Desired length of each sequence. The 1st element will
                      be generated using the initProb, the 2nd element by the
                      firstOrder table, and the 3rd and all successive 
                      elements by the secondOrder table. None means infinite
                      length. 
  numNoise:         Number of noise elements to place between each 
                      sequence. The noise elements are evenly distributed from 
                      all categories. 
  resetsEvery:      If not None, generate a reset every N records
                      
                      
  Here is an example of some parameters:
  
  categoryList:     ['cat1', 'cat2', 'cat3']
  
  initProb:         [0.7, 0.2, 0.1]
  
  firstOrderProb:   {'[0]': [0.3, 0.3, 0.4],
                     '[1]': [0.3, 0.3, 0.4],
                     '[2]': [0.3, 0.3, 0.4]}
                     
  secondOrderProb:  {'[0, 0]': [0.3, 0.3, 0.4],
                     '[0, 1]': [0.3, 0.3, 0.4],
                     '[0, 2]': [0.3, 0.3, 0.4],
                     '[1, 0]': [0.3, 0.3, 0.4],
                     '[1, 1]': [0.3, 0.3, 0.4],
                     '[1, 2]': [0.3, 0.3, 0.4],
                     '[2, 0]': [0.3, 0.3, 0.4],
                     '[2, 1]': [0.3, 0.3, 0.4],
                     '[2, 2]': [0.3, 0.3, 0.4]}
                   
  """
  
  # Create the file
  print "Creating %s..." % (filename)
  fields = [('reset', 'int', 'R'), ('name', 'string', '')]
  outFile = FileRecordStream(filename, write=True, fields=fields)
  
  # --------------------------------------------------------------------
  # Convert the probability tables into cumulative probabilities
  initCumProb = initProb.cumsum()
  
  firstOrderCumProb = dict()
  for (key,value) in firstOrderProb.iteritems():
    firstOrderCumProb[key] = value.cumsum()
    
  secondOrderCumProb = dict()
  for (key,value) in secondOrderProb.iteritems():
    secondOrderCumProb[key] = value.cumsum()
    

  # --------------------------------------------------------------------
  # Write out the sequences
  elementsInSeq = []
  numElementsSinceReset = 0
  maxCatIdx = len(categoryList) - 1
  for i in xrange(numRecords):

    # Generate a reset?
    if numElementsSinceReset == 0:
      reset = 1
    else:
      reset = 0
      
    # Pick the next element, based on how far we are into the 2nd order
    #   sequence.
    rand = numpy.random.rand()
    if len(elementsInSeq) == 0:
      catIdx = numpy.searchsorted(initCumProb, rand)
    elif len(elementsInSeq) == 1:
      catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)], rand)
    elif (len(elementsInSeq) >=2) and \
                  (seqLen is None or len(elementsInSeq) < seqLen-numNoise):
      catIdx = numpy.searchsorted(secondOrderCumProb[str(elementsInSeq[-2:])], rand)
    else:   # random "noise"
      catIdx = numpy.random.randint(len(categoryList))
      
    # Write out the record
    catIdx = min(maxCatIdx, catIdx)
    outFile.appendRecord([reset,categoryList[catIdx]])    
    #print categoryList[catIdx]
    
    # ------------------------------------------------------------
    # Increment counters
    elementsInSeq.append(catIdx)
    numElementsSinceReset += 1
    
    # Generate another reset?
    if resetsEvery is not None and numElementsSinceReset == resetsEvery:
      numElementsSinceReset = 0
      elementsInSeq = []
    
    # Start another 2nd order sequence?
    if seqLen is not None and (len(elementsInSeq) == seqLen+numNoise):
      elementsInSeq = []
      
  
  outFile.close()
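A usage sketch wiring up parameter tables like the ones in the docstring. The probability vectors must be numpy arrays because the generator calls .cumsum() on them, and the dictionary keys are built with str() so they match the str(elementsInSeq[...]) lookups inside the loop; the call itself is an illustrative assumption.

# Hypothetical call built from docstring-style tables.
import numpy

categoryList = ['cat1', 'cat2', 'cat3']
initProb = numpy.array([0.7, 0.2, 0.1])

# Keys are produced with str() so they match the generator's lookups,
# e.g. str([0]) == '[0]' and str([0, 1]) == '[0, 1]'.
firstOrderProb = dict((str([i]), numpy.array([0.3, 0.3, 0.4]))
                      for i in range(3))
secondOrderProb = dict((str([i, j]), numpy.array([0.3, 0.3, 0.4]))
                       for i in range(3) for j in range(3))

_generateFile('second_order.csv', numRecords=500, categoryList=categoryList,
              initProb=initProb, firstOrderProb=firstOrderProb,
              secondOrderProb=secondOrderProb, seqLen=10, numNoise=2,
              resetsEvery=None)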
Example #54
File: stats_v2.py Project: AI-Cdrone/nupic
def generateStats(filename, maxSamples=None):
  """
  Collect statistics for each of the fields in the user input data file and
  return a stats dict object.

  Parameters:
  ------------------------------------------------------------------------------
  filename:             The path and name of the data file.
  maxSamples:           Upper bound on the number of rows to be processed
  retval:               A dictionary of dictionaries. The top level keys are the
                        field names and the corresponding values are the statistics
                        collected for the individual file.
                        Example:
                        {
                          'consumption':{'min':0,'max':90,'mean':50,...},
                          'gym':{'numDistinctCategories':10,...},
                          ...
                         }


  """
  # Mapping from field type to stats collector object
  statsCollectorMapping = {'float':    FloatStatsCollector,
                           'int':      IntStatsCollector,
                           'string':   StringStatsCollector,
                           'datetime': DateTimeStatsCollector,
                           'bool':     BoolStatsCollector,
                           }

  filename = resource_filename("nupic.datafiles", filename)
  print "*"*40
  print "Collecting statistics for file:'%s'" % (filename,)
  dataFile = FileRecordStream(filename)

  # Initialize collector objects
  # statsCollectors list holds statsCollector objects for each field
  statsCollectors = []
  for fieldName, fieldType, fieldSpecial in dataFile.getFields():
    # Find the corresponding stats collector for each field based on field type
    # and initialize an instance
    statsCollector = \
            statsCollectorMapping[fieldType](fieldName, fieldType, fieldSpecial)
    statsCollectors.append(statsCollector)

  # Now collect the stats
  if maxSamples is None:
    maxSamples = 500000
  for i in xrange(maxSamples):
    record = dataFile.getNextRecord()
    if record is None:
      break
    for i, value in enumerate(record):
      statsCollectors[i].addValue(value)

  # stats dict holds the statistics for each field
  stats = {}
  for statsCollector in statsCollectors:
    statsCollector.getStats(stats)

  # We don't want to include reset field in permutations
  # TODO: handle reset field in a clean way
  if dataFile.getResetFieldIdx() is not None:
    resetFieldName,_,_ = dataFile.getFields()[dataFile.getResetFieldIdx()]
    stats.pop(resetFieldName)

  if VERBOSITY > 0:
    pprint.pprint(stats)

  return stats
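A minimal usage sketch (not part of the original file): the dataset path below is hypothetical and, per the resource_filename() call above, is resolved relative to the nupic.datafiles package.

if __name__ == "__main__":
  # Hypothetical dataset path, resolved relative to nupic.datafiles
  stats = generateStats("extra/hotgym/hotgym.csv", maxSamples=1000)
  print "Fields with collected statistics:", stats.keys()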
Example #55
  def testSimpleMulticlassNetwork(self):
  
    # Setup data record stream of fake data (with three categories)
    filename = _getTempFileName()
    fields = [("timestamp", "datetime", "T"),
              ("value", "float", ""),
              ("reset", "int", "R"),
              ("sid", "int", "S"),
              ("categories", "list", "C")]
    records = (
      [datetime(day=1, month=3, year=2010), 0.0, 1, 0, ""],
      [datetime(day=2, month=3, year=2010), 1.0, 0, 0, "1 2"],
      [datetime(day=3, month=3, year=2010), 1.0, 0, 0, "1 2"],
      [datetime(day=4, month=3, year=2010), 2.0, 0, 0, "0"],
      [datetime(day=5, month=3, year=2010), 3.0, 0, 0, "1 2"],
      [datetime(day=6, month=3, year=2010), 5.0, 0, 0, "1 2"],
      [datetime(day=7, month=3, year=2010), 8.0, 0, 0, "0"],
      [datetime(day=8, month=3, year=2010), 13.0, 0, 0, "1 2"])
    dataSource = FileRecordStream(streamID=filename, write=True, fields=fields)
    for r in records:
      dataSource.appendRecord(list(r))

    # Create the network and get region instances.
    net = Network()
    net.addRegion("sensor", "py.RecordSensor", "{'numCategories': 3}")
    net.addRegion("classifier","py.KNNClassifierRegion",
                  "{'k': 2,'distThreshold': 0,'maxCategoryCount': 3}")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "dataOut", destInput = "bottomUpIn")
    net.link("sensor", "classifier", "UniformLink", "",
             srcOutput = "categoryOut", destInput = "categoryIn")
    sensor = net.regions["sensor"]
    classifier = net.regions["classifier"]
    
    # Setup sensor region encoder and data stream.
    dataSource.close()
    dataSource = FileRecordStream(filename)
    sensorRegion = sensor.getSelf()
    sensorRegion.encoder = MultiEncoder()
    sensorRegion.encoder.addEncoder(
        "value", ScalarEncoder(21, 0.0, 13.0, n=256, name="value"))
    sensorRegion.dataSource = dataSource
    
    # Get ready to run.
    net.initialize()

    # Train the network (by default learning is ON in the classifier, but assert
    # anyway) and then turn off learning and turn on inference mode.
    self.assertEqual(classifier.getParameter("learningMode"), 1)
    net.run(8)
    classifier.setParameter("inferenceMode", 1)
    classifier.setParameter("learningMode", 0)

    # Assert learning is OFF and that the classifier learned the dataset.
    self.assertEqual(classifier.getParameter("learningMode"), 0,
        "Learning mode is not turned off.")
    self.assertEqual(classifier.getParameter("inferenceMode"), 1,
        "Inference mode is not turned on.")
    self.assertEqual(classifier.getParameter("categoryCount"), 3,
        "The classifier should count three total categories.")
    # The classifier learns 12 patterns because there are 12 category labels
    # among the 8 records: six records labeled "1 2" contribute two each, two
    # labeled "0" contribute one each, and the first record is unlabeled.
    self.assertEqual(classifier.getParameter("patternCount"), 12,
        "The classifier should've learned 12 samples in total.")

    # Test the network on the same data as it trained on; should classify with
    # 100% accuracy.
    expectedCats = ([0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.5, 0.5, 0.0],
                    [0.0, 0.5, 0.5],
                    [0.0, 0.5, 0.5],
                    [0.5, 0.5, 0.0],
                    [0.0, 0.5, 0.5])
    dataSource.rewind()
    for i in xrange(8):
      net.run(1)
      inferredCats = classifier.getOutputData("categoriesOut")
      self.assertSequenceEqual(expectedCats[i], inferredCats.tolist(),
          "Classififer did not infer expected category probabilites for record "
          "number {}.".format(i))
    
    # Close data stream, delete file.
    dataSource.close()
    os.remove(filename)
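As a side note, the sketch below (assuming the nupic.encoders import path) encodes a single value with the same ScalarEncoder settings used in the test above, which is roughly what the sensor hands to the classifier on each step.

from nupic.encoders import ScalarEncoder

# Same parameters as the test: 21 active bits, value range 0.0-13.0, 256 bits total
enc = ScalarEncoder(21, 0.0, 13.0, n=256, name="value")
sdr = enc.encode(5.0)
print "Active bit indices for 5.0:", sdr.nonzero()[0]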
Example #56
File: aggregator.py  Project: 0x0all/nupic
def generateDataset(aggregationInfo, inputFilename, outputFilename=None):
  """Generate a dataset of aggregated values

  Parameters:
  ----------------------------------------------------------------------------
  aggregationInfo: a dictionary that contains the following entries
    - fields: a list of pairs. Each pair is a field name and an
      aggregation function (e.g. sum). The function will be used to aggregate
      multiple values during the aggregation period.

    - aggregation period: zero or more unit=value entries; allowed units are:
        [years months] |
        [weeks days hours minutes seconds milliseconds microseconds]
        NOTE: years and months are mutually-exclusive with the other units.
              See getEndTime() and _aggregate() for more details.
        Example1: years=1, months=6,
        Example2: hours=1, minutes=30,
        If none of the period fields are specified or if all that are specified
        have values of 0, then aggregation will be suppressed, and the given
        inputFile parameter value will be returned.

  inputFilename: filename (or relative path from NTA_DATA_PATH) of
               the input dataset
               
  outputFilename: name for the output file. If not given, a name will be
        generated based on the input filename and the aggregation params
        
  retval: Path of the generated output file, written to the same directory as
      the input file. This will be the same as the input file name if no
      aggregation needed to be performed.

  

  If the input file contained a time field, sequence id field or reset field
  that were not specified in aggregationInfo fields, those fields will be
  added automatically with the following rules:

  1. The order will be R, S, T, rest of the fields
  2. The aggregation function for all will be to pick the first: lambda x: x[0]

  """



  # Create the input stream
  inputFullPath = findDataset(inputFilename)
  inputObj = FileRecordStream(inputFullPath)
  

  # Instantiate the aggregator
  aggregator = Aggregator(aggregationInfo=aggregationInfo, 
                          inputFields=inputObj.getFields())
  
  
  # Is it a null aggregation? If so, just return the input file unmodified
  if aggregator.isNullAggregation():
    return inputFullPath


  # ------------------------------------------------------------------------
  # If we were not given an output filename, create one based on the 
  #  aggregation settings
  if outputFilename is None:
    outputFilename = 'agg_%s' % \
                        os.path.splitext(os.path.basename(inputFullPath))[0]
    timePeriods = 'years months weeks days '\
                  'hours minutes seconds milliseconds microseconds'
    for k in timePeriods.split():
      if aggregationInfo.get(k, 0) > 0:
        outputFilename += '_%s_%d' % (k, aggregationInfo[k])
  
    outputFilename += '.csv'
    outputFilename = os.path.join(os.path.dirname(inputFullPath), outputFilename)



  # ------------------------------------------------------------------------
  # If some other process already started creating this file, simply 
  #   wait for it to finish and return without doing anything
  lockFilePath = outputFilename + '.please_wait'
  if os.path.isfile(outputFilename) or \
     os.path.isfile(lockFilePath):
    while os.path.isfile(lockFilePath):
      print 'Waiting for %s to be fully written by another process' % \
            lockFilePath
      time.sleep(1)
    return outputFilename


  # Create the lock file
  lockFD = open(lockFilePath, 'w')



  # -------------------------------------------------------------------------
  # Create the output stream
  outputObj = FileRecordStream(streamID=outputFilename, write=True,
                               fields=inputObj.getFields())


  # -------------------------------------------------------------------------
  # Write all aggregated records to the output
  while True:
    inRecord = inputObj.getNextRecord()
    
    (aggRecord, aggBookmark) = aggregator.next(inRecord, None)
    
    if aggRecord is None and inRecord is None:
      break
    
    if aggRecord is not None:
      outputObj.appendRecord(aggRecord)

  # Flush the aggregated output and remove the lock file so that any other
  # process waiting on it can proceed
  outputObj.close()
  lockFD.close()
  os.remove(lockFilePath)

  return outputFilename
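A hedged usage sketch (not part of aggregator.py): it aggregates a hotgym-style file into hourly buckets; the field names and path mirror the streamDef example later on this page and are assumptions.

aggInfo = {
    'fields': [('timestamp', 'first'),
               ('gym', 'first'),
               ('consumption', 'sum')],
    'hours': 1,
}
outPath = generateDataset(aggInfo, "extra/hotgym/hotgym.csv")
print "Aggregated dataset written to:", outPath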
Example #57
class Sensor(Node):
  """
  A super class only to group properties related to sensors.
  """

  #region Constructor

  def __init__(self, name):
    """
    Initializes a new instance of this class.
    """

    Node.__init__(self, name, NodeType.sensor)

    #region Instance fields

    self.bits = []
    """An array of the bit objects that compose the current output of this node."""

    self.dataSource = None
    """Data source which provides records to fed into a region."""

    self.dataSourceType = DataSourceType.file
    """Type of the data source (File or Database)"""

    self.fileName = ''
    """The input file name to be handled. Returns the input file name only if it is in the project directory, full path otherwise."""

    self.databaseConnectionString = ""
    """Connection string of the database."""

    self.databaseTable = ''
    """Target table of the database."""

    self.encoder = None
    """Multi-encoder which concatenate sub-encodings to convert raw data to htm input and vice-versa."""

    self.encodings = []
    """List of sub-encodings that handles the input from database"""

    self.predictionsMethod = PredictionsMethod.reconstruction
    """Method used to get predicted values and their probabilities."""

    self.enableClassificationLearning = True
    """Switch for classification learning"""

    self.enableClassificationInference = True
    """Switch for classification inference"""

    #endregion

    #region Statistics properties

    self.statsPrecisionRate = 0.

    #endregion

  #endregion

  #region Methods

  def getBit(self, x, y):
    """
    Return the bit located at given position
    """

    bit = self.bits[(y * self.width) + x]

    return bit

  def initialize(self):
    """
    Initialize this node.
    """

    Node.initialize(self)

    # Initialize input bits
    self.bits = []
    for x in range(self.width):
      for y in range(self.height):
        bit = Bit()
        bit.x = x
        bit.y = y
        self.bits.append(bit)

    if self.dataSourceType == DataSourceType.file:
      """
      Initialize this node opening the file and place cursor on the first record.
      """

      # If file name provided is a relative path, use project file path
      if self.fileName != '' and os.path.dirname(self.fileName) == '':
        fullFileName = os.path.dirname(Global.project.fileName) + '/' + self.fileName
      else:
        fullFileName = self.fileName

      # Check if file really exists
      if not os.path.isfile(fullFileName):
        QtGui.QMessageBox.warning(None, "Warning", "Input stream file '" + fullFileName + "' was not found or specified.", QtGui.QMessageBox.Ok)
        return

      # Create a data source for reading the file
      self.dataSource = FileRecordStream(fullFileName)

    elif self.dataSourceType == DataSourceType.database:
      pass

    self.encoder = MultiEncoder()
    for encoding in self.encodings:
      encoding.initialize()

      # Create an instance class for an encoder given its module, class and constructor params
      encoding.encoder = getInstantiatedClass(encoding.encoderModule, encoding.encoderClass, encoding.encoderParams)

      # Take the first part of encoder field name as encoder name
      # Ex: timestamp_weekend.weekend => timestamp_weekend
      encoding.encoder.name = encoding.encoderFieldName.split('.')[0]

      # Add sub-encoder to multi-encoder list
      self.encoder.addEncoder(encoding.dataSourceFieldName, encoding.encoder)

    # If the encoder output is larger than the sensor size, warn and abort initialization
    encoderSize = self.encoder.getWidth()
    sensorSize = self.width * self.height
    if encoderSize > sensorSize:
      QtGui.QMessageBox.warning(None, "Warning", "'" + self.name + "': Encoder size (" + str(encoderSize) + ") is different from sensor size (" + str(self.width) + " x " + str(self.height) + " = " + str(sensorSize) + ").", QtGui.QMessageBox.Ok)
      return

    return True

  def nextStep(self):
    """
    Performs actions related to time step progression.
    """

    # Update the state machines by removing the first element and adding a new element at the end
    for encoding in self.encodings:
      encoding.currentValue.rotate()
      if encoding.enableInference:
        encoding.predictedValues.rotate()
        encoding.bestPredictedValue.rotate()

    Node.nextStep(self)
    for bit in self.bits:
      bit.nextStep()

    # Get record value from data source
    # If the last record was reached just rewind it
    data = self.dataSource.getNextRecordDict()
    if not data:
      self.dataSource.rewind()
      data = self.dataSource.getNextRecordDict()

    # Pass raw values to encoder and get a concatenated array
    outputArray = numpy.zeros(self.encoder.getWidth())
    self.encoder.encodeIntoArray(data, outputArray)

    # Get values obtained from the data source.
    outputValues = self.encoder.getScalars(data)

    # Get raw values and respective encoded bit array for each field
    prevOffset = 0
    for i in range(len(self.encodings)):
      encoding = self.encodings[i]

      # Convert the value to its respective data type
      currValue = outputValues[i]
      if encoding.encoderFieldDataType == FieldDataType.boolean:
        currValue = bool(currValue)
      elif encoding.encoderFieldDataType == FieldDataType.integer:
        currValue = int(currValue)
      elif encoding.encoderFieldDataType == FieldDataType.decimal:
        currValue = float(currValue)
      elif encoding.encoderFieldDataType == FieldDataType.dateTime:
        currValue = dateutil.parser.parse(str(currValue))
      elif encoding.encoderFieldDataType == FieldDataType.string:
        currValue = str(currValue)
      encoding.currentValue.setForCurrStep(currValue)

    # Update sensor bits
    for i in range(len(outputArray)):
      if outputArray[i] > 0.:
        self.bits[i].isActive.setForCurrStep(True)
      else:
        self.bits[i].isActive.setForCurrStep(False)

    # Mark falsely predicted bits
    for bit in self.bits:
      if bit.isPredicted.atPreviousStep() and not bit.isActive.atCurrStep():
        bit.isFalselyPredicted.setForCurrStep(True)

    self._output = outputArray

  def getPredictions(self):
    """
    Get the predictions after an iteration.
    """

    if self.predictionsMethod == PredictionsMethod.reconstruction:

      # Prepare list with predictions to be classified
      # This list contains the indexes of all bits that are predicted
      output = []
      for i in range(len(self.bits)):
        if self.bits[i].isPredicted.atCurrStep():
          output.append(1)
        else:
          output.append(0)
      output = numpy.array(output)

      # Decode output and create predictions list
      fieldsDict, fieldsOrder = self.encoder.decode(output)
      for encoding in self.encodings:
        if encoding.enableInference:
          predictions = []
          encoding.predictedValues.setForCurrStep(dict())

          # If the encoder field name was returned by decode(), assign the predictions to it
          if encoding.encoderFieldName in fieldsOrder:
            predictedLabels = fieldsDict[encoding.encoderFieldName][1].split(', ')
            predictedValues = fieldsDict[encoding.encoderFieldName][0]
            for i in range(len(predictedLabels)):
              predictions.append([predictedValues[i], predictedLabels[i]])

          encoding.predictedValues.atCurrStep()[1] = predictions

          # Get the predicted value with the highest probability
          if len(predictions) > 0:
            bestPredictionRange = predictions[0][0]
            min = bestPredictionRange[0]
            max = bestPredictionRange[1]
            bestPredictedValue = (min + max) / 2.0
            encoding.bestPredictedValue.setForCurrStep(bestPredictedValue)

    elif self.predictionsMethod == PredictionsMethod.classification:
      # Classification estimates which values are likely to occur in the next time step.

      offset = 0
      for encoding in self.encodings:
        encoderWidth = encoding.encoder.getWidth()

        if encoding.enableInference:
          # Prepare list with predictions to be classified
          # This list contains the indexes of all bits that are predicted
          patternNZ = []
          for i in range(offset, offset + encoderWidth):
            if self.bits[i].isActive.atCurrStep():
              patternNZ.append(i)

          # Get the bucket index of the current value at the encoder
          actualValue = encoding.currentValue.atCurrStep()
          bucketIdx = encoding.encoder.getBucketIndices(actualValue)[0]

          # Perform classification
          clasResults = encoding.classifier.compute(recordNum=Global.currStep, patternNZ=patternNZ, classification={'bucketIdx': bucketIdx, 'actValue': actualValue}, learn=self.enableClassificationLearning, infer=self.enableClassificationInference)

          encoding.predictedValues.setForCurrStep(dict())
          for step in encoding.steps:

            # Calculate probability for each predicted value
            predictions = dict()
            for (actValue, prob) in zip(clasResults['actualValues'], clasResults[step]):
              if actValue in predictions:
                predictions[actValue] += prob
              else:
                predictions[actValue] = prob

            # Remove predictions with low probabilities
            maxVal = (None, None)
            for (actValue, prob) in predictions.items():
              if len(predictions) <= 1:
                break
              if maxVal[0] is None or prob >= maxVal[1]:
                if maxVal[0] is not None and maxVal[1] < encoding.minProbabilityThreshold:
                  del predictions[maxVal[0]]
                maxVal = (actValue, prob)
              elif prob < encoding.minProbabilityThreshold:
                del predictions[actValue]

            # Sort the values from most probable to least probable and trim the
            # list to the maximum number of predictions per step
            predictions = sorted(predictions.iteritems(), key=operator.itemgetter(1), reverse=True)
            predictions = predictions[:maxFutureSteps]

            encoding.predictedValues.atCurrStep()[step] = predictions

          # Get the predicted value with the highest probability
          bestPredictedValue = encoding.predictedValues.atCurrStep()[1][0][0]
          encoding.bestPredictedValue.setForCurrStep(bestPredictedValue)

        offset += encoderWidth

  def calculateStatistics(self):
    """
    Calculate statistics after an iteration.
    """

    if Global.currStep > 0:
      precision = 0.

      # Calculate the prediction precision comparing if the current value is in the range of any prediction.
      for encoding in self.encodings:
        if encoding.enableInference:
          predictions = encoding.predictedValues.atPreviousStep()[1]
          for predictedValue in predictions:
            min = None
            max = None
            value = predictedValue[0]
            if self.predictionsMethod == PredictionsMethod.reconstruction:
              min = value[0]
              max = value[1]
            elif self.predictionsMethod == PredictionsMethod.classification:
              min = value
              max = value
            if isinstance(min, (int, long, float, complex)) and isinstance(max, (int, long, float, complex)):
              min = math.floor(min)
              max = math.ceil(max)
            if min <= encoding.currentValue.atCurrStep() <= max:
              precision = 100.
              break

      # The precision rate is a running average in which each new step halves
      # the weight of the accumulated history (exponential smoothing)
      self.statsPrecisionRate = (self.statsPrecisionRate + precision) / 2
    else:
      self.statsPrecisionRate = 0.

    for bit in self.bits:
      bit.calculateStatistics()
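The snippet below (illustrative only, not part of the class) traces how the precision update above evolves over a few steps: each step halves the weight of the accumulated history, so it behaves like exponential smoothing rather than a simple mean.

rate = 0.
for precision in [100., 100., 0., 100.]:   # hypothetical per-step precisions
  rate = (rate + precision) / 2
  print rate                               # 50.0, 75.0, 37.5, 68.75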
Example #58
  def _testSamePredictions(self, experiment, predSteps, checkpointAt,
                           predictionsFilename, additionalFields=None,
                           newSerialization=False):
    """ Test that we get the same predictions out from the following two
    scenarios:

    a_plus_b: Run the network for 'a' iterations followed by 'b' iterations
    a, followed by b: Run the network for 'a' iterations, save it, load it
                      back in, then run for 'b' iterations.

    Parameters:
    -----------------------------------------------------------------------
    experiment:   base directory of the experiment. This directory should
                    contain the following:
                        base.py
                        a_plus_b/description.py
                        a/description.py
                        b/description.py
                    The sub-directory description files should import the
                    base.py and only change the first and last record used
                    from the data file.
    predSteps:   Number of steps ahead predictions are for
    checkpointAt: Number of iterations that 'a' runs for.
                 IMPORTANT: This must match the number of records that
                 a/description.py runs for - it is NOT dynamically stuffed into
                 the a/description.py.
    predictionsFilename: The name of the predictions file that the OPF
                  generates for this experiment (for example
                  'DefaultTask.NontemporalMultiStep.predictionLog.csv')
    newSerialization: Whether to use new capnproto serialization.
    """

    # Get the 3 sub-experiment directories
    aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a_plus_b")
    aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a")
    bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "b")

    # Run a+b
    args = self._createExperimentArgs(aPlusBExpDir,
                                      newSerialization=newSerialization)
    _aPlusBExp = runExperiment(args)

    # Run a, then copy the saved checkpoint into the b directory
    args = self._createExperimentArgs(aExpDir,
                                      newSerialization=newSerialization)
    _aExp = runExperiment(args)
    if os.path.exists(os.path.join(bExpDir, 'savedmodels')):
      shutil.rmtree(os.path.join(bExpDir, 'savedmodels'))
    shutil.copytree(src=os.path.join(aExpDir, 'savedmodels'),
                    dst=os.path.join(bExpDir, 'savedmodels'))

    args = self._createExperimentArgs(bExpDir,
                                      newSerialization=newSerialization,
                                      additionalArgs=['--load=DefaultTask'])
    _bExp = runExperiment(args)

    # Now, compare the predictions at the end of a+b to those in b.
    aPlusBPred = FileRecordStream(os.path.join(aPlusBExpDir, 'inference',
                                   predictionsFilename))
    bPred = FileRecordStream(os.path.join(bExpDir, 'inference',
                                   predictionsFilename))

    colNames = [x[0] for x in aPlusBPred.getFields()]
    actValueColIdx = colNames.index('multiStepPredictions.actual')
    predValueColIdx = colNames.index('multiStepPredictions.%d' % (predSteps))

    # Skip past the 'a' records in aPlusB
    for i in range(checkpointAt):
      aPlusBPred.next()

    # Now, read through the records that don't have predictions yet
    for i in range(predSteps):
      aPlusBPred.next()
      bPred.next()

    # Now, compare predictions in the two files
    rowIdx = checkpointAt + predSteps + 4 - 1
    epsilon = 0.0001
    while True:
      rowIdx += 1
      try:
        rowAPB = aPlusBPred.next()
        rowB = bPred.next()

        # Compare actuals
        self.assertEqual(rowAPB[actValueColIdx], rowB[actValueColIdx],
              "Mismatch in actual values: row %d of a+b has %s and row %d of "
              "b has %s" % (rowIdx, rowAPB[actValueColIdx], rowIdx-checkpointAt,
                            rowB[actValueColIdx]))

        # Compare predictions, within nearest epsilon
        predAPB = eval(rowAPB[predValueColIdx])
        predB = eval(rowB[predValueColIdx])

        # Sort with highest probabilities first
        predAPB = [(a, b) for b, a in predAPB.items()]
        predB = [(a, b) for b, a in predB.items()]
        predAPB.sort(reverse=True)
        predB.sort(reverse=True)

        if additionalFields is not None:
          for additionalField in additionalFields:
            fieldIdx = colNames.index(additionalField)
            self.assertEqual(rowAPB[fieldIdx], rowB[fieldIdx],
              "Mismatch in field \'%s\' values: row %d of a+b has value: (%s)\n"
              " and row %d of b has value: %s" % \
              (additionalField, rowIdx, rowAPB[fieldIdx],
                rowIdx-checkpointAt, rowB[fieldIdx]))

        self.assertEqual(len(predAPB), len(predB),
              "Mismatch in predicted values: row %d of a+b has %d predictions: "
              "\n  (%s) and row %d of b has %d predictions:\n  (%s)" % \
              (rowIdx, len(predAPB), predAPB, rowIdx-checkpointAt, len(predB),
               predB))

        for i in range(len(predAPB)):
          (aProb, aValue) = predAPB[i]
          (bProb, bValue) = predB[i]
          self.assertLess(abs(aValue-bValue), epsilon,
              "Mismatch in predicted values: row %d of a+b predicts value %s "
              "and row %d of b predicts %s" % (rowIdx, aValue,
                                               rowIdx-checkpointAt, bValue))
          self.assertLess(abs(aProb-bProb), epsilon,
              "Mismatch in probabilities: row %d of a+b predicts %s with "
              "probability %s and row %d of b predicts %s with probability %s" \
               % (rowIdx, aValue, aProb, rowIdx-checkpointAt, bValue, bProb))

      except StopIteration:
        break

    # clean up model checkpoint directories
    shutil.rmtree(getCheckpointParentDir(aExpDir))
    shutil.rmtree(getCheckpointParentDir(bExpDir))
    shutil.rmtree(getCheckpointParentDir(aPlusBExpDir))

    print "Predictions match!"
Example #59
class StreamReader(RecordStreamIface):
  """
  Implements a stream reader. This is a high level class that owns one or more
  underlying implementations of a RecordStreamIFace. Each RecordStreamIFace
  implements the raw reading of records from the record store (which could be a
  file, hbase table or something else).

  In the future, we will support joining of two or more RecordStreamIface's (
  which is why the streamDef accepts a list of 'stream' elements), but for now
  only 1 source is supported.

  The class also implements aggregation of the (in the future) joined records
  from the sources.

  This module parses the stream definition (as defined in
  /nupic/frameworks/opf/jsonschema/stream_def.json), creates the
  RecordStreamIFace for each source ('stream's element) defined in the stream
  def, performs aggregation, and returns each record in the correct format
  according to the desired column names specified in the streamDef.

  This class implements the RecordStreamIFace interface and thus can be used
  in place of a raw record stream.

  This is an example streamDef:
    {
      'version': 1,
      'info': 'test_hotgym',

      'streams': [
          {'columns': [u'*'],
           'info': u'hotGym.csv',
           'last_record': 4000,
           'source': u'file://extra/hotgym/hotgym.csv'}
      ],

      'timeField': 'timestamp',

      'aggregation': {
        'hours': 1,
        'fields': [
            ('timestamp', 'first'),
            ('gym', 'first'),
            ('consumption', 'sum')
        ],
      }

    }

  """


  def __init__(self, streamDef, bookmark=None, saveOutput=False,
               isBlocking=True, maxTimeout=0, eofOnTimeout=False):
    """ Base class constructor, performs common initialization

    Parameters:
    ----------------------------------------------------------------
    streamDef:  The stream definition, potentially containing multiple sources
                (not supported yet). See
                /nupic/frameworks/opf/jsonschema/stream_def.json for the format
                of this dict

    bookmark: Bookmark to start reading from. This overrides the first_record
                field of the streamDef if provided.

    saveOutput: If true, save the output to a csv file in a temp directory.
                The path to the generated file can be found in the log
                output.

    isBlocking: should read operation block *forever* if the next row of data
                is not available, but the stream is not marked as 'completed'
                yet?

    maxTimeout: if isBlocking is False, max seconds to wait for more data before
                timing out; ignored when isBlocking is True.

    eofOnTimeout: If True and we get a read timeout (isBlocking must be False
                to get read timeouts), assume we've reached the end of the
                input and produce the last aggregated record, if one can be
                completed.

    """

    # Call superclass constructor
    super(StreamReader, self).__init__()

    loggerPrefix = 'com.numenta.nupic.data.StreamReader'
    self._logger = logging.getLogger(loggerPrefix)
    jsonhelpers.validate(streamDef,
                         schemaPath=pkg_resources.resource_filename(
                             jsonschema.__name__, "stream_def.json"))
    assert len(streamDef['streams']) == 1, "Only 1 source stream is supported"

    # Save constructor args
    sourceDict = streamDef['streams'][0]
    self._recordCount = 0
    self._eofOnTimeout = eofOnTimeout
    self._logger.debug('Reading stream with the def: %s', sourceDict)

    # Dictionary to store record statistics (min and max of scalars for now)
    self._stats = None

    # ---------------------------------------------------------------------
    # Get the stream definition params

    # Limiting window of the stream. It would not return any records until
    # 'first_record' ID is read (or very first with the ID above that). The
    # stream will return EOS once it reads record with ID 'last_record' or
    # above (NOTE: the name 'lastRecord' is misleading because it is NOT
    #  inclusive).
    firstRecordIdx = sourceDict.get('first_record', None)
    self._sourceLastRecordIdx = sourceDict.get('last_record', None)

    # If a bookmark was given, then override first_record from the stream
    #  definition.
    if bookmark is not None:
      firstRecordIdx = None


    # Column names must be provided in the streamdef json
    # Special case is ['*'], meaning all available names from the record stream
    self._streamFieldNames = sourceDict.get('columns', None)
    if self._streamFieldNames is not None and self._streamFieldNames[0] == '*':
      self._needFieldsFiltering = False
    else:
      self._needFieldsFiltering = True

    # Types must be specified in the streamdef json; in the case of a
    #  file_record_stream the types can be implicit from the file
    streamFieldTypes = sourceDict.get('types', None)
    self._logger.debug('Types from the def: %s', streamFieldTypes)
    # Validate that all types are valid
    if streamFieldTypes is not None:
      for dataType in streamFieldTypes:
        assert FieldMetaType.isValid(dataType)

    # Reset, sequence and time fields might be provided by streamdef json
    streamResetFieldName = streamDef.get('resetField', None)
    streamTimeFieldName = streamDef.get('timeField', None)
    streamSequenceFieldName = streamDef.get('sequenceIdField', None)
    self._logger.debug('r, t, s fields: %s, %s, %s', streamResetFieldName,
                                                      streamTimeFieldName,
                                                      streamSequenceFieldName)


    # =======================================================================
    # Open up the underlying record store
    dataUrl = sourceDict.get('source', None)
    assert dataUrl is not None
    self._recordStore = self._openStream(dataUrl, isBlocking, maxTimeout,
                                         bookmark, firstRecordIdx)
    assert self._recordStore is not None


    # =======================================================================
    # Prepare the data structures we need for returning just the fields
    #  the caller wants from each record
    recordStoreFields = self._recordStore.getFields()
    self._recordStoreFieldNames = self._recordStore.getFieldNames()

    if not self._needFieldsFiltering:
      self._streamFieldNames = self._recordStoreFieldNames

    # Build up the field definitions for each field. This is a list of tuples
    #  of (name, type, special)
    self._streamFields = []
    for dstIdx, name in enumerate(self._streamFieldNames):
      if name not in self._recordStoreFieldNames:
        raise RuntimeError("The column '%s' from the stream definition "
          "is not present in the underlying stream which has the following "
          "columns: %s" % (name, self._recordStoreFieldNames))

      fieldIdx = self._recordStoreFieldNames.index(name)
      fieldType = recordStoreFields[fieldIdx].type
      fieldSpecial = recordStoreFields[fieldIdx].special

      # If the types or specials were defined in the stream definition,
      #   then override what was found in the record store
      if streamFieldTypes is not None:
        fieldType = streamFieldTypes[dstIdx]

      if streamResetFieldName is not None and streamResetFieldName == name:
        fieldSpecial = FieldMetaSpecial.reset
      if streamTimeFieldName is not None and streamTimeFieldName == name:
        fieldSpecial = FieldMetaSpecial.timestamp
      if (streamSequenceFieldName is not None and
          streamSequenceFieldName == name):
        fieldSpecial = FieldMetaSpecial.sequence

      self._streamFields.append(FieldMetaInfo(name, fieldType, fieldSpecial))


    # ========================================================================
    # Create the aggregator which will handle aggregation of records before
    #  returning them.
    self._aggregator = Aggregator(
            aggregationInfo=streamDef.get('aggregation', None),
            inputFields=recordStoreFields,
            timeFieldName=streamDef.get('timeField', None),
            sequenceIdFieldName=streamDef.get('sequenceIdField', None),
            resetFieldName=streamDef.get('resetField', None))

    # We rely on the aggregator to tell us the bookmark of the last raw input
    #  that contributed to the aggregated record
    self._aggBookmark = None

    # Compute the aggregation period in terms of months and seconds
    if 'aggregation' in streamDef:
      self._aggMonthsAndSeconds = nupic.support.aggregationToMonthsSeconds(
                streamDef.get('aggregation'))
    else:
      self._aggMonthsAndSeconds = None


    # ========================================================================
    # Are we saving the generated output to a csv?
    if saveOutput:
      tmpDir = tempfile.mkdtemp()
      outFilename = os.path.join(tmpDir, "generated_output.csv")
      self._logger.info("StreamReader: Saving generated records to: '%s'" %
                        outFilename)
      self._writer = FileRecordStream(streamID=outFilename,
                                      write=True,
                                      fields=self._streamFields)
    else:
      self._writer = None


  @staticmethod
  def _openStream(dataUrl,
                  isBlocking,  # pylint: disable=W0613
                  maxTimeout,  # pylint: disable=W0613
                  bookmark,
                  firstRecordIdx):
    """Open the underlying file stream
    This only supports 'file://' prefixed paths.

    :returns: record stream instance
    :rtype: FileRecordStream
    """
    filePath = dataUrl[len(FILE_PREF):]
    if not os.path.isabs(filePath):
      filePath = os.path.join(os.getcwd(), filePath)
    return FileRecordStream(streamID=filePath,
                            write=False,
                            bookmark=bookmark,
                            firstRecord=firstRecordIdx)


  def close(self):
    """ Close the stream
    """
    return self._recordStore.close()


  def getNextRecord(self):
    """ Returns combined data from all sources (values only).
    Returns None on EOF; empty sequence on timeout.
    """


    # Keep reading from the raw input till we get enough for an aggregated
    #  record
    while True:

      # Reached EOF due to lastRow constraint?
      if self._sourceLastRecordIdx is not None  and \
          self._recordStore.getNextRecordIdx() >= self._sourceLastRecordIdx:
        preAggValues = None                             # indicates EOF
        bookmark = self._recordStore.getBookmark()

      else:
        # Get the raw record and bookmark
        preAggValues = self._recordStore.getNextRecord()
        bookmark = self._recordStore.getBookmark()

      if preAggValues == ():  # means timeout error occurred
        if self._eofOnTimeout:
          preAggValues = None  # act as if we got EOF
        else:
          return preAggValues  # Timeout indicator

      self._logger.debug('Read source record #%d: %r',
                        self._recordStore.getNextRecordIdx()-1, preAggValues)

      # Perform aggregation
      (fieldValues, aggBookmark) = self._aggregator.next(preAggValues, bookmark)

      # Update the aggregated record bookmark if we got a real record back
      if fieldValues is not None:
        self._aggBookmark = aggBookmark

      # Reached EOF?
      if preAggValues is None and fieldValues is None:
        return None

      # Return it if we have a record
      if fieldValues is not None:
        break


    # Do we need to re-order the fields in the record?
    if self._needFieldsFiltering:
      values = []
      srcDict = dict(zip(self._recordStoreFieldNames, fieldValues))
      for name in self._streamFieldNames:
        values.append(srcDict[name])
      fieldValues = values


    # Write to debug output?
    if self._writer is not None:
      self._writer.appendRecord(fieldValues)

    self._recordCount += 1

    self._logger.debug('Returning aggregated record #%d from getNextRecord(): '
                      '%r. Bookmark: %r',
                      self._recordCount-1, fieldValues, self._aggBookmark)
    return fieldValues


  def getDataRowCount(self):
    """Iterates through stream to calculate total records after aggregation.
    This will alter the bookmark state.
    """
    inputRowCountAfterAggregation = 0
    while True:
      record = self.getNextRecord()
      if record is None:
        return inputRowCountAfterAggregation
      inputRowCountAfterAggregation += 1

      if inputRowCountAfterAggregation > 10000:
        raise RuntimeError('No end of datastream found.')


  def getLastRecords(self, numRecords):
    """Returns the last numRecords records; not supported by StreamReader."""
    raise RuntimeError("Not implemented in StreamReader")


  def getRecordsRange(self, bookmark=None, range=None):
    """ Returns a range of records, starting from the bookmark. If 'bookmark'
    is None, then records read from the first available. If 'range' is
    None, all available records will be returned (caution: this could be
    a lot of records and require a lot of memory).
    """
    raise RuntimeError("Not implemented in StreamReader")


  def getNextRecordIdx(self):
    """Returns the index of the record that will be read next from
    getNextRecord()
    """
    return self._recordCount


  def recordsExistAfter(self, bookmark):
    """Returns True iff there are records left after the  bookmark."""
    return self._recordStore.recordsExistAfter(bookmark)


  def getAggregationMonthsAndSeconds(self):
    """ Returns the aggregation period of the record stream as a dict
    containing 'months' and 'seconds'. The months is always an integer and
    seconds is a floating point. Only one is allowed to be non-zero at a
    time.

    If there is no aggregation associated with the stream, returns None.

    Typically, a raw file or hbase stream will NOT have any aggregation info,
    but subclasses of RecordStreamIFace, like StreamReader, will and will
    return the aggregation period from this call. This call is used by the
    getNextRecordDict() method to assign a record number to a record given
    its timestamp and the aggregation interval

    Parameters:
    ------------------------------------------------------------------------
    retval: aggregationPeriod (as a dict) or None
              'months': number of months in aggregation period
              'seconds': number of seconds in aggregation period (as a float)
    """
    return self._aggMonthsAndSeconds


  def appendRecord(self, record, inputRef=None):
    """Saves the record in the underlying storage."""
    raise RuntimeError("Not implemented in StreamReader")


  def appendRecords(self, records, inputRef=None, progressCB=None):
    """Saves multiple records in the underlying storage."""
    raise RuntimeError("Not implemented in StreamReader")


  def removeOldData(self):
    raise RuntimeError("Not implemented in StreamReader")


  def seekFromEnd(self, numRecords):
    """Seeks to numRecords from the end and returns a bookmark to the new
    position.
    """
    raise RuntimeError("Not implemented in StreamReader")


  def getFieldNames(self):
    """ Returns all fields in all inputs (list of plain names).
    NOTE: currently, only one input is supported
    """
    return [f.name for f in self._streamFields]


  def getFields(self):
    """ Returns a sequence of nupic.data.fieldmeta.FieldMetaInfo
    name/type/special tuples for each field in the stream.
    """
    return self._streamFields


  def getBookmark(self):
    """ Returns a bookmark to the current position
    """
    return self._aggBookmark


  def clearStats(self):
    """ Resets stats collected so far.
    """
    self._recordStore.clearStats()


  def getStats(self):
    """ Returns stats (like min and max values of the fields).

    TODO: This method needs to be enhanced to get the stats on the *aggregated*
    records.
    """

    # The record store returns a dict of stats, each value in this dict is
    #  a list with one item per field of the record store
    #         {
    #           'min' : [f1_min, f2_min, f3_min],
    #           'max' : [f1_max, f2_max, f3_max]
    #         }
    recordStoreStats = self._recordStore.getStats()

    # We need to convert each item to represent the fields of the *stream*
    streamStats = dict()
    for (key, values) in recordStoreStats.items():
      fieldStats = dict(zip(self._recordStoreFieldNames, values))
      streamValues = []
      for name in self._streamFieldNames:
        streamValues.append(fieldStats[name])
      streamStats[key] = streamValues

    return streamStats


  def getError(self):
    """ Returns errors saved in the stream.
    """
    return self._recordStore.getError()


  def setError(self, error):
    """ Saves specified error in the stream.
    """
    self._recordStore.setError(error)


  def isCompleted(self):
    """ Returns True if all records have been read.
    """
    return self._recordStore.isCompleted()


  def setCompleted(self, completed=True):
    """ Marks the stream completed (True or False)
    """
    # Delegate to the underlying record store (a CSV file stream is always considered completed)
    self._recordStore.setCompleted(completed)


  def setTimeout(self, timeout):
    """ Set the read timeout """
    self._recordStore.setTimeout(timeout)


  def flush(self):
    """ Flush the file to disk """
    raise RuntimeError("Not implemented in StreamReader")