def setUp(self):
    """Build two mirrored five-element worlds over one universe, a seeded
    random agent for each, and the sensorimotor experiment runner."""
    numElements = 5
    encoderWidth = 7
    universe = OneDUniverse(debugSensor=True,
                            debugMotor=True,
                            nSensor=numElements * encoderWidth,
                            wSensor=encoderWidth,
                            nMotor=encoderWidth * 7,
                            wMotor=encoderWidth)

    # One agent walks the elements forward, the other in reverse; both
    # start at position 4 with identical seeds and unit moves only.
    forwardWorld = OneDWorld(universe, range(numElements))
    reverseWorld = OneDWorld(universe, list(reversed(range(numElements))))
    self.agents = [
        RandomOneDAgent(forwardWorld, 4,
                        possibleMotorValues=(-1, 1),
                        seed=23),
        RandomOneDAgent(reverseWorld, 4,
                        possibleMotorValues=(-1, 1),
                        seed=23),
    ]

    self.experimentRunner = SensorimotorExperimentRunner(
        tmOverrides={
            "columnDimensions": [numElements * encoderWidth],
            "minThreshold": encoderWidth * 2,
            "maxNewSynapseCount": encoderWidth * 2,
            "activationThreshold": encoderWidth * 2
        },
        tpOverrides={
            "columnDimensions": [512],
            "numActiveColumnsPerInhArea": 20
        })
# --- Example 2 (scraped-snippet separator) ---
    def testChooseMotorValue(self):
        """Chosen motor values must always keep the agent inside the world."""
        universe = OneDUniverse(nSensor=100, wSensor=5, nMotor=105, wMotor=5)
        world = OneDWorld(universe, [2, 0, 5, 15, 10], 2)
        agent = RandomOneDAgent(world,
                                possibleMotorValues=set(xrange(-10, 10)))

        # Starting at index 2 of a 5-element world, legal moves are -2..2.
        for _ in range(100):
            chosen = agent.chooseMotorValue()
            self.assertTrue(-2 <= chosen <= 2)  # bounded by size of world

        # Shift the world position to the left edge (index 0).
        world.move(-2)

        # From the left edge only forward moves 0..4 remain legal.
        for _ in range(100):
            chosen = agent.chooseMotorValue()
            self.assertTrue(0 <= chosen <= 4)  # bounded by size of world
  def testChooseMotorValue(self):
    """chooseMotorValue() must only propose moves that stay inside the world.

    NOTE(review): near-duplicate of the other testChooseMotorValue in this
    file, but from a different API revision — here the start position is
    passed to the agent and the agent (not the world) performs the move.
    """
    universe = OneDUniverse(nSensor=100, wSensor=5,
                            nMotor=105, wMotor=5)
    world = OneDWorld(universe, [2, 0, 5, 15, 10])
    # Agent starts at index 2; candidate motor values span [-10, 10).
    agent = RandomOneDAgent(world, 2, possibleMotorValues=set(xrange(-10, 10)))

    for _ in range(100):
      motorValue = agent.chooseMotorValue()
      self.assertTrue(-2 <= motorValue <= 2)  # bounded by size of world

    # Move to the left edge; only forward moves remain legal.
    agent.move(-2)

    for _ in range(100):
      motorValue = agent.chooseMotorValue()
      self.assertTrue(0 <= motorValue <= 4)  # bounded by size of world
# --- Example 4 (scraped-snippet separator) ---
    def setUp(self):
        """Create three worlds over disjoint element ranges, one seeded random
        agent per world, and a seeded experiment runner.
        """
        # Initialize the universe, worlds, and agents
        nElements = 4
        nWorlds = 3
        n = 512
        w = 20
        universe = OneDUniverse(debugSensor=True,
                                debugMotor=True,
                                nSensor=n,
                                wSensor=w,
                                nMotor=n,
                                wMotor=w)
        # World k covers elements [k*nElements, (k+1)*nElements); every agent
        # starts at position 0 and moves one step at a time.
        self.agents = [
            RandomOneDAgent(OneDWorld(
                universe, range(nElements * world, nElements * (world + 1))),
                            0,
                            possibleMotorValues=(-1, 1),
                            seed=23) for world in xrange(nWorlds)
        ]

        # TM/TP thresholds scale with the encoder width w.
        self.experimentRunner = SensorimotorExperimentRunner(
            tmOverrides={
                "columnDimensions": [n],
                "minThreshold": w * 2,
                "maxNewSynapseCount": w * 2,
                "activationThreshold": w * 2,
                "seed": 42
            },
            tpOverrides={
                "columnDimensions": [n],
                "numActiveColumnsPerInhArea": w,
                "seed": 42
            })
    def testSingleWorldBasic(self):
        """Test Sensorimotor Temporal Memory learning in a single world.

        Patterns are represented as complete SDRs. No patterns are repeated.
        Prediction should be perfect.
        """
        self._init()

        universe = OneDUniverse(debugSensor=True, debugMotor=True,
                                nSensor=100, wSensor=10,
                                nMotor=70, wMotor=10)
        world = OneDWorld(universe, [0, 1, 2, 3], 2)
        agent = RandomOneDAgent(world, possibleMotorValues=set(xrange(-3, 4)))

        # Train on 100 random sensorimotor steps ...
        trainingSequence = self._generateSensorimotorSequences(100, [agent])
        self._feedTM(trainingSequence)

        # ... then evaluate on a fresh 100-step sequence.
        testingSequence = self._generateSensorimotorSequences(100, [agent])
        stats = self._testTM(testingSequence)

        # Unique patterns => predictions must be exact in both directions.
        self._assertAllActiveWerePredicted(stats, universe)
        self._assertAllInactiveWereUnpredicted(stats)
        self._assertSequencesOnePredictedActiveCellPerColumn(stats)
    def testMultipleWorldsSharedPatterns(self):
        """
    Test Sensorimotor Temporal Memory learning in multiple separate worlds.
    Patterns are represented as complete SDRs. Patterns are shared between
    worlds.
    All active columns should have been predicted.
    """
        self._init()

        universe = OneDUniverse(debugMotor=True,
                                nSensor=100,
                                wSensor=10,
                                nMotor=70,
                                wMotor=10)

        agents = []
        numWorlds = 5

        for i in xrange(numWorlds):
            # Python 2: range() returns a list, so it can be shuffled in
            # place — each world is a random ordering of the same 4 patterns.
            patterns = range(4)
            self._random.shuffle(patterns)
            world = OneDWorld(universe, patterns, 2)
            agent = RandomOneDAgent(world,
                                    possibleMotorValues=set(xrange(-3, 4)))
            agents.append(agent)

        sequence = self._generateSensorimotorSequences(150, agents)
        self._feedTM(sequence)

        sequence = self._generateSensorimotorSequences(100, agents)
        stats = self._testTM(sequence)

        self._assertAllActiveWerePredicted(stats, universe)
        # Shared patterns cause some extra predictions, but they must stay
        # bounded.
        self.assertTrue(0 < stats.predictedInactiveColumns.average < 10)
    def testMultipleWorldsBasic(self):
        """Test Sensorimotor Temporal Memory learning in multiple separate worlds.

        Patterns are represented as complete SDRs. No patterns are repeated.
        Prediction should be perfect.
        """
        self._init()

        universe = OneDUniverse(debugMotor=True,
                                nSensor=100, wSensor=10,
                                nMotor=70, wMotor=10)

        numWorlds = 5
        sequenceLength = 4

        # Each world gets its own disjoint slice of the pattern space.
        agents = []
        for worldIndex in xrange(numWorlds):
            firstPattern = worldIndex * sequenceLength
            world = OneDWorld(
                universe,
                range(firstPattern, firstPattern + sequenceLength), 2)
            agents.append(
                RandomOneDAgent(world,
                                possibleMotorValues=set(xrange(-3, 4))))

        # Train, then evaluate on a fresh sequence.
        self._feedTM(self._generateSensorimotorSequences(150, agents))
        stats = self._testTM(self._generateSensorimotorSequences(100, agents))

        # Disjoint patterns => prediction must be perfect.
        self._assertAllActiveWerePredicted(stats, universe)
        self._assertAllInactiveWereUnpredicted(stats)
        self._assertSequencesOnePredictedActiveCellPerColumn(stats)
    def testSingleWorldOneBitPerPattern(self):
        """
    Test Sensorimotor Temporal Memory learning in a single world.
    Patterns (sensor and motor) are represented with one active bit per pattern.
    """
        # Thresholds are lowered to 2 because only wSensor=wMotor=1 bit is
        # active per pattern.
        self._init({
            "columnDimensions": [4],
            "minThreshold": 2,
            "activationThreshold": 2
        })

        universe = OneDUniverse(debugSensor=True,
                                debugMotor=True,
                                nSensor=4,
                                wSensor=1,
                                nMotor=3,
                                wMotor=1)
        world = OneDWorld(universe, [0, 1, 2, 3], 2)
        agent = RandomOneDAgent(world, possibleMotorValues=set(xrange(-1, 2)))

        sequence = self._generateSensorimotorSequences(100, [agent])
        self._feedTM(sequence)

        sequence = self._generateSensorimotorSequences(20, [agent])
        stats = self._testTM(sequence)

        self._assertAllActiveWerePredicted(stats, universe)
        self._assertAllInactiveWereUnpredicted(stats)
        self._assertSequencesOnePredictedActiveCellPerColumn(stats)
# --- Example 9 (scraped-snippet separator) ---
    def testGenerateSensorimotorSequence(self):
        """Generated sequences have the requested length and ON-bit widths."""
        universe = OneDUniverse(nSensor=100, wSensor=5, nMotor=105, wMotor=5)
        world = OneDWorld(universe, [2, 0, 5, 15, 10], 2)
        agent = RandomOneDAgent(world,
                                possibleMotorValues=set(xrange(-10, 10)))

        sensorSeq, motorSeq, comboSeq = (
            agent.generateSensorimotorSequence(20))

        self.assertEqual(len(sensorSeq), 20)
        self.assertEqual(len(motorSeq), 20)
        self.assertEqual(len(comboSeq), 20)

        # Ensure each encoded pattern has the correct number of ON bits:
        # wSensor=5, wMotor=5, and their union 10.
        for sensor, motor, combo in zip(sensorSeq, motorSeq, comboSeq):
            self.assertEqual(len(sensor), 5)
            self.assertEqual(len(motor), 5)
            self.assertEqual(len(combo), 10)
  def testGenerateSensorimotorSequence(self):
    """Generated sensor/motor/combined sequences have the requested length
    and the expected number of ON bits per encoded pattern.

    NOTE(review): near-duplicate of the other testGenerateSensorimotorSequence
    in this file, from the API revision where the agent holds the position.
    """
    universe = OneDUniverse(nSensor=100, wSensor=5,
                            nMotor=105, wMotor=5)
    world = OneDWorld(universe, [2, 0, 5, 15, 10])
    agent = RandomOneDAgent(world, 2, possibleMotorValues=set(xrange(-10, 10)))

    sensorSequence, motorSequence, sensorimotorSequence = (
      agent.generateSensorimotorSequence(20)
    )

    self.assertEqual(len(sensorSequence), 20)
    self.assertEqual(len(motorSequence), 20)
    self.assertEqual(len(sensorimotorSequence), 20)

    # Ensure each encoded pattern has the correct number of ON bits
    # (wSensor=5, wMotor=5, combined 10).
    for i in range(20):
      self.assertEqual(len(sensorSequence[i]), 5)
      self.assertEqual(len(motorSequence[i]), 5)
      self.assertEqual(len(sensorimotorSequence[i]), 10)
# --- Example 11 (scraped-snippet separator) ---
    def testDistanceToBoundaries(self):
        """distanceToBoundaries() tracks (left, right) gaps as the agent moves."""
        universe = OneDUniverse(debugSensor=True, debugMotor=True,
                                nSensor=100, wSensor=5,
                                nMotor=25, wMotor=5)
        world = OneDWorld(universe, [2, 0, 5, 15, 10])
        agent = RandomOneDAgent(world, 2)

        # Centered in a 5-element world: two cells free on either side.
        self.assertEqual(agent.distanceToBoundaries(), (2, 2))

        # At the left edge.
        agent.move(-2)
        self.assertEqual(agent.distanceToBoundaries(), (0, 4))

        # Two steps of +2 reach the right edge.
        agent.move(2)
        agent.move(2)
        self.assertEqual(agent.distanceToBoundaries(), (4, 0))
  def testDistanceToBoundaries(self):
    """distanceToBoundaries() returns the (left, right) gaps to the world
    edges and follows the agent as it moves.
    """
    universe = OneDUniverse(debugSensor=True, debugMotor=True,
                            nSensor=100, wSensor=5,
                            nMotor=25, wMotor=5)
    world = OneDWorld(universe, [2, 0, 5, 15, 10])
    # Start at index 2, the center of a 5-element world.
    agent = RandomOneDAgent(world, 2)
    self.assertEqual(agent.distanceToBoundaries(), (2, 2))

    agent.move(-2)  # left edge
    self.assertEqual(agent.distanceToBoundaries(), (0, 4))

    agent.move(2)
    agent.move(2)  # right edge
    self.assertEqual(agent.distanceToBoundaries(), (4, 0))
# --- Example 13 (scraped-snippet separator) ---
    def testMotion(self):
        """Moving the agent yields the expected debug motor encodings and
        updates the sensed pattern accordingly.

        With debug encodings, the expected bit ranges below imply value v is
        encoded as bits [v*w, (v+1)*w) — sensor w=5, motor w=20, with motor
        values offset so that move(0) maps to the middle bucket.
        """
        universe = OneDUniverse(debugSensor=True,
                                debugMotor=True,
                                nSensor=100,
                                wSensor=5,
                                nMotor=100,
                                wMotor=20)
        world = OneDWorld(universe, [2, 0, 5, 15, 10])
        agent = RandomOneDAgent(world, 2)

        # At index 2 the element is 5 -> sensor bits [25, 30).
        self.assertEqual(set(xrange(25, 30)), agent.sense())

        self.assertEqual(agent.move(1), set(xrange(60, 80)))
        self.assertEqual(set(xrange(75, 80)), agent.sense())

        self.assertEqual(agent.move(-2), set(xrange(0, 20)))
        self.assertEqual(set(xrange(0, 5)), agent.sense())

        # A zero move still produces a motor pattern but leaves the sensor
        # reading unchanged.
        self.assertEqual(agent.move(0), set(xrange(40, 60)))
        self.assertEqual(set(xrange(0, 5)), agent.sense())
  def testMotion(self):
    """Moving the agent yields the expected debug motor encodings and updates
    the sensed pattern (2-space-indent duplicate of testMotion above).
    """
    universe = OneDUniverse(debugSensor=True, debugMotor=True,
                            nSensor=100, wSensor=5,
                            nMotor=100, wMotor=20)
    world = OneDWorld(universe, [2, 0, 5, 15, 10])
    agent = RandomOneDAgent(world, 2)

    # At index 2 the element is 5 -> sensor bits [25, 30).
    self.assertEqual(set(xrange(25, 30)), agent.sense())

    self.assertEqual(agent.move(1), set(xrange(60, 80)))
    self.assertEqual(set(xrange(75, 80)), agent.sense())

    self.assertEqual(agent.move(-2), set(xrange(0, 20)))
    self.assertEqual(set(xrange(0, 5)), agent.sense())

    # Zero move: motor pattern emitted, sensor reading unchanged.
    self.assertEqual(agent.move(0), set(xrange(40, 60)))
    self.assertEqual(set(xrange(0, 5)), agent.sense())
# --- Example 15 (scraped-snippet separator) ---
    def testMultipleWorldsSharedPatternsNoSharedSubsequencesWithSelfMovement(
            self):
        """Test Sensorimotor Temporal Memory learning in multiple separate worlds.
    Patterns are represented as complete SDRs. Patterns are shared between
    worlds. Worlds have no shared subsequences. Allows movements with value 0
    (self-movements).
    All active columns should have been predicted.
    All inactive columns should have been unpredicted.
    Patterns in different worlds should have different cell representations.
    """
        self._init()

        universe = OneDUniverse(debugSensor=True,
                                debugMotor=True,
                                nSensor=100,
                                wSensor=10,
                                nMotor=70,
                                wMotor=10)

        # Two worlds over the same 4 patterns, one the reverse of the other,
        # so no ordered subsequence is shared between them.
        agents = []
        patterns = range(4)
        for _ in xrange(2):
            world = OneDWorld(universe, patterns)
            # xrange(-3, 4) includes 0, permitting self-movements.
            agent = RandomOneDAgent(world,
                                    2,
                                    possibleMotorValues=set(xrange(-3, 4)))
            agents.append(agent)
            patterns = list(patterns)  # copy
            patterns.reverse()

        sequence = self._generateSensorimotorSequences(150, agents)
        self._feedTM(sequence)

        sequence = self._generateSensorimotorSequences(100, agents)
        self._testTM(sequence)

        # NOTE(review): this variant uses the older assertion API — results
        # are pulled from the TM's monitor-mixin traces rather than from a
        # stats object returned by _testTM.
        self._assertAllActiveWerePredicted(universe)
        predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
            self.tm.mmGetTracePredictedInactiveColumns())
        # Note: There will be extra predictions because transitions are shared
        # between the worlds (the self-movements)
        self.assertTrue(0 < predictedInactiveColumnsMetric.mean < 5)
        self._assertSequencesOnePredictedActiveCellPerColumn()
        self._assertOneSequencePerPredictedActiveCell()
    def testMultipleWorldsSharedPatternsNoSharedSubsequences(self):
        """
    Test Sensorimotor Temporal Memory learning in multiple separate worlds.
    Patterns are represented as complete SDRs. Patterns are shared between
    worlds. Worlds have no shared subsequences.
    All active columns should have been predicted.
    Patterns in different worlds should have different cell representations
    """
        self._init()

        universe = OneDUniverse(debugSensor=True,
                                debugMotor=True,
                                nSensor=100,
                                wSensor=10,
                                nMotor=70,
                                wMotor=10)

        # Two worlds over the same 4 patterns, one the reverse of the other,
        # so no ordered subsequence is shared between them.
        agents = []
        patterns = range(4)
        for _ in xrange(2):
            world = OneDWorld(universe, patterns, 2)
            # Motor value 0 is deliberately excluded: no self-movements here.
            agent = RandomOneDAgent(world,
                                    possibleMotorValues=set(
                                        [-3, -2, -1, 1, 2, 3]))
            agents.append(agent)
            patterns = list(patterns)  # copy
            patterns.reverse()

        sequence = self._generateSensorimotorSequences(150, agents)
        self._feedTM(sequence)

        sequence = self._generateSensorimotorSequences(100, agents)
        stats = self._testTM(sequence)

        self._assertAllActiveWerePredicted(stats, universe)
        # Some cross-world predictions are expected but must stay bounded.
        self.assertTrue(0 < stats.predictedInactiveColumns.average < 5)
        self._assertSequencesOnePredictedActiveCellPerColumn(stats)
        self._assertOneSequencePerPredictedActiveCell(stats)
# --- Example 17 (scraped-snippet separator) ---
def setupExperiment(n, w, numElements, numWorlds, tmParams, tpParams):
  print "Setting up experiment..."
  universe = OneDUniverse(nSensor=n, wSensor=w,
                          nMotor=n, wMotor=w)
  runner = SensorimotorExperimentRunner(tmOverrides=tmParams,
                                        tpOverrides=tpParams,
                                        seed=RANDOM_SEED)
  exhaustiveAgents = []
  randomAgents = []
  for world in xrange(numWorlds):
    elements = range(world * numElements, world * numElements + numElements)
    # agent = ExhaustiveOneDAgent(OneDWorld(universe, elements), 0)
    # exhaustiveAgents.append(agent)

    possibleMotorValues = range(-numElements, numElements + 1)
    possibleMotorValues.remove(0)
    agent = RandomOneDAgent(OneDWorld(universe, elements), numElements / 2,
                            possibleMotorValues=possibleMotorValues,
                            seed=RANDOM_SEED)
    randomAgents.append(agent)
  print "Done setting up experiment."
  print
  return runner, exhaustiveAgents, randomAgents
# --- Example 18 (scraped-snippet separator) ---
    def testMultipleWorldsSharedPatterns(self):
        """Test Sensorimotor Temporal Memory learning in multiple separate worlds.
    Patterns are represented as complete SDRs. Patterns are shared between
    worlds.
    All active columns should have been predicted.
    """
        self._init()

        universe = OneDUniverse(debugSensor=True,
                                debugMotor=True,
                                nSensor=100,
                                wSensor=10,
                                nMotor=70,
                                wMotor=10)

        agents = []
        # Five fixed permutations of the same four patterns, one world each.
        patternSets = [[3, 2, 1, 0], [0, 2, 1, 3], [1, 2, 0, 3], [3, 0, 2, 1],
                       [1, 0, 2, 3]]

        for patterns in patternSets:
            world = OneDWorld(universe, patterns)
            agent = RandomOneDAgent(world,
                                    2,
                                    possibleMotorValues=set(
                                        [-3, -2, -1, 1, 2, 3]))
            agents.append(agent)

        sequence = self._generateSensorimotorSequences(150, agents)
        self._feedTM(sequence)

        sequence = self._generateSensorimotorSequences(100, agents)
        self._testTM(sequence)

        # Older assertion API: read results from the TM monitor-mixin traces.
        self._assertAllActiveWerePredicted(universe)
        predictedInactiveColumnsMetric = self.tm.mmGetMetricFromTrace(
            self.tm.mmGetTracePredictedInactiveColumns())
        self.assertTrue(0 < predictedInactiveColumnsMetric.mean < 5)
        # NOTE(review): everything below appears pasted from a different
        # function — it references numElements, numWorlds, runner, VERBOSITY,
        # and SHOW_PROGRESS_INTERVAL, none of which are defined in this
        # method — and is cut off mid-statement at the end. Likely a scraping
        # artifact; confirm against the original source before use.
        exhaustiveAgents = []
        randomAgents = []
        completeSequenceLength = numElements**2

        for world in xrange(numWorlds):
            elements = range(world * numElements,
                             world * numElements + numElements)

            exhaustiveAgents.append(
                ExhaustiveOneDAgent(OneDWorld(universe, elements), 0))

            possibleMotorValues = range(-numElements, numElements + 1)
            possibleMotorValues.remove(0)
            randomAgents.append(
                RandomOneDAgent(OneDWorld(universe, elements),
                                numElements / 2,
                                possibleMotorValues=possibleMotorValues))

        print "Training (worlds: {0}, elements: {1})...".format(
            numWorlds, numElements)
        sequences = runner.generateSequences(completeSequenceLength * 2,
                                             exhaustiveAgents,
                                             verbosity=VERBOSITY)
        runner.feedLayers(sequences,
                          tmLearn=True,
                          tpLearn=True,
                          verbosity=VERBOSITY,
                          showProgressInterval=SHOW_PROGRESS_INTERVAL)
        print "Done training.\n"

        print "Testing (worlds: {0}, elements: {1})...".format(
# --- Example 20 (scraped-snippet separator) ---
############################################################
# Initialize the universe, worlds, and agents
nElements = 20
wEncoders = 21
# NOTE(review): unlike the other examples in this file, debug encodings are
# disabled here — presumably the real (non-debug) encoders are exercised;
# confirm against the OneDUniverse implementation.
universe = OneDUniverse(debugSensor=False,
                        debugMotor=False,
                        nSensor=512,
                        wSensor=wEncoders,
                        nMotor=wEncoders * 7,
                        wMotor=wEncoders)

# Initialize a bunch of worlds, each with at most 8 elements
agents = [
    RandomOneDAgent(OneDWorld(universe, range(8), 4),
                    possibleMotorValues=(-2, -1, 1, 2),
                    seed=23),
    RandomOneDAgent(OneDWorld(universe, range(8 - 1, -1, -1), 4),
                    possibleMotorValues=(-2, -1, 1, 2),
                    seed=42),
    RandomOneDAgent(OneDWorld(universe, range(0, 16, 2), 4),
                    possibleMotorValues=(-2, -1, 1, 2),
                    seed=10),
    RandomOneDAgent(OneDWorld(universe, range(0, 15, 3), 2),
                    possibleMotorValues=(-2, -1, 1, 2),
                    seed=5),
    RandomOneDAgent(OneDWorld(universe, range(0, 20, 4), 2),
                    possibleMotorValues=(-2, -1, 1, 2),
                    seed=5),
    RandomOneDAgent(OneDWorld(universe, [0, 8, 3, 1, 6], 2),
                    possibleMotorValues=(-2, -1, 1, 2),
# --- Example 21 (scraped-snippet separator) ---
    return tm.getStatistics()


# Initialize the universe, worlds, and agents
nElements = 10
wEncoders = 7
universe = OneDUniverse(debugSensor=True,
                        debugMotor=True,
                        nSensor=nElements * wEncoders,
                        wSensor=wEncoders,
                        nMotor=wEncoders * 7,
                        wMotor=wEncoders)
# Four worlds over the same ten elements — forward, reverse, even strides,
# strides of three — each with its own seeded unit-step random agent.
agents = [
    RandomOneDAgent(OneDWorld(universe, range(nElements)),
                    4,
                    possibleMotorValues=(-1, 1),
                    seed=23),
    RandomOneDAgent(OneDWorld(universe, range(nElements - 1, -1, -1)),
                    4,
                    possibleMotorValues=(-1, 1),
                    seed=42),
    RandomOneDAgent(OneDWorld(universe, range(0, nElements, 2)),
                    4,
                    possibleMotorValues=(-1, 1),
                    seed=10),
    RandomOneDAgent(OneDWorld(universe, range(0, nElements, 3)),
                    2,
                    possibleMotorValues=(-1, 1),
                    seed=5),
]
# --- Example 22 (scraped-snippet separator) ---
def run(numWorlds, numElements, outputDir, params=DEFAULTS):
    """Run one train/test sensorimotor experiment and log metrics to CSV.

    Trains the temporal memory and then the temporal pooler on exhaustive
    sweeps of every world, tests with random agents, and writes a header row
    plus one data row (world/element counts, duration, and min/max/sum/mean/
    stddev of every default TM/TP metric) to
    "<outputDir>/<numWorlds>x<numElements>.csv".

    NOTE(review): Python 2 code throughout — print statements, xrange,
    integer division (numElements / 2, completeSequenceLength / 4), and the
    csv file opened in 'wb' mode.
    """
    # Extract params
    n = params["n"]
    w = params["w"]
    tmParams = params["tmParams"]
    tpParams = params["tpParams"]

    # Initialize output
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)

    csvFilePath = os.path.join(outputDir,
                               "{0}x{1}.csv".format(numWorlds, numElements))

    # Initialize experiment
    start = time.time()
    universe = OneDUniverse(nSensor=n, wSensor=w, nMotor=n, wMotor=w)

    # Run the experiment
    with open(csvFilePath, 'wb') as csvFile:
        csvWriter = csv.writer(csvFile)

        print(
            "Experiment parameters: "
            "(# worlds = {0}, # elements = {1}, n = {2}, w = {3})".format(
                numWorlds, numElements, n, w))
        print "Temporal memory parameters: {0}".format(tmParams)
        print "Temporal pooler parameters: {0}".format(tpParams)
        print
        print "Setting up experiment..."
        runner = SensorimotorExperimentRunner(tmOverrides=tmParams,
                                              tpOverrides=tpParams)
        print "Done setting up experiment."
        print

        # One exhaustive agent (for training) and one random agent (for
        # testing) per world; each world owns a disjoint element range.
        exhaustiveAgents = []
        randomAgents = []
        # completeSequenceLength = numElements**2 — presumably one full
        # exhaustive sweep of a world; confirm in ExhaustiveOneDAgent.
        completeSequenceLength = numElements**2

        for world in xrange(numWorlds):
            elements = range(world * numElements,
                             world * numElements + numElements)

            exhaustiveAgents.append(
                ExhaustiveOneDAgent(OneDWorld(universe, elements), 0))

            # Any non-zero step of magnitude <= numElements is allowed.
            possibleMotorValues = range(-numElements, numElements + 1)
            possibleMotorValues.remove(0)
            randomAgents.append(
                RandomOneDAgent(OneDWorld(universe, elements),
                                numElements / 2,
                                possibleMotorValues=possibleMotorValues))

        # Phase 1: train the temporal memory only (tpLearn=False).
        print "Training (worlds: {0}, elements: {1})...".format(
            numWorlds, numElements)
        print
        print "Training temporal memory..."
        sequences = runner.generateSequences(completeSequenceLength * 2,
                                             exhaustiveAgents,
                                             verbosity=VERBOSITY)
        runner.feedLayers(sequences,
                          tmLearn=True,
                          tpLearn=False,
                          verbosity=VERBOSITY,
                          showProgressInterval=SHOW_PROGRESS_INTERVAL)
        print

        print MonitorMixinBase.mmPrettyPrintMetrics(
            runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
        print

        # Phase 2: train the temporal pooler on top of the frozen TM.
        print "Training temporal pooler..."
        sequences = runner.generateSequences(completeSequenceLength * 1,
                                             exhaustiveAgents,
                                             verbosity=VERBOSITY)
        runner.feedLayers(sequences,
                          tmLearn=False,
                          tpLearn=True,
                          verbosity=VERBOSITY,
                          showProgressInterval=SHOW_PROGRESS_INTERVAL)
        print
        print "Done training."
        print

        print MonitorMixinBase.mmPrettyPrintMetrics(
            runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
        print

        if PLOT >= 1:
            runner.tp.mmGetPlotConnectionsPerColumn(
                title="worlds: {0}, elements: {1}".format(
                    numWorlds, numElements))

        # Phase 3: test with random agents, all learning disabled.
        print "Testing (worlds: {0}, elements: {1})...".format(
            numWorlds, numElements)
        sequences = runner.generateSequences(completeSequenceLength / 4,
                                             randomAgents,
                                             verbosity=VERBOSITY,
                                             numSequences=4)
        runner.feedLayers(sequences,
                          tmLearn=False,
                          tpLearn=False,
                          verbosity=VERBOSITY,
                          showProgressInterval=SHOW_PROGRESS_INTERVAL)
        print "Done testing.\n"

        if VERBOSITY >= 2:
            print "Overlap:"
            print
            print runner.tp.mmPrettyPrintDataOverlap()
            print

        print MonitorMixinBase.mmPrettyPrintMetrics(
            runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
        print

        elapsed = int(time.time() - start)
        print "Total time: {0:2} seconds.".format(elapsed)

        # One CSV header row and one data row: identifiers, duration, then
        # five summary statistics per default TM/TP metric.
        header = ["# worlds", "# elements", "duration"]
        row = [numWorlds, numElements, elapsed]

        for metric in (runner.tp.mmGetDefaultMetrics() +
                       runner.tm.mmGetDefaultMetrics()):
            header += [
                "{0} ({1})".format(metric.prettyPrintTitle(), x)
                for x in ["min", "max", "sum", "mean", "stddev"]
            ]
            row += [
                metric.min, metric.max, metric.sum, metric.mean,
                metric.standardDeviation
            ]

        csvWriter.writerow(header)
        csvWriter.writerow(row)
        csvFile.flush()

    if PLOT >= 1:
        raw_input("Press any key to exit...")
# --- Example 23 (scraped-snippet separator) ---
    return tm.getStatistics()


# Initialize the universe, worlds, and agents
nElements = 5
wEncoders = 7
universe = OneDUniverse(debugSensor=True,
                        debugMotor=True,
                        nSensor=nElements * wEncoders,
                        wSensor=wEncoders,
                        nMotor=wEncoders * 7,
                        wMotor=wEncoders)
# A single seeded agent walking the five elements with unit steps, starting
# at position 4.
agents = [
    RandomOneDAgent(OneDWorld(universe, range(nElements), 4),
                    possibleMotorValues=(-1, 1),
                    seed=23),
]

# The TM parameters
# Thresholds and synapse counts scale with the encoder width wEncoders.
DEFAULT_TM_PARAMS = {
    "columnDimensions": [nElements * wEncoders],
    "cellsPerColumn": 8,
    "initialPermanence": 0.5,
    "connectedPermanence": 0.6,
    "minThreshold": wEncoders * 2,
    "maxNewSynapseCount": wEncoders * 2,
    "permanenceIncrement": 0.1,
    "permanenceDecrement": 0.02,
    "activationThreshold": wEncoders * 2
}
# (truncated docstring fragment — its opening was lost in extraction):
# "... the number of predicted columns matches the actual columns."

############################################################
# Initialize the universe, worlds, and agents
nElements = 5
wEncoders = 7
universe = OneDUniverse(debugSensor=True,
                        debugMotor=True,
                        nSensor=nElements * wEncoders,
                        wSensor=wEncoders,
                        nMotor=wEncoders * 7,
                        wMotor=wEncoders)
# Two mirrored worlds over the same five elements (forward and reversed),
# each with a seeded unit-step random agent starting at position 4.
agents = [
    RandomOneDAgent(OneDWorld(universe, range(nElements)),
                    4,
                    possibleMotorValues=(-1, 1),
                    seed=23),
    RandomOneDAgent(OneDWorld(universe, list(reversed(range(nElements)))),
                    4,
                    possibleMotorValues=(-1, 1),
                    seed=23),
]

# Layer-3 (temporal pooler) column parameters.
l3NumColumns = 512
l3NumActiveColumnsPerInhArea = 20

############################################################
# Initialize the experiment runner with relevant parameters
smer = SensorimotorExperimentRunner(tmOverrides={
    "columnDimensions": [nElements * wEncoders],
    "minThreshold": wEncoders * 2,