    def testChooseMotorValueStartAt2(self):
        """Starting at position 2 in a 5-element world, the exhaustive agent
        should produce exactly this sequence of motor values over
        numElements ** 2 moves."""
        numElements = 5
        universe = OneDUniverse(nSensor=100, wSensor=5, nMotor=100, wMotor=5)
        world = OneDWorld(universe, range(numElements))
        agent = ExhaustiveOneDAgent(world, 2)

        motorValues = []

        for _ in range(numElements**2):
            motorValue = agent.chooseMotorValue()
            agent.move(motorValue)
            motorValues.append(motorValue)

        self.assertEqual(motorValues, [
            -2, 2, -1, 1, 1, -1, 2, -2, -2, 1, -1, 3, -3, 4, -4, 1, 2, -2, 3,
            -3, 2, 1, -1, 1, -2
        ])
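To run this test method on its own, it needs a TestCase wrapper and the sensorimotor classes on the import path. A minimal sketch, assuming the module layout of NuPIC's research "sensorimotor" package (the import paths below are assumptions, not shown in this snippet):

import unittest

# Assumed import paths; adjust them to wherever these classes live in your checkout.
from sensorimotor.one_d_universe import OneDUniverse
from sensorimotor.one_d_world import OneDWorld
from sensorimotor.exhaustive_one_d_agent import ExhaustiveOneDAgent


class ExhaustiveOneDAgentTest(unittest.TestCase):

    def testChooseMotorValueStartAt2(self):
        pass  # paste the test body shown above here


if __name__ == "__main__":
    unittest.main()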
Example #3
def setupExperiment(n, w, numElements, numWorlds, tmParams, tpParams):
    """Build the universe, the experiment runner, and one exhaustive and one
    random agent per world."""
    print "Setting up experiment..."
    universe = OneDUniverse(nSensor=n, wSensor=w, nMotor=n, wMotor=w)
    runner = SensorimotorExperimentRunner(tmOverrides=tmParams,
                                          tpOverrides=tpParams,
                                          seed=RANDOM_SEED)
    exhaustiveAgents = []
    randomAgents = []
    for world in xrange(numWorlds):
        elements = range(world * numElements,
                         world * numElements + numElements)
        agent = ExhaustiveOneDAgent(OneDWorld(universe, elements), 0)
        exhaustiveAgents.append(agent)

        possibleMotorValues = range(-numElements, numElements + 1)
        possibleMotorValues.remove(0)
        agent = RandomOneDAgent(OneDWorld(universe, elements),
                                numElements / 2,
                                possibleMotorValues=possibleMotorValues,
                                seed=RANDOM_SEED)
        randomAgents.append(agent)
    print "Done setting up experiment."
    print
    return runner, exhaustiveAgents, randomAgents
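A hypothetical call to setupExperiment, showing the expected argument shapes; the sizes and the empty override dicts below are illustrative assumptions, not values taken from this snippet (the function also expects a RANDOM_SEED constant in its own module):

# Illustrative values only; n, w, and the override dicts are assumptions.
runner, exhaustiveAgents, randomAgents = setupExperiment(
    n=512,          # total sensor/motor bits in the universe
    w=20,           # active bits per encoding
    numElements=5,  # elements per world
    numWorlds=10,   # one exhaustive and one random agent per world
    tmParams={},    # temporal memory parameter overrides (contents not shown here)
    tpParams={})    # temporal pooler parameter overrides (contents not shown here)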
Example #4
def run(numWorlds, numElements, outputDir, params=DEFAULTS):
    """Train the temporal memory and temporal pooler on exhaustive agents,
    test on random agents, and write the collected metrics to a CSV file in
    outputDir."""
    # Extract params
    n = params["n"]
    w = params["w"]
    tmParams = params["tmParams"]
    tpParams = params["tpParams"]

    # Initialize output
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)

    csvFilePath = os.path.join(outputDir,
                               "{0}x{1}.csv".format(numWorlds, numElements))

    # Initialize experiment
    start = time.time()
    universe = OneDUniverse(nSensor=n, wSensor=w, nMotor=n, wMotor=w)

    # Run the experiment
    with open(csvFilePath, 'wb') as csvFile:
        csvWriter = csv.writer(csvFile)

        print(
            "Experiment parameters: "
            "(# worlds = {0}, # elements = {1}, n = {2}, w = {3})".format(
                numWorlds, numElements, n, w))
        print "Temporal memory parameters: {0}".format(tmParams)
        print "Temporal pooler parameters: {0}".format(tpParams)
        print
        print "Setting up experiment..."
        runner = SensorimotorExperimentRunner(tmOverrides=tmParams,
                                              tpOverrides=tpParams)
        print "Done setting up experiment."
        print

        exhaustiveAgents = []
        randomAgents = []
        completeSequenceLength = numElements**2

        for world in xrange(numWorlds):
            elements = range(world * numElements,
                             world * numElements + numElements)

            exhaustiveAgents.append(
                ExhaustiveOneDAgent(OneDWorld(universe, elements), 0))

            possibleMotorValues = range(-numElements, numElements + 1)
            possibleMotorValues.remove(0)
            randomAgents.append(
                RandomOneDAgent(OneDWorld(universe, elements),
                                numElements / 2,
                                possibleMotorValues=possibleMotorValues))

        print "Training (worlds: {0}, elements: {1})...".format(
            numWorlds, numElements)
        print
        print "Training temporal memory..."
        sequences = runner.generateSequences(completeSequenceLength * 2,
                                             exhaustiveAgents,
                                             verbosity=VERBOSITY)
        runner.feedLayers(sequences,
                          tmLearn=True,
                          tpLearn=False,
                          verbosity=VERBOSITY,
                          showProgressInterval=SHOW_PROGRESS_INTERVAL)
        print

        print MonitorMixinBase.mmPrettyPrintMetrics(
            runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
        print

        print "Training temporal pooler..."
        sequences = runner.generateSequences(completeSequenceLength * 1,
                                             exhaustiveAgents,
                                             verbosity=VERBOSITY)
        runner.feedLayers(sequences,
                          tmLearn=False,
                          tpLearn=True,
                          verbosity=VERBOSITY,
                          showProgressInterval=SHOW_PROGRESS_INTERVAL)
        print
        print "Done training."
        print

        print MonitorMixinBase.mmPrettyPrintMetrics(
            runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
        print

        if PLOT >= 1:
            runner.tp.mmGetPlotConnectionsPerColumn(
                title="worlds: {0}, elements: {1}".format(
                    numWorlds, numElements))

        print "Testing (worlds: {0}, elements: {1})...".format(
            numWorlds, numElements)
        sequences = runner.generateSequences(completeSequenceLength / 4,
                                             randomAgents,
                                             verbosity=VERBOSITY,
                                             numSequences=4)
        runner.feedLayers(sequences,
                          tmLearn=False,
                          tpLearn=False,
                          verbosity=VERBOSITY,
                          showProgressInterval=SHOW_PROGRESS_INTERVAL)
        print "Done testing.\n"

        if VERBOSITY >= 2:
            print "Overlap:"
            print
            print runner.tp.mmPrettyPrintDataOverlap()
            print

        print MonitorMixinBase.mmPrettyPrintMetrics(
            runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
        print

        elapsed = int(time.time() - start)
        print "Total time: {0:2} seconds.".format(elapsed)

        header = ["# worlds", "# elements", "duration"]
        row = [numWorlds, numElements, elapsed]

        for metric in (runner.tp.mmGetDefaultMetrics() +
                       runner.tm.mmGetDefaultMetrics()):
            header += [
                "{0} ({1})".format(metric.prettyPrintTitle(), x)
                for x in ["min", "max", "sum", "mean", "stddev"]
            ]
            row += [
                metric.min, metric.max, metric.sum, metric.mean,
                metric.standardDeviation
            ]

        csvWriter.writerow(header)
        csvWriter.writerow(row)
        csvFile.flush()

    if PLOT >= 1:
        raw_input("Press <enter> to exit...")
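run() is presumably driven from the command line in the original script; the wrapper below is only a sketch of such an entry point, with the argument handling chosen here for illustration (the params dict keys "n", "w", "tmParams", and "tpParams" are the only part taken from the function itself):

if __name__ == "__main__":
    import sys

    # Illustrative CLI, not the original script's interface:
    #   python experiment.py <numWorlds> <numElements> <outputDir>
    if len(sys.argv) != 4:
        print "Usage: {0} numWorlds numElements outputDir".format(sys.argv[0])
        sys.exit(1)

    # DEFAULTS is expected to be a dict with "n", "w", "tmParams", and "tpParams" keys.
    run(int(sys.argv[1]), int(sys.argv[2]), sys.argv[3])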