Example 1
    def __iter__(self):

        for i in range(self.length):

            state = random.randint(0, 1)
            if state == 0:
                yield nd.NumDict({feature("A"): 1, feature("B"): 0})
            else:
                yield nd.NumDict({feature("A"): 0, feature("B"): 1})
Example 2
    def reinforcements(stimulus, actions):
        """
        Compute ABTask reinforcements.

        Returns a reward of 1 if the stimulus prompt and action match, -1 if 
        there is a mismatch, and 0 if the standby action is taken.
        """

        actions = nd.keep(actions,
                          keys={
                              feature("respond", "A"),
                              feature("respond", "B"),
                              feature("respond", "standby")
                          })

        stimulus = nd.keep(stimulus, keys={feature("A"), feature("B")})
        correct = nd.MutableNumDict({feature("respond", "standby"): 0})
        _correct = nd.transform_keys(
            stimulus,
            func=lambda s: feature("respond", s.tag),
        )
        correct.update(2 * _correct - 1)

        r = nd.sum_by(correct * actions,
                      keyfunc=lambda a: feature(("r", "respond")))

        return r
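
    # A worked illustration of the reward rule above (hypothetical inputs;
    # assumes `reinforcements` is reachable as a plain function/staticmethod):
    #
    #     stimulus = nd.NumDict({feature("A"): 1, feature("B"): 0})
    #     actions = nd.NumDict({feature("respond", "A"): 1.0}, default=0)
    #     reinforcements(stimulus, actions)
    #
    # Here `correct` works out to {standby: 0, respond-A: 1, respond-B: -1},
    # so the matching response earns a reward of 1 under the
    # feature(("r", "respond")) key; answering "B" instead would earn -1, and
    # standing by would earn 0.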
Example 3
    def test_config_calls_update_only_at_end_of_with_block(self):

        with mock.patch('pyClarion.base.Domain.update'):
            with mock.patch('pyClarion.base.Domain._config', ("A", "B", "C")):
                mockDom = clb.Domain(features=(feature("x", "a"),
                                               feature("y", "b"),
                                               feature("z", "c"), feature("d"),
                                               feature("e")))

                with mockDom.config():
                    mockDom.update.assert_not_called()
                    mockDom.A = "a"
                    mockDom.update.assert_not_called()
                    mockDom.B = "b"
                    mockDom.update.assert_not_called()
                    mockDom.C = "c"
                    mockDom.update.assert_not_called()
                mockDom.update.assert_called()
Example 4
    def test_lock_disallows_mutation_of_domain(self):

        with mock.patch('pyClarion.base.Domain._config', ("A", "B", "C")):
            mockDom = clb.Domain(features=(feature("x", "a"),
                                           feature("y", "b"),
                                           feature("z", "c"), feature("d"),
                                           feature("e")))

            mockDom.lock()
            with self.subTest(msg="run_with_config()"):
                with mockDom.config():

                    with self.assertRaisesRegex(
                            RuntimeError, "Cannot mutate locked domain."):
                        mockDom.A = "a"

            with self.subTest(msg="run_without_config()"):
                with self.assertRaisesRegex(RuntimeError,
                                            "Cannot mutate locked domain."):
                    mockDom.B = "b"
Example 5
    def test_goal_stay_interface_init_succeeds_under_normal_input(self):

        # baseline; make sure it works then test w/ pathological inputs
        interface = GoalStay.Interface(name="gctl",
                                       goals=(feature("goal", "select"),
                                              feature("goal", "analyze"),
                                              feature("goal", "evaluate"),
                                              feature("gobj", "attribute"),
                                              feature("gobj", "response"),
                                              feature("gobj", "pattern")))
Example 6
# This simulation demonstrates a basic recipe for creating and working with
# working memory structures in pyClarion to guide action selection.

# Here is the scenario:
# Alice has learned about fruits, as demonstrated in the chunk extraction
# example. She is now presented with some strange new fruits and attempts to
# identify them.

### Knowledge Setup ###

# For this simulation, we continue using the fruit-related visual feature
# domain from `chunk_extraction.py`.

visual_domain = Domain(
    features=(feature("color", "red"), feature("color", "green"),
              feature("color", "yellow"), feature("color", "orange"),
              feature("color", "purple"), feature("shape", "round"),
              feature("shape", "oblong"), feature("size", "small"),
              feature("size", "medium"), feature("texture", "smooth"),
              feature("texture", "grainy"), feature("texture", "spotty")))

# We will have word features do double duty as perceptual representations and
# action features. The idea here is that when a word feature is selected as an
# action, the agent is assumed to have uttered that word (e.g., in response to
# the question "What is the fruit that you see?").

speech_interface = Interface(cmds=(
    feature("word", "//"),  # Silence
    feature("word", "/banana/"),
    feature("word", "/apple/"),
Example 7
    def test_disjoint_recognize_overlaps(self):

        dom1 = clb.Domain(features=(feature("x", "a"), feature("y", "b"),
                                    feature("z", "c"), feature("d"),
                                    feature("e")))

        dom2 = clb.Domain(features=(feature("z", "a"), feature("x", "b"),
                                    feature("y", "c"), feature("f"),
                                    feature("g")))

        dom3 = clb.Domain(features=(feature("x", "d"), feature("b", "y"),
                                    feature("z", "c"), feature("f")))

        dom0 = clb.Domain(features=())

        dom4 = clb.Domain(features=(feature("1", "2"), feature("3", "4"),
                                    feature("5"), feature("6")))

        with self.subTest(msg="different domains with same size"):
            self.assertEqual(clb.Domain.disjoint(dom1, dom2), True)

        with self.subTest(msg="different domains with different size"):
            self.assertEqual(clb.Domain.disjoint(dom1, dom3), False)

        with self.subTest(msg="more than 2 domains as argument"):
            self.assertEqual(clb.Domain.disjoint(dom1, dom2, dom4), True)

        with self.subTest(msg="one of the domains being empty"):
            self.assertEqual(clb.Domain.disjoint(dom1, dom0), True)

        with self.subTest(msg="only 1 domain as argument"):
            self.assertEqual(clb.Domain.disjoint(dom1), True)

        with self.subTest(msg="without argument"):
            with self.assertRaisesRegex(
                    ValueError, "disjoint\(\) doesn't accept 0 argument"):
                clb.Domain.disjoint()
Example 8
    def test_parse_commands_runtime_error(self):

        test_interface = clb.Interface(cmds=(
            feature("down", 1),
            feature("down", 0),
            feature("up", 0),
            feature("up", 1),
        ), )

        with self.subTest(msg="unexpected default strength"):

            data = nd.NumDict({
                feature("down", 1): 1.0,
                feature("up", 0): 1.0
            },
                              default=1)

            with self.assertRaises(ValueError):
                res = test_interface.parse_commands(data)

        with self.subTest(msg="Encounter non-integral cmd strength"):

            data = nd.NumDict({
                feature("down", 1): 0.5,
                feature("up", 0): 1.0
            },
                              default=0)

            with self.assertRaises(ValueError):
                res = test_interface.parse_commands(data)

        with self.subTest(msg="Encounter multiple values from a single dim"):

            data = nd.NumDict(
                {
                    feature("down", 1): 1.0,
                    feature("down", 0): 1.0,
                    feature("down", 2): 1.0,
                    feature("up", 0): 1.0
                },
                default=0)

            with self.assertRaises(ValueError):
                res = test_interface.parse_commands(data)
Example 9
    def test_parse_commands(self):

        with self.subTest(msg="classic test"):
            test_interface = clb.Interface(cmds=(
                feature("up", 0),
                feature("up", 1),
                feature("down", 0),
                feature("down", 1),
            ), )
            data = nd.NumDict({
                feature("up", 1): 1.0,
                feature("down", 0): 1.0
            },
                              default=0)

            res = test_interface.parse_commands(data)
            self.assert_parse_result(
                [feature("up", 1), feature("down", 0)], res)

        with self.subTest(msg="different order in cmds doesn't matter"):
            test_interface = clb.Interface(cmds=(
                feature("down", 1),
                feature("down", 0),
                feature("up", 0),
                feature("up", 1),
            ), )
            data = nd.NumDict({
                feature("down", 1): 1.0,
                feature("up", 0): 1.0
            },
                              default=0)

            res = test_interface.parse_commands(data)
            self.assert_parse_result(
                [feature("down", 1), feature("up", 0)], res)

        with self.subTest(msg="more randomness in cmds"):
            test_interface = clb.Interface(cmds=(
                feature("down", 1),
                feature("down", 0),
                feature("up", 0),
                feature("up", 1),
                feature("left", 1),
                feature("left", 0),
                feature("right", 0),
                feature("right", 1),
            ), )
            data = nd.NumDict(
                {
                    feature("down", 1): 1.0,
                    feature("up", 0): 1.0,
                    feature("left", 0): 1.0,
                    feature("right", 0): 1.0
                },
                default=0)

            res = test_interface.parse_commands(data)
            self.assert_parse_result([
                feature("down", 1),
                feature("up", 0),
                feature("left", 0),
                feature("right", 0)
            ], res)

        with self.subTest(msg="cmds size == 1"):
            test_interface = clb.Interface(cmds=(feature("up", 1), ), )
            data = nd.NumDict({feature("up", 0): 1.0}, default=0)

            res = test_interface.parse_commands(data)
            self.assert_parse_result([feature("up", 1)], res)

        with self.subTest(msg="cmds size == 0"):
            test_interface = clb.Interface(cmds=(), )
            data = nd.NumDict({}, default=0)

            res = test_interface.parse_commands(data)
            self.assert_parse_result([], res)
Example 10
# a new chunk is recommended. These recommendations are then placed in the
# corresponding chunk database by an updater object.

# Here is the scenario:
# We are teaching Alice about fruits by showing her pictures of fruits and
# simultaneously speaking their names aloud. Afterwards, we quiz Alice by either
# showing her more pictures or naming fruits.

### Knowledge Setup ###

# For this simulation, we develop a simple feature domain containing visual and
# auditory features. The visual features include color, shape, size, and
# texture. The only auditory dimension is that of words.

fspecs = [
    feature("word", "/banana/"),
    feature("word", "/apple/"),
    feature("word", "/orange/"),
    feature("word", "/plum/"),
    feature("color", "red"),
    feature("color", "green"),
    feature("color", "yellow"),
    feature("color", "orange"),
    feature("color", "purple"),
    feature("shape", "round"),
    feature("shape", "oblong"),
    feature("size", "small"),
    feature("size", "medium"),
    feature("texture", "smooth"),
    feature("texture", "grainy"),
    feature("texture", "spotty")
Example 11
    def test_goal_buffer_push(self):

        # TODO: Add assertions...

        interface = GoalStay.Interface(name="gctl",
                                       goals=(feature("goal", "select"),
                                              feature("goal", "analyze"),
                                              feature("goal", "evaluate"),
                                              feature("gobj", "attribute"),
                                              feature("gobj", "response"),
                                              feature("gobj", "pattern")))

        chunks = Chunks()
        blas = BLAs(density=1.0)

        gb = GoalStay(controller=(subsystem("acs"), terminus("gb_actions")),
                      source=(subsystem("ms"), terminus("goal_selection")),
                      interface=interface,
                      chunks=chunks,
                      blas=blas)

        input_ = nd.NumDict(
            {
                feature(("gctl", ".cmd"), ".w"): 1.0,
                feature(("gctl", "goal"), "analyze"): 1.0,
                feature(("gctl", "gobj"), "pattern"): 1.0
            },
            default=0)
        inputs = {
            (subsystem("acs"), terminus("gb_actions")): input_,
            (subsystem("ms"), terminus("goal_selection")):
            nd.NumDict(default=0)
        }

        output = gb.call(inputs)
        chunks.step()

        # pprint(output)
        # pprint(chunks)
        # pprint(blas)

        input_ = nd.NumDict(
            {
                feature(("gctl", ".cmd"), ".w"): 1.0,
                feature(("gctl", "goal"), "evaluate"): 1.0,
                feature(("gctl", "gobj"), "attribute"): 1.0
            },
            default=0)
        inputs = {
            (subsystem("acs"), terminus("gb_actions")): input_,
            (subsystem("ms"), terminus("goal_selection")):
            nd.NumDict(default=0)
        }

        output = gb.call(inputs)
        chunks.step()

        # pprint(output)
        # pprint(chunks)
        # pprint(blas)

        input_ = nd.NumDict(
            {
                feature(("gctl", ".cmd"), ".f"): 1.0,
                feature(("gctl", "goal"), "analyze"): 1.0,
                feature(("gctl", "gobj"), "pattern"): 1.0
            },
            default=0)
        inputs = {
            (subsystem("acs"), terminus("gb_actions")):
            input_,
            (subsystem("ms"), terminus("goal_selection")):
            nd.NumDict({chunk(".goal_1"): 1.0})
        }

        output = gb.call(inputs)
        chunks.step()
Example 12
# In this particular simulation, we set our default actions to have a constant
# activation of 0.5.

default_strengths = nd.MutableNumDict(default=0)
default_strengths.extend(gate_interface.defaults, value=0.5)

# Next, we initialize and populate chunk and rule databases as in the original
# example.

cdb = Chunks()
rule_db = Rules()

rule_db.define(
    rule("1"),
    cdb.define(chunk("FRUIT"), feature("tasty", True), feature("sweet", True)),
    cdb.define(chunk("APPLE"), feature("color", "#ff0000"),
               feature("color", "#008000"), feature("tasty", True)))

cdb.define(chunk("JUICE"), feature("tasty", True), feature("state", "liquid"))

### Agent Assembly ###

# The agent assembly process is very similar to `free_association.py`, but we
# define some additional constructs and structures.

alice = Structure(name=agent("Alice"),
                  assets=Assets(gate_interface=gate_interface))

with alice:
Example 13

#############
### Setup ###
#############

# This demo shows how lagged features can be created on the fly within a 
# simulation using the Lag component and flow_in constructs. 

### Agent Setup ###

# The feature domain for this simple demo is the same as for 
# `free_association.py`.

feature_spec = [
    feature("color", "#ff0000"), # red
    feature("color", "#008000"), # green
    feature("tasty"),
    feature("state", "liquid"),
    feature("sweet")
]

# For this example, the agent architecture is minimal. We instantiate a 
# stimulus buffer and an NACS containing only a feature pool and the Lag 
# component.

# The lag component serves the construct `flow_in("lag")` which maps, at 
# the start of the NACS cycle, activations in the feature pool held over from 
# the previous cycle to activations of corresponding lagged features up to a 
# set maximum lag value.
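
# To make the mapping concrete, here is a small illustration (not part of the
# original demo; it assumes `nd` and `feature` are imported as in the other
# examples and uses only those constructors): suppose the feature pool held
# this activation on the previous cycle...

prev_cycle = nd.NumDict({feature("color", "#ff0000"): 1.0}, default=0)

# ...then, with a maximum lag of 1, the lagged-feature activations fed back
# into the pool by `flow_in("lag")` at the start of the current cycle would
# look like this:

lagged = nd.NumDict({feature("color", "#ff0000", lag=1): 1.0}, default=0)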
Example 14
# indicating its dimension (e.g., color) and value (e.g., red). In pyClarion, 
# feature dimensions are further analyzed as consisting of a (tag, lag) pair. 
# The tag simply represents the name of the dimension. The lag value is handy 
# for tracking the activation of a particular feature over small time windows, 
# as may be required in, e.g., temporal difference learning. 

# In pyClarion, constructs are named using 'construct symbols'. As the name 
# suggests, construct symbols are intended to behave like formal tokens, and 
# their primary function is to help associate data with the constructs they 
# name. As a result, they are required to be immutable and hashable. It may be 
# helpful to think of construct symbols as fancy python tuples.

# We can invoke the construct symbol for a particular feature node by calling 
# the `feature()` constructor as shown below. 

f = feature(tag="my-tag", val="val-1", lag=0)

# The lag value is optional and defaults to 0.

assert f == feature(tag="my-tag", val="val-1") # does not fail
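
# A lagged copy of the same feature differs only in its dimension's lag
# component and therefore names a distinct node (an illustrative aside, not
# part of the original walkthrough).

assert feature(tag="my-tag", val="val-1", lag=1) != f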

# For this simulation, we include (somewhat arbitrarily) feature nodes for the 
# colors red and green and a feature for each of tastiness, sweetness and the 
# liquid state. These dimension-value (dv) pairs are specified below. We omit
# lag values from the specification.

# Note that, in some cases, we do not provide feature values. This is sometimes 
# desirable when we have singleton dimensions. In such cases, the feature 
# constructor automatically sets the value to the empty string.
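
# For instance, the two constructor calls below name the same feature node
# (an illustrative aside; the explicit empty string is what the constructor
# fills in for us):

assert feature("tasty") == feature(tag="tasty", val="")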

feature_spec = [
Example 15
                      keyfunc=lambda a: feature(("r", "respond")))

        return r


### Knowledge Setup ###

# To set up the Q-Net we need to provide it with some initial information:
#   - What features are in its input domain?
#   - What features constitute its output interface?
#   - What features signal reinforcements?

# To specify all this we use pyClarion feature domains and feature interfaces.

domain = Domain((
    feature("A"),
    feature("B"),
))

interface = Interface(cmds=(feature("respond", "A"), feature("respond", "B"),
                            feature("respond", "standby")))

# To specify reinforcement signals, we use `Reinforcements`, which expects a
# mapping from features representing reinforcement signals to the dimensions
# that they reinforce (recall, dimensions include the lag value). The mapping
# must be one-to-one.

r_map = Reinforcements(mapping={
    feature(("r", "respond")): ("respond", 0),
})
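
# To keep the mapping one-to-one, each reinforced command dimension gets its
# own dedicated reinforcement feature. Hypothetically, if the interface also
# had a ("move", 0) command dimension, the mapping would gain a separate entry
# rather than reuse feature(("r", "respond")), e.g.:
#
#     r_map = Reinforcements(mapping={
#         feature(("r", "respond")): ("respond", 0),
#         feature(("r", "move")): ("move", 0),
#     })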
Example 16
# activation of 0.5.

default_strengths = nd.MutableNumDict(default=0)
default_strengths.extend(gate_interface.defaults, value=0.5)

# Next, we initialize and populate chunk and rule databases as in the original 
# example. 

cdb = Chunks()
rule_db = Rules()

rule_db.define(
    rule("1"), 
    cdb.define( 
        chunk("FRUIT"),
        feature("tasty", True),
        feature("sweet", True)
    ),
    cdb.define( 
        chunk("APPLE"), 
        feature("color", "#ff0000"), 
        feature("color", "#008000"),
        feature("tasty", True)
    )
) 

cdb.define( 
    chunk("JUICE"),
    feature("tasty", True),
    feature("state", "liquid")
)