Example #1
def main():
    # Init loggers
    log.set_level("fine")
    log.set_sync(False)
    agent_log.set_level("fine")
    agent_log.set_sync(False)
    ure_logger().set_level("fine")
    ure_logger().set_sync(False)

    # Set main atomspace
    atomspace = AtomSpace()
    set_default_atomspace(atomspace)

    # Wrap environment
    wrapped_env = CartPoleWrapper(env)

    # Instantiate CartPoleAgent, and tune parameters
    cpa = CartPoleAgent(wrapped_env)
    cpa.delta = 1.0e-16

    # Run control loop
    while cpa.step():
        time.sleep(0.1)
        log.info("step_count = {}".format(cpa.step_count))

    print(f"The final reward is {cpa.accumulated_reward}.")
Example #2
    def test_probabilities(self):
        log.info("Statistics> test_probabilities")

        # Add metadata for each test case
        for case in self.test_case:
            self.provider.add_one_metadata(case)

        # Add raw-data counts
        arr_2_gram_1 = [self.aaa, self.bbb]
        arr_2_gram_2 = [self.aaa, self.ccc]
        arr_3_gram_1 = [self.aaa, self.bbb, self.ccc]
        self.provider.add_one_rawdata_count(arr_2_gram_1, 2)
        self.provider.add_one_rawdata_count(arr_2_gram_2, 1)
        self.provider.add_one_rawdata_count(arr_3_gram_1, 1)

        # Calculate probabilities
        PyProbabilityAtom().calculate_probabilities(self.provider)

        statistic_data = self.provider.datamap_find(arr_2_gram_1)
        assert_less_equal(statistic_data.probability, 0.667)
        assert_greater_equal(statistic_data.probability, 0.665)
        statistic_data = self.provider.datamap_find(arr_2_gram_2)
        assert_less_equal(statistic_data.probability, 0.334)
        assert_greater_equal(statistic_data.probability, 0.332)
        statistic_data = self.provider.datamap_find(arr_3_gram_1)
        assert_less_equal(statistic_data.probability, 1.1)
        assert_greater_equal(statistic_data.probability, 0.9)
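
The asserted bounds follow from the raw counts added above. A minimal plain-Python sketch of the arithmetic, assuming PyProbabilityAtom normalizes counts within each n-gram size (an assumption consistent with the asserted ranges), is:

# Counts added in the test, keyed by n-gram (illustration only).
count_2_gram = {("aaa", "bbb"): 2, ("aaa", "ccc"): 1}
count_3_gram = {("aaa", "bbb", "ccc"): 1}

total_2 = sum(count_2_gram.values())  # 3
total_3 = sum(count_3_gram.values())  # 1

print(count_2_gram[("aaa", "bbb")] / total_2)         # 0.666..., within [0.665, 0.667]
print(count_2_gram[("aaa", "ccc")] / total_2)         # 0.333..., within [0.332, 0.334]
print(count_3_gram[("aaa", "bbb", "ccc")] / total_3)  # 1.0, within [0.9, 1.1]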
Example #3
    def test_interaction_information(self):
        log.info("Statistics> test_interaction_information")

        # Add metadata for each test case
        for case in self.test_case:
            self.provider.add_one_metadata(case)

        # Add raw-data counts
        arr_2_gram_1 = [self.aaa, self.bbb]
        arr_2_gram_2 = [self.aaa, self.ccc]
        arr_3_gram_1 = [self.aaa, self.bbb, self.ccc]
        self.provider.add_one_rawdata_count(arr_2_gram_1, 2)
        self.provider.add_one_rawdata_count(arr_2_gram_2, 1)
        self.provider.add_one_rawdata_count(arr_3_gram_1, 1)

        # Calculate probabilities
        PyProbabilityAtom().calculate_probabilities(self.provider)
        PyEntropyAtom().calculate_entropies(self.provider)
        PyInteractionInformationAtom().calculate_interaction_informations(self.provider)

        statistic_data = self.provider.datamap_find(arr_2_gram_1)
        assert_less_equal(statistic_data.interaction_information, -0.388)
        assert_greater_equal(statistic_data.interaction_information, -0.390)
        statistic_data = self.provider.datamap_find(arr_2_gram_2)
        assert_less_equal(statistic_data.interaction_information, -0.527)
        assert_greater_equal(statistic_data.interaction_information, -0.529)
        statistic_data = self.provider.datamap_find(arr_3_gram_1)
        assert_less_equal(statistic_data.interaction_information, -0.917)
        assert_greater_equal(statistic_data.interaction_information, -0.919)
Example #4
def get_weighted_tv(atoms):
    if len(atoms) < 1:
        log.info("Weighted TruthValue can't be evaluated with small size.")
        return TruthValue()
    elif len(atoms) == 1:
        return atoms[0].tv

    mean_sum = 0

    weighted_strength_sum = 0
    confidence_sum = 0
    link_count = 0

    for atom in atoms:
        weighted_strength_sum += (atom.tv.confidence * atom.tv.mean)
        confidence_sum += atom.tv.confidence
        link_count += 1

    if confidence_sum != 0:
        new_strength = weighted_strength_sum / confidence_sum
    else:
        # Fall back to the arithmetic mean; the given atoms may not have
        # meaningful TruthValues.
        for atom in atoms:
            mean_sum += atom.tv.mean
        new_strength = mean_sum / link_count

    # TODO: Currently, the confidence of the new blended node is just the
    # average of the old confidence values.
    new_confidence = confidence_sum / link_count
    return TruthValue(new_strength, new_confidence)
Example #5
def main():
    # Init loggers
    log.set_level("fine")
    log.set_sync(False)
    agent_log.set_level("fine")
    agent_log.set_sync(False)
    ure_logger().set_level("fine")
    ure_logger().set_sync(False)

    # Set main atomspace
    atomspace = AtomSpace()
    set_default_atomspace(atomspace)

    # Wrap environment
    wrapped_env = CartPoleWrapper(env, atomspace)

    # Instantiate CartPoleAgent, and tune parameters
    cpa = FixedCartPoleAgent(wrapped_env, atomspace)
    cpa.delta = 1.0e-16

    # Run control loop
    while not cpa.control_cycle():
        wrapped_env.render()
        time.sleep(0.1)
        log.info("cycle_count = {}".format(cpa.cycle_count))

    log_msg(agent_log, f"The final reward is {cpa.accumulated_reward}.")
Example #6
    def __connect_links_simple(self, decided_atoms, new_blended_atom):
        """
        Implementation of simple link connector.

        1. Find both the duplicate and the non-duplicate links.
        2. Resolve conflicts in the duplicate links and connect them to the
         new blended atom.
        3. Connect the remaining links to the new blended atom.

        :param list(types.Atom) decided_atoms: Atoms whose links are searched
         and connected to the new blended atom.
        :param Atom new_blended_atom: New blended atom.
        """
        duplicate_links, non_duplicate_links = \
            find_duplicate_links(self.a, decided_atoms)

        self.__connect_duplicate_links(duplicate_links, new_blended_atom)
        self.__connect_non_duplicate_links(non_duplicate_links)

        # Make the links between source nodes and newly blended node.
        # TODO: Give a proper truth value, not just the average of the old ones.
        for new_blended_atom in self.ret:
            try:
                weighted_tv = get_weighted_tv(
                    self.a.get_incoming(new_blended_atom.h))
            except UserWarning as e:
                log.info(str(e))
                weighted_tv = TruthValue()
            for decided_atom in decided_atoms:
                self.a.add_link(types.AssociativeLink,
                                [decided_atom, new_blended_atom], weighted_tv)
Example #7
    def run(self, focus_atoms=None, config_base=None):
        """Execute a conceptual blending algorithm.

        Args:
            focus_atoms: The atoms to blend.
            config_base: A Node to save custom config.
            :param focus_atoms: list[Atom]
            :param config_base: Atom
        Returns:
            The blended atom(s).
            Example:
            [(ConceptNode "car-man"),
             (ConceptNode "man-car"),
             ...]
            If the list is empty, the blender couldn't make a proper blend
            with the given atoms.
            :rtype : list[Atom]
        """

        try:
            self.__prepare(focus_atoms, config_base)

            # Choose atoms for blending.
            self.chosen_atoms = \
                ChooserFinder(self.a).\
                get_chooser(self.config_base).\
                atom_choose(self.focus_atoms, self.config_base)

            # Decide whether or not to execute blending and prepare.
            self.decided_atoms = \
                DeciderFinder(self.a).\
                get_decider(self.config_base).\
                blending_decide(self.chosen_atoms, self.config_base)

            # Initialize the new blend node.
            self.merged_atom = \
                MakerFinder(self.a).\
                get_maker(self.config_base).\
                new_blend_make(self.decided_atoms, self.config_base)

            # Make the links between existing nodes and the newly blended node.
            # Check the severe conflict links in each node and remove.
            # Detect and improve conflict links in newly blended node.
            self.blended_atoms = \
                ConnectorFinder(self.a).\
                get_connector(self.config_base).\
                link_connect(self.decided_atoms, self.merged_atom, self.config_base)

            # Sum up blending.
            self.__clean_up()
        except UserWarning as e:
            log.info('Skip blending due to: ' + str(e))
            self.blended_atoms = []

        # Returns the blended atom(s).
        return self.blended_atoms
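
For context, a minimal driver for the pipeline documented above might look like the sketch below. The ConceptualBlending class and module path are assumed from the OpenCog Python blending module; the focus atoms are purely illustrative.

from opencog.atomspace import AtomSpace
from opencog.type_constructors import ConceptNode, set_type_ctor_atomspace
from opencog.blending.blend import ConceptualBlending  # assumed module path

atomspace = AtomSpace()
set_type_ctor_atomspace(atomspace)

# run() walks the chooser -> decider -> maker -> connector pipeline and
# returns the blended atoms (an empty list if blending was skipped).
focus_atoms = [ConceptNode("car"), ConceptNode("man")]
blended_atoms = ConceptualBlending(atomspace).run(focus_atoms, None)
print(blended_atoms)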
Example #8
    def run(self, focus_atoms=None, config_base=None):
        """Execute a conceptual blending algorithm.

        Args:
            focus_atoms: The atoms to blend.
            config_base: A Node to save custom config.
            :param focus_atoms: list[Atom]
            :param config_base: Atom
        Returns:
            The blended atom(s).
            Example:
            [(ConceptNode "car-man"),
             (ConceptNode "man-car"),
             ...]
            If the list is empty, the blender couldn't make a proper blend
            with the given atoms.
            :rtype : list[Atom]
        """

        try:
            self.__prepare(focus_atoms, config_base)

            # Choose atoms for blending.
            self.chosen_atoms = \
                ChooserFinder(self.a).\
                get_chooser(self.config_base).\
                atom_choose(self.focus_atoms, self.config_base)

            # Decide whether or not to execute blending and prepare.
            self.decided_atoms = \
                DeciderFinder(self.a).\
                get_decider(self.config_base).\
                blending_decide(self.chosen_atoms, self.config_base)

            # Initialize the new blend node.
            self.merged_atom = \
                MakerFinder(self.a).\
                get_maker(self.config_base).\
                new_blend_make(self.decided_atoms, self.config_base)

            # Make the links between existing nodes and the newly blended node.
            # Check the severe conflict links in each node and remove.
            # Detect and improve conflict links in newly blended node.
            self.blended_atoms = \
                ConnectorFinder(self.a).\
                get_connector(self.config_base).\
                link_connect(self.decided_atoms, self.merged_atom, self.config_base)

            # Sum up blending.
            self.__clean_up()
        except UserWarning as e:
            log.info('Skip blending due to: ' + str(e))
            self.blended_atoms = []

        # Returns the blended atom(s).
        return self.blended_atoms
Example #9
    def __get_max_n_gram(
            self,
            conflict_link_cases,
            non_conflict_link_cases,
            non_duplicate_link_cases,
            related_node_target_links
    ):
        """Decide the max value of n_gram, from every category link set.

        MAX(
           (USER DEFINED LIMIT),
           length of (related_node_target_link),
           length of (conflict_link + non_conflict_link + non_duplicate_link)
        )

        Args:
            conflict_link_cases: Conflicted link tuples list.
            non_conflict_link_cases: Non-conflict links list.
            non_duplicate_link_cases: Non-duplicated links list.
            related_node_target_links: Target link tuples in related node list.
            :param conflict_link_cases: list[list[EqualLinkKey]]
            :param non_conflict_link_cases: list[EqualLinkKey]
            :param non_duplicate_link_cases: list[EqualLinkKey]
            :param related_node_target_links: list[list[EqualLinkKey]]
        Returns:
            The max value of n_gram.
            :rtype : int
        """
        conflict_link_n_gram = 0 \
            if len(conflict_link_cases) == 0 \
            else len(conflict_link_cases[0])

        merged_link_n_gram = \
            conflict_link_n_gram + \
            len(non_conflict_link_cases) + \
            len(non_duplicate_link_cases)

        target_n_gram = list(map(lambda x: len(x), related_node_target_links))
        target_n_gram.append(merged_link_n_gram)

        n_gram = self.data_n_gram_limit \
            if 0 < self.data_n_gram_limit < max(target_n_gram) \
            else max(target_n_gram)

        if n_gram == self.data_n_gram_limit:
            log.info(
                "ConnectConflictInteractionInformation: "
                "n_gram was limited to: " + str(self.data_n_gram_limit) +
                ", original n_gram was: " + str(max(target_n_gram))
            )

        return n_gram
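
To make the MAX(...) rule above concrete, here is a small self-contained sketch of the same arithmetic with made-up list sizes; the variable names mirror the parameters and no OpenCog types are involved.

# Hypothetical link-case sizes, for illustration only.
conflict_link_cases = [["k1", "k2"], ["k3", "k4"]]   # 2-gram conflict tuples
non_conflict_link_cases = ["k5"]                     # 1 link
non_duplicate_link_cases = ["k6", "k7"]              # 2 links
related_node_target_links = [["k8", "k9", "k10"]]    # one related node, 3 links
data_n_gram_limit = 4                                # user-defined limit

conflict_link_n_gram = len(conflict_link_cases[0]) if conflict_link_cases else 0
merged_link_n_gram = (conflict_link_n_gram
                      + len(non_conflict_link_cases)
                      + len(non_duplicate_link_cases))       # 2 + 1 + 2 = 5
target_n_gram = [len(x) for x in related_node_target_links]  # [3]
target_n_gram.append(merged_link_n_gram)                     # [3, 5]

# The limit (4) is positive and smaller than max(target_n_gram) (5),
# so n_gram is clamped down to the limit.
n_gram = data_n_gram_limit if 0 < data_n_gram_limit < max(target_n_gram) \
    else max(target_n_gram)
assert n_gram == 4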
Example #10
def get_weighted_tv(atoms):
    """Calculate the weighted average of TruthValue of atoms list.

    T1 ... Tk = TruthValue in source atoms
    A = new TruthValue

    strength of A = sA
    confidence of A = cA

    sA = revision of T1...Tk = (sT1*cT1 + ... + sTk*cTk) / (cT1 + ... + cTk)
    cA = (cT1 + ... + cTk) / k

    See: https://groups.google.com/forum/#!topic/opencog/fa5c4yE8YdU

    Args:
        atoms: A list of atoms whose TruthValues are averaged.
        :param atoms: list[Atom]
    Returns:
        A weighted average TruthValue.
        :rtype: TruthValue
    """
    if len(atoms) < 1:
        log.info("Weighted TruthValue can't be evaluated with small size.")
        return TruthValue()
    elif len(atoms) == 1:
        return atoms[0].tv

    mean_sum = 0

    weighted_strength_sum = 0
    confidence_sum = 0
    link_count = 0

    for atom in atoms:
        weighted_strength_sum += (atom.tv.confidence * atom.tv.mean)
        confidence_sum += atom.tv.confidence
        link_count += 1

    if confidence_sum != 0:
        new_strength = weighted_strength_sum / confidence_sum
    else:
        # Fall back to the arithmetic mean; the given atoms may not have
        # meaningful TruthValues.
        for atom in atoms:
            mean_sum += atom.tv.mean
        new_strength = mean_sum / link_count

    # TODO: Currently, the confidence of the new blended node is just the
    # average of the old confidence values.
    new_confidence = confidence_sum / link_count
    return TruthValue(new_strength, new_confidence)
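
As a sanity check on the formula in the docstring, here is a small worked example with two plain (strength, confidence) pairs; it uses only standard Python, so no AtomSpace is needed.

# Two hypothetical source truth values as (strength, confidence) pairs.
tvs = [(0.8, 0.9), (0.2, 0.1)]

# sA = (sT1*cT1 + sT2*cT2) / (cT1 + cT2) = (0.72 + 0.02) / 1.0 = 0.74
strength = sum(s * c for s, c in tvs) / sum(c for _, c in tvs)
# cA = (cT1 + cT2) / k = 1.0 / 2 = 0.5
confidence = sum(c for _, c in tvs) / len(tvs)

print(strength, confidence)  # 0.74 0.5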
Example #11
def get_weighted_tv(atoms):
    """Calculate the weighted average of TruthValue of atoms list.

    T1 ... Tk = TruthValue in source atoms
    A = new TruthValue

    strength of A = sA
    confidence of A = cA

    sA = revision of T1...Tk = (sT1*cT1 + ... + sTk*cTk) / (cT1 + ... + cTk)
    cA = (cT1 + ... + cTk) / k

    See: https://groups.google.com/forum/#!topic/opencog/fa5c4yE8YdU

    Args:
        atoms: A list of atoms whose TruthValues are averaged.
        :param atoms: list[Atom]
    Returns:
        A weighted average TruthValue.
        :rtype: TruthValue
    """
    if len(atoms) < 1:
        log.info("Weighted TruthValue can't be evaluated with small size.")
        return TruthValue()
    elif len(atoms) == 1:
        return atoms[0].tv

    mean_sum = 0

    weighted_strength_sum = 0
    confidence_sum = 0
    link_count = 0

    for atom in atoms:
        weighted_strength_sum += (atom.tv.confidence * atom.tv.mean)
        confidence_sum += atom.tv.confidence
        link_count += 1

    if confidence_sum != 0:
        new_strength = weighted_strength_sum / confidence_sum
    else:
        # Fall back to the arithmetic mean; the given atoms may not have
        # meaningful TruthValues.
        for atom in atoms:
            mean_sum += atom.tv.mean
        new_strength = mean_sum / link_count

    # TODO: Currently, the confidence of the new blended node is just the
    # average of the old confidence values.
    new_confidence = confidence_sum / link_count
    return TruthValue(new_strength, new_confidence)
Example #12
    def atom_choose(self, focus_atoms, config_base):
        self.last_status = self.Status.IN_PROCESS

        try:
            self.atom_choose_impl(focus_atoms, config_base)
        except UserWarning as e:
            log.info("Skipping choose, caused by '" + str(e) + "'")
            log.info("Last status is '" +
                     self.Status.reverse_mapping[self.last_status] + "'")
            raise e

        if self.last_status == self.Status.IN_PROCESS:
            self.last_status = self.Status.SUCCESS_CHOOSE

        return self.ret
Example #13
    def blending_decide(self, chosen_atoms, config_base):
        self.last_status = self.Status.IN_PROCESS

        try:
            self.blending_decide_impl(chosen_atoms, config_base)
        except UserWarning as e:
            log.info("Skipping decide, caused by '" + str(e) + "'")
            log.info("Last status is '" +
                     self.Status.reverse_mapping[self.last_status] + "'")
            raise e

        if self.last_status == self.Status.IN_PROCESS:
            self.last_status = self.Status.SUCCESS_DECIDE

        return self.ret
Example #14
    def __get_max_n_gram(self, conflict_link_cases, non_conflict_link_cases,
                         non_duplicate_link_cases, related_node_target_links):
        """Decide the max value of n_gram, from every category link set.

        MAX(
           (USER DEFINED LIMIT),
           length of (related_node_target_link),
           length of (conflict_link + non_conflict_link + non_duplicate_link)
        )

        Args:
            conflict_link_cases: Conflicted link tuples list.
            non_conflict_link_cases: Non-conflict links list.
            non_duplicate_link_cases: Non-duplicated links list.
            related_node_target_links: Target link tuples in related node list.
            :param conflict_link_cases: list[list[EqualLinkKey]]
            :param non_conflict_link_cases: list[EqualLinkKey]
            :param non_duplicate_link_cases: list[EqualLinkKey]
            :param related_node_target_links: list[list[EqualLinkKey]]
        Returns:
            The max value of n_gram.
            :rtype : int
        """
        conflict_link_n_gram = 0 \
            if len(conflict_link_cases) == 0 \
            else len(conflict_link_cases[0])

        merged_link_n_gram = \
            conflict_link_n_gram + \
            len(non_conflict_link_cases) + \
            len(non_duplicate_link_cases)

        target_n_gram = list(map(lambda x: len(x), related_node_target_links))
        target_n_gram.append(merged_link_n_gram)

        n_gram = self.data_n_gram_limit \
            if 0 < self.data_n_gram_limit < max(target_n_gram) \
            else max(target_n_gram)

        if n_gram == self.data_n_gram_limit:
            log.info("ConnectConflictInteractionInformation: "
                     "n_gram was limited to: " + str(self.data_n_gram_limit) +
                     ", original n_gram was: " + str(max(target_n_gram)))

        return n_gram
Example #15
    def new_blend_make(self, decided_atoms, config_base):
        self.last_status = self.Status.IN_PROCESS

        try:
            self.new_blend_make_impl(decided_atoms, config_base)
        except UserWarning as e:
            log.info("Skipping make, caused by '" + str(e) + "'")
            log.info(
                "Last status is '" +
                self.Status.reverse_mapping[self.last_status] +
                "'"
            )
            raise e

        if self.last_status == self.Status.IN_PROCESS:
            self.last_status = self.Status.SUCCESS_MAKE

        return self.ret
Example #16
    def link_connect(self, decided_atoms, new_blended_atom, config_base):
        self.last_status = self.Status.IN_PROCESS

        try:
            self.link_connect_impl(decided_atoms, new_blended_atom, config_base)
        except UserWarning as e:
            log.info("Skipping connect, caused by '" + str(e) + "'")
            log.info(
                "Last status is '" +
                self.Status.reverse_mapping[self.last_status] +
                "'"
            )
            raise e

        if self.last_status == self.Status.IN_PROCESS:
            self.last_status = self.Status.SUCCESS_CONNECT

        return self.ret
Example #17
    def test_data_provider(self):
        log.info("Statistics> test_data_provider")
        # Add one data
        is_first_insert = self.provider.add_one_metadata(self.aaa)
        assert_true(is_first_insert)

        # Re-add one data
        is_first_insert = self.provider.add_one_metadata(self.aaa)
        assert_false(is_first_insert)

        # Add metadata for each test case
        for case in self.test_case:
            self.provider.add_one_metadata(case)
        assert_equal(self.provider.dataset_size(), 6)

        # Add raw-data counts (n_gram = 2)
        arr_2_gram_1 = [self.aaa, self.bbb]
        arr_2_gram_2 = [self.aaa, self.ccc]
        self.provider.add_one_rawdata_count(arr_2_gram_1, 2)
        self.provider.add_one_rawdata_count(arr_2_gram_2, 1)

        # Add raw-data counts (n_gram = 3)
        arr_3_gram_1 = [self.aaa, self.bbb, self.ccc]
        self.provider.add_one_rawdata_count(arr_3_gram_1, 1)

        # Test key vector
        key_vector_1 = self.provider.make_key_from_data(self.test_case)
        assert_equal(len(key_vector_1), 6)

        combination_array = [True, False, True, False, True, False]
        key_vector_2 = self.provider.make_key_from_data(
            self.test_case, combination_array)
        assert_equal(len(key_vector_2), 3)

        value_vector = self.provider.make_data_from_key(self.a, key_vector_1)
        assert_equal(len(value_vector), 6)
Example #18
def run_message_passing_ure():
    fc_message_sending_rule_name = DefinedSchemaNode("fc-message-sending-rule")

    DefineLink(fc_message_sending_rule_name, create_messages_rule)

    fc_message_sending_rbs = ConceptNode("fc-message-sending-rbs")

    MemberLink(fc_message_sending_rule_name, fc_message_sending_rbs)

    EvaluationLink(PredicateNode("URE:FC:retry-exhausted-sources"),
                   fc_message_sending_rbs).tv = TruthValue(1, 1)

    # Set URE maximum-iterations
    from opencog.scheme_wrapper import scheme_eval

    execute_code = \
        '''
        (use-modules (opencog) (opencog rule-engine))
        (ure-set-num-parameter (ConceptNode "fc-message-sending-rbs") "URE:maximum-iterations" 10)
        '''

    scheme_eval(atomspace, execute_code)

    log.info("=== Dump AtomSpace Begin ===")
    for atom in atomspace:
        if not atom.incoming:
            log.info(str(atom))
    log.info("=== Dump AtomSpace End   ===")

    chainer = ForwardChainer(
        atomspace, ConceptNode("fc-message-sending-rule"),
        get_directed_edge(VariableNode("$V1"), VariableNode("$V2")),
        VariableList(
            TypedVariableLink(VariableNode("$V1"), TypeNode("ConceptNode")),
            TypedVariableLink(VariableNode("$V2"), TypeNode("ConceptNode"))))

    # chainer = BackwardChainer(atomspace,
    #                           ConceptNode("fc-message-sending-rule"),
    #                           get_message(VariableNode("$V1"), VariableNode("$V2")),
    #                           VariableList(
    #                               TypedVariableLink(VariableNode("$V1"), TypeNode("ConceptNode")),
    #                               TypedVariableLink(VariableNode("$V2"), TypeNode("ConceptNode"))))

    chainer.do_chain()
    results = chainer.get_results()
Example #19
    # ChaseAgent
    ca = ChaseAgent(wrapped_env, action_space, pgoal, ngoal)

    # Eat some food.
    ca.eat(4)
    time.sleep(5)

    # Training/learning loop
    lt_iterations = 3  # Number of learning-training iterations
    lt_period = 200  # Duration of a learning-training iteration
    for i in range(lt_iterations):
        ca.reset_action_counter()
        par = ca.accumulated_reward  # Accumulated reward before this iteration
        # Discover patterns to make more informed decisions
        agent_log.info("Start learning ({}/{})".format(i + 1, lt_iterations))
        ca.learn()
        ca.wake()
        # Run agent to accumulate percepta
        agent_log.info("Start training ({}/{})".format(i + 1, lt_iterations))
        for j in range(lt_period):
            ca.control_cycle()
            time.sleep(0.01)
            log.info("cycle_count = {}".format(ca.cycle_count))
        nar = ca.accumulated_reward - par
        agent_log.info("Accumulated reward during {}th iteration = {}".format(
            i + 1, nar))
        agent_log.info("Action counter during {}th iteration:\n{}".format(
            i + 1, ca.action_counter))
        ca.eat(8 - i)
Example #20
    # ChaseAgent
    ca = ChaseAgent(wrapped_env, action_space, pgoal, ngoal)

    # Eat some food.
    ca.eat(4)
    time.sleep(5)

    # Training/learning loop
    lt_iterations = 3  # Number of learning-training iterations
    lt_period = 200  # Duration of a learning-training iteration
    for i in range(lt_iterations):
        ca.reset_action_counter()
        par = ca.accumulated_reward  # Accumulated reward before this iteration
        # Discover patterns to make more informed decisions
        agent_log.info("Start learning ({}/{})".format(i + 1, lt_iterations))
        ca.learn()
        ca.wake()
        # Run agent to accumulate percepta
        agent_log.info("Start training ({}/{})".format(i + 1, lt_iterations))
        for j in range(lt_period):
            ca.step()
            time.sleep(0.01)
            log.info("step_count = {}".format(ca.step_count))
        nar = ca.accumulated_reward - par
        agent_log.info("Accumulated reward during {}th iteration = {}".format(
            i + 1, nar))
        agent_log.info("Action counter during {}th iteration:\n{}".format(
            i + 1, ca.action_counter))
        ca.eat(8 - i)
Example #21
the example simple, the below is static; it simply counts green and
red lights, and halts at the first red light.

The if-then is implemented via a matching clause with the pattern
matcher. When a match is seen, the matcher moves on to the next
clause.
"""

from opencog.atomspace import AtomSpace, TruthValue, types, get_type_name
from opencog.bindlink import satisfaction_link
from opencog.type_constructors import *
from opencog.logger import Logger, log

# Logging will be written to opencog.log in the current directory.
log.set_level('DEBUG')
log.info("Starting the stop-go demo")

# The atomspace where everything will live.
atomspace = AtomSpace()
set_type_ctor_atomspace(atomspace)


# The callback counts the number of red and green lights.
# It returns a TruthValue of TRUE for green lights and FALSE for the
# red lights.  FALSE is interpreted as a mismatch (failure to satisfy)
# by the pattern matcher, and thus, the pattern matcher will backtrack
# and search for a different solution. Since the example below contains
# no variables, it will just backtrack to the start, and then report
# non-satisfiability (which is what we want, when we get a red light).
green = 0
red = 0
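
The counting callback itself is not part of this excerpt. A minimal sketch of what it might look like, with a hypothetical function name and light-node names, relying on the imports and counters above, is:

def stop_go(atom):
    # Hypothetical callback: count each light and tell the pattern matcher
    # whether this clause is satisfied (TRUE) or a mismatch (FALSE).
    global green, red
    if atom == ConceptNode("green light"):
        green += 1
        return TruthValue(1, 1)  # satisfied; the matcher keeps going
    if atom == ConceptNode("red light"):
        red += 1
        return TruthValue(0, 1)  # mismatch; the matcher backtracks
    return TruthValue(0, 1)      # anything else is also a mismatch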
Example #22
# https://github.com/noskill/opencog-intro

import os.path
from opencog.scheme_wrapper import scheme_eval, scheme_eval_h
from opencog.atomspace import TruthValue
from opencog.backwardchainer import BackwardChainer
from opencog.type_constructors import *
from opencog.utilities import initialize_opencog

from opencog.logger import Logger, log

# Logging will be written to opencog.log in the current directory.
# log.set_level('FINE')
# log.set_level('DEBUG')
log.set_level('INFO')
log.info("Starting the Socrates sample")

atomspace = AtomSpace()
initialize_opencog(atomspace)

scheme_eval(atomspace, '(use-modules (opencog))')
scheme_eval(atomspace, '(use-modules (opencog exec))')
scheme_eval(atomspace, '(use-modules (opencog query))')
scheme_eval(atomspace, '(use-modules (opencog rule-engine))')

pln_path = os.path.expanduser("/home/opencog/share/opencog/opencog/pln")
pln_config_path = os.path.expanduser(
    "/home/opencog/share/opencog/opencog/pln/pln-config.scm")

scheme_eval(atomspace, '(add-to-load-path "{0}")'.format(pln_path))
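
The excerpt stops after extending the load path. A hedged sketch of how the script might continue, where the exact rule-base node name defined by pln-config.scm is an assumption (shown here as a placeholder), is:

# Load the PLN configuration (assumes it can be loaded by absolute path).
scheme_eval(atomspace, '(load "{0}")'.format(pln_config_path))

# Hypothetical knowledge for the Socrates syllogism.
InheritanceLink(ConceptNode("Socrates"), ConceptNode("man")).tv = TruthValue(1, 1)
InheritanceLink(ConceptNode("man"), ConceptNode("mortal")).tv = TruthValue(1, 1)

# Ask the backward chainer whether Socrates is mortal.  The rule-base node
# name is hypothetical; use whatever ConceptNode pln-config.scm defines.
chainer = BackwardChainer(atomspace, ConceptNode("pln-rule-base"),
                          InheritanceLink(ConceptNode("Socrates"),
                                          ConceptNode("mortal")))
chainer.do_chain()
print(chainer.get_results())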
Example #23
the example simple, the below is static; it simply counts green and
red lights, and halts at the first red light.

The if-then is implemented via a matching clause with the pattern
matcher. When a match is seen, the matcher moves on to the next
clause.
"""

from opencog.atomspace import AtomSpace, TruthValue, types, get_type_name
from opencog.bindlink import satisfaction_link
from opencog.type_constructors import *
from opencog.logger import Logger, log

# Logging will be written to opencog.log in the current directory.
log.set_level('DEBUG')
log.info("Starting the stop-go demo")

# The atomspace where everything will live.
atomspace = AtomSpace()
set_type_ctor_atomspace(atomspace)

# The callback counts the number of red and green lights.
# It returns a TruthValue of TRUE for green lights and FALSE for the
# red lights.  FALSE is interpreted as a mismatch (failure to satisfy)
# by the pattern matcher, and thus, the pattern matcher will backtrack
# and search for a different solution. Since the example below contains
# no variables, it will just backtrack to the start, and then report
# non-satisfiability (which is what we want, when we get a red light).
green = 0
red = 0
Example #24
    def print_detail(self, provider):
        log.info(provider.print_data_map())