Example #1
    def __init__(self,
                 atomspace,
                 stimulateAtoms=False,
                 agent=None,
                 learnRuleFrequencies=False,
                 preferAttentionalFocus=False,
                 allow_output_with_variables=False,
                 allow_backchaining_with_variables=False,
                 delete_temporary_variables=False,
                 log=None):

        if log is None:
            self.log = logger.create_logger(LOG_DEFAULT_FILENAME)
        else:
            self.log = log
        self.log.set_level(LOG_LEVEL)
        self.log.fine("Initializing PLN MindAgent")

        AbstractChainer.__init__(self, atomspace, self.log)

        # Store a reference to the MindAgent object so that it can be used
        # to stimulate atoms.
        self._stimulateAtoms = stimulateAtoms
        self._agent = agent
        self._preferAttentionalFocus = preferAttentionalFocus
        self.learnRuleFrequencies = learnRuleFrequencies
        self._allow_output_with_variables = allow_output_with_variables
        self._allow_backchaining_with_variables = allow_backchaining_with_variables
        self._delete_temporary_variables = delete_temporary_variables

        self.atomspace = atomspace

        # For every atom, store the atoms used to produce it (including
        # the atoms used to produce them). This prevents cycles (very
        # important) as well as repeating the same inference.
        # Map from Atom -> set(Atom)
        # Default value is the empty set
        self.trails = defaultdict(set)
        # TODO: What is the following line for?
        #self.produced_from = defaultdict(set)
        self.history_index = InferenceHistoryIndex()

        # TODO:
        #self.history_atomspace = AtomSpace()
        # TODO: actually load and save these; when loading, rebuild the
        # indexes above.

        # Record how often each Rule is used, in order to bias the Rule
        # frequencies. The higher this initial value, the longer the counts
        # take to adapt, which is important to avoid a runaway feedback
        # loop.
        initial_frequency = 100

        def constant_factory():
            return initial_frequency

        if learnRuleFrequencies:
            self.rule_count = defaultdict(constant_factory)
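
For reference, here is a minimal runnable sketch of the rule-frequency bias set up at the end of __init__ above: defaultdict(constant_factory) makes every previously unseen Rule start at initial_frequency, and the larger that starting value, the less each observed use shifts the count, so adaptation is slower. The rule names below are purely hypothetical.

from collections import defaultdict

initial_frequency = 100          # same starting bias as in the chainer above

def constant_factory():
    return initial_frequency

rule_count = defaultdict(constant_factory)

rule_count["DeductionRule"] += 1      # hypothetical rule name; first use: 100 -> 101
print(rule_count["DeductionRule"])    # 101
print(rule_count["SomeUnusedRule"])   # 100 -- unseen rules still read as the default
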
Example #2
    def __init__(
        self,
        atomspace,
        stimulateAtoms=False,
        agent=None,
        learnRuleFrequencies=False,
        preferAttentionalFocus=False,
        allow_output_with_variables=False,
        allow_backchaining_with_variables=False,
        delete_temporary_variables=False,
        check_cycles=True,
        check_repeats=True,
        log=None,
    ):

        if log is None:
            self.log = logger.create_logger(LOG_DEFAULT_FILENAME)
        else:
            self.log = log
        self.log.set_level(LOG_LEVEL)
        self.log.fine("Initializing PLN MindAgent")

        AbstractChainer.__init__(self, atomspace, self.log)

        # Store a reference to the MindAgent object so that it can be used
        # to stimulate atoms.
        self.atomspace = atomspace
        self._stimulateAtoms = stimulateAtoms
        self._agent = agent
        self._preferAttentionalFocus = preferAttentionalFocus
        self.learnRuleFrequencies = learnRuleFrequencies
        self._allow_output_with_variables = allow_output_with_variables
        self._allow_backchaining_with_variables = allow_backchaining_with_variables
        self._delete_temporary_variables = delete_temporary_variables
        self._check_cycles = check_cycles
        self._check_repeats = check_repeats
        # You have to record the inference history in order to check cycles
        if not check_repeats:
            assert check_cycles

        self.history = AtomSpaceBasedInferenceHistory(
            chainer=self, main_atomspace=atomspace, history_atomspace=atomspace
        )

        # Record how often each Rule is used, in order to bias the Rule
        # frequencies. The higher this initial value, the longer the counts
        # take to adapt, which is important to avoid a runaway feedback
        # loop.
        initial_frequency = 100

        def constant_factory():
            return initial_frequency

        if learnRuleFrequencies:
            self.rule_count = defaultdict(constant_factory)
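
As a rough illustration of what the inference history above has to keep track of (see the comments in Example #1), here is a self-contained sketch in plain Python: remember which atoms each conclusion was produced from, reject an inference whose output already appears in its own derivation trail (a cycle), and skip an inference that has been recorded before (a repeat). This is not the AtomSpaceBasedInferenceHistory API; every name below is hypothetical.

from collections import defaultdict

trails = defaultdict(set)   # conclusion -> atoms (transitively) used to produce it
performed = set()           # (rule, inputs, output) triples already carried out

def record_inference(rule_name, inputs, output):
    """Return False if the inference would be cyclic or is a repeat."""
    trail = set(inputs)
    for atom in inputs:
        trail |= trails[atom]
    if output in trail:                        # output was used to derive its inputs
        return False
    key = (rule_name, frozenset(inputs), output)
    if key in performed:                       # exact same inference seen before
        return False
    trails[output] |= trail
    performed.add(key)
    return True

print(record_inference("Deduction", {"A->B", "B->C"}, "A->C"))  # True
print(record_inference("Deduction", {"A->B", "B->C"}, "A->C"))  # False (repeat)
print(record_inference("Deduction", {"A->C", "X->Y"}, "A->C"))  # False (cycle)
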
Example #4
    def __init__(self,
                 atomspace,
                 stimulateAtoms=False,
                 agent=None,
                 learnRuleFrequencies=False,
                 preferAttentionalFocus=False,
                 allow_output_with_variables=False,
                 allow_backchaining_with_variables=False,
                 delete_temporary_variables=False,
                 check_cycles=True,
                 check_repeats=True,
                 log=None):

        if log is None:
            self.log = logger.create_logger(LOG_DEFAULT_FILENAME)
        else:
            self.log = log
        self.log.set_level(LOG_LEVEL)
        self.log.fine("Initializing PLN MindAgent")

        AbstractChainer.__init__(self, atomspace, self.log)

        # Store a reference to the MindAgent object so that it can be used
        # to stimulate atoms.
        self.atomspace = atomspace
        self._stimulateAtoms = stimulateAtoms
        self._agent = agent
        self._preferAttentionalFocus = preferAttentionalFocus
        self.learnRuleFrequencies = learnRuleFrequencies
        self._allow_output_with_variables = allow_output_with_variables
        self._allow_backchaining_with_variables = allow_backchaining_with_variables
        self._delete_temporary_variables = delete_temporary_variables
        self._check_cycles = check_cycles
        self._check_repeats = check_repeats
        # You have to record the inference history in order to check cycles
        if not check_repeats:
            assert check_cycles

        self.history = AtomSpaceBasedInferenceHistory(
            chainer=self,
            main_atomspace=atomspace,
            history_atomspace=atomspace)

        # Record how often each Rule is used, in order to bias the Rule
        # frequencies. The higher this initial value, the longer the counts
        # take to adapt, which is important to avoid a runaway feedback
        # loop.
        initial_frequency = 100

        def constant_factory():
            return initial_frequency

        if learnRuleFrequencies:
            self.rule_count = defaultdict(constant_factory)
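
To make the flag invariant above explicit: the assertion requires check_cycles to be True whenever check_repeats is False, so at least one of the two history-based checks is always enabled. A tiny standalone illustration (not chainer code):

def validate_history_flags(check_cycles, check_repeats):
    # Mirrors the assertion in __init__: no repeats-check implies cycles-check.
    if not check_repeats:
        assert check_cycles, "when repeats are not checked, cycles must be"

validate_history_flags(check_cycles=True, check_repeats=False)    # passes
validate_history_flags(check_cycles=False, check_repeats=True)    # passes
# validate_history_flags(check_cycles=False, check_repeats=False) would raise
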
Example #5
    TruthValue,
)
from opencog.utilities import get_free_variables

#############
# Constants #
#############

TRUE_TV = createTruthValue(1, 1)     # strength 1, confidence 1
DEFAULT_TV = createTruthValue(1, 0)  # strength 1, confidence 0

#############
# Variables #
#############

agent_log = create_logger("opencog.log")
agent_log.set_component("Agent")

#############
# Functions #
#############


def add_to_atomspace(atoms: set[Atom] | list[Atom],
                     atomspace: AtomSpace) -> None:
    """Add all atoms to the atomspace."""

    for atom in atoms:
        atomspace.add_atom(atom)
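
A hedged usage sketch for add_to_atomspace follows; it assumes the opencog Python bindings used above are installed and uses AtomSpace.add_node to create a couple of nodes, but treat the exact constructor calls as assumptions rather than part of this module.

from opencog.atomspace import AtomSpace, types

source = AtomSpace()
target = AtomSpace()

# Create two nodes in one atomspace, then copy them into another.
atoms = [source.add_node(types.ConceptNode, "cat"),
         source.add_node(types.ConceptNode, "animal")]
add_to_atomspace(atoms, target)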

Example #6
from __future__ import print_function
from pprint import pprint
from opencog.cogserver import MindAgent
from opencog.atomspace import types, AtomSpace, TruthValue
from opencog.scheme_wrapper import load_scm, scheme_eval_h, scheme_eval, __init__
from opencog import logger

import Queue
import time

__author__ = 'Hujie Wang'
LOG_LEVEL = "fine"
log = logger.create_logger("/tmp/hobbs.log")
log.set_level(LOG_LEVEL)

'''
========================================
Configurations
'''

'''
Number of sentences to search (including the one that contains the pronoun)
'''

NUMBER_OF_SEARCHING_SENTENCES = 3

'''
Suppose the decreasing rate is x; then the i-th accepted candidate will
have a confidence value of (x^(i-1))(1-x), where i starts at 1.
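
# A standalone sketch (not from the Hobbs agent itself) of the confidence
# scheme described above: with decreasing rate x, the i-th accepted candidate
# (i starting at 1) is assigned confidence (x ** (i - 1)) * (1 - x).
def candidate_confidence(i, x):
    return (x ** (i - 1)) * (1 - x)

# e.g. with x = 0.5 the first candidates get 0.5, 0.25, 0.125, ...
print([candidate_confidence(i, 0.5) for i in (1, 2, 3)])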