Example #1
def main():
    # Init loggers
    log.set_level("fine")
    log.set_sync(False)
    agent_log.set_level("fine")
    agent_log.set_sync(False)
    ure_logger().set_level("fine")
    ure_logger().set_sync(False)

    # Set main atomspace
    atomspace = AtomSpace()
    set_default_atomspace(atomspace)

    # Wrap environment
    wrapped_env = CartPoleWrapper(env, atomspace)

    # Instantiate CartPoleAgent, and tune parameters
    cpa = FixedCartPoleAgent(wrapped_env, atomspace)
    cpa.delta = 1.0e-16

    # Run control loop
    while not cpa.control_cycle():
        wrapped_env.render()
        time.sleep(0.1)
        log.info("cycle_count = {}".format(cpa.cycle_count))

    agent_log.info(f"The final reward is {cpa.accumulated_reward}.")
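The snippets in this listing omit their import headers. A minimal sketch of what Example #1 presumably relies on, based on the OpenCog Python bindings and the ROCCA project layout (module paths and the gym environment name are assumptions; FixedCartPoleAgent is defined elsewhere in the example script):

import time
import gym

from opencog.atomspace import AtomSpace
from opencog.logger import log
from opencog.ure import ure_logger
from opencog.utilities import set_default_atomspace

# ROCCA-side imports; exact paths may differ between versions.
from rocca.agents.utils import agent_log
from rocca.envs.wrappers import CartPoleWrapper

# Hypothetical environment construction (not shown in the excerpt).
env = gym.make("CartPole-v1")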
Example #2
def main():
    # Init loggers
    log.set_level("fine")
    log.set_sync(False)
    agent_log.set_level("fine")
    agent_log.set_sync(False)
    ure_logger().set_level("fine")
    ure_logger().set_sync(False)

    # Set main atomspace
    atomspace = AtomSpace()
    set_default_atomspace(atomspace)

    # Wrap environment
    wrapped_env = CartPoleWrapper(env)

    # Instantiate CartPoleAgent, and tune parameters
    cpa = CartPoleAgent(wrapped_env)
    cpa.delta = 1.0e-16

    # Run control loop
    while cpa.step():
        time.sleep(0.1)
        log.info("step_count = {}".format(cpa.step_count))

    print(f"The final reward is {cpa.accumulated_reward}.")
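Note the inverted loop conventions between the first two examples: in Example #1 the loop runs while not cpa.control_cycle(), suggesting control_cycle() returns True once the episode is over, whereas here step() is looped on directly, suggesting it returns True while the run is still going. A side-by-side sketch (semantics inferred from the excerpts, not from API documentation):

# Example #1 convention: control_cycle() -> True when the episode is done
while not cpa.control_cycle():
    wrapped_env.render()

# Example #2 convention: step() -> True while the episode is still running
while cpa.step():
    time.sleep(0.1)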
Example #3
    def load_opencog_modules(self):
        # Init loggers
        log.set_level("debug")
        # log.set_sync(True)
        agent_log.set_level("debug")
        # agent_log.set_sync(True)
        ure_logger().set_level("info")
        # ure_logger().set_sync(True)

        # Load miner
        scheme_eval(self.atomspace, "(use-modules (opencog miner))")
        scheme_eval(self.atomspace, "(miner-logger-set-level! \"fine\")")
        # scheme_eval(self.atomspace, "(miner-logger-set-sync! #t)")

        # Load PLN
        scheme_eval(self.atomspace, "(use-modules (opencog pln))")
        # scheme_eval(self.atomspace, "(pln-load-rule 'predictive-implication-scope-direct-introduction)")
        scheme_eval(self.atomspace, "(pln-load-rule 'predictive-implication-scope-direct-evaluation)")
        # No need for predictive implication for now
        # scheme_eval(self.atomspace, "(pln-load-rule 'predictive-implication-direct-evaluation)")
        scheme_eval(self.atomspace, "(pln-log-atomspace)")
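For context, a minimal sketch of how scheme_eval is typically brought in and used with the OpenCog Python bindings; the import path is an assumption (older bindings expose it from opencog.scheme_wrapper):

from opencog.atomspace import AtomSpace
from opencog.scheme import scheme_eval  # or: from opencog.scheme_wrapper import scheme_eval

atomspace = AtomSpace()

# Evaluates a Scheme expression against the given atomspace and
# returns the printed result as a bytes string.
result = scheme_eval(atomspace, "(+ 2 2)")
print(result)  # b'4\n'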
Example #4
        # Create Goal
        pgoal = EvaluationLink(PredicateNode("Reward"), NumberNode("1"))
        ngoal = EvaluationLink(PredicateNode("Reward"), NumberNode("0"))

        # Call super ctor
        OpencogAgent.__init__(self, env, action_space, pgoal, ngoal)


if __name__ == "__main__":
    # Init loggers
    log.set_level("debug")
    log.set_sync(False)
    agent_log.set_level("fine")
    agent_log.set_sync(False)
    ure_logger().set_level("debug")
    ure_logger().set_sync(False)

    # Set main atomspace
    atomspace = AtomSpace()
    set_default_atomspace(atomspace)

    # Wrap environment
    wrapped_env = ChaseWrapper(env)

    # ChaseAgent
    ca = ChaseAgent(wrapped_env)

    # Training/learning loop
    lt_iterations = 2  # Number of learning-training iterations
    lt_period = 200  # Duration of a learning-training iteration
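The goal atoms at the top of this excerpt are built with Atomese type constructors, which add atoms to whatever atomspace has been set as the default; a minimal sketch of the presumed setup (import paths assumed from the OpenCog Python bindings):

from opencog.atomspace import AtomSpace
from opencog.type_constructors import EvaluationLink, PredicateNode, NumberNode
from opencog.utilities import set_default_atomspace

atomspace = AtomSpace()
set_default_atomspace(atomspace)

# Positive goal: the "Reward" predicate evaluated at 1.
pgoal = EvaluationLink(PredicateNode("Reward"), NumberNode("1"))
print(pgoal)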
Example #5
        self.monoaction_general_succeedent_mining = False
        self.polyaction_mining = False
        self.temporal_deduction = False


if __name__ == "__main__":
    # Set main atomspace
    atomspace = AtomSpace()
    set_default_atomspace(atomspace)

    # Init loggers
    log.set_level("info")
    # log.set_sync(True)
    agent_log.set_level("debug")
    # agent_log.set_sync(True)
    ure_logger().set_level("debug")
    # ure_logger().set_sync(True)
    miner_log = MinerLogger(atomspace)
    miner_log.set_level("debug")
    # miner_log.set_sync(True)

    # Wrap environment
    wrapped_env = ChaseWrapper(env)

    # ChaseAgent
    ca = ChaseAgent(wrapped_env)

    # Training/learning loop
    lt_iterations = 2  # Number of learning-training iterations
    lt_period = 200  # Duration of a learning-training iteration
    for i in range(lt_iterations):
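The excerpt cuts off at the loop header. A hypothetical sketch of what each learning-training iteration could alternate between, given the lt_iterations/lt_period parameters above (the learn() and control_cycle() calls on ca are illustrative assumptions, not confirmed by the excerpt; time is assumed imported):

for i in range(lt_iterations):
    # Learning phase: mine/refine cognitive schematics from the
    # percepta accumulated so far (hypothetical method name).
    ca.learn()

    # Training phase: run the agent for lt_period control cycles
    # to gather more experience.
    for j in range(lt_period):
        ca.control_cycle()
        time.sleep(0.01)

    agent_log.info(f"Accumulated reward after iteration {i + 1}: {ca.accumulated_reward}")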
Example #6
        self.cogscm_maximum_shannon_entropy = 1
        self.cogscm_maximum_differential_entropy = 0
        self.cogscm_maximum_variables = 0


if __name__ == "__main__":
    # Set main atomspace
    atomspace = AtomSpace()
    set_default_atomspace(atomspace)

    # Init loggers
    log.set_level("info")
    # log.set_sync(True)
    agent_log.set_level("info")
    # agent_log.set_sync(True)
    ure_logger().set_level("info")
    # ure_logger().set_sync(True)
    miner_log = MinerLogger(atomspace)
    miner_log.set_level("info")
    # miner_log.set_sync(True)

    # Wrap environment
    wrapped_env = ChaseWrapper(env, atomspace)

    # ChaseAgent
    cag = ChaseAgent(wrapped_env, atomspace)

    # Log all parameters of cag, useful for debugging
    cag.log_parameters(level="debug")

    # Training/learning loop