Example #1
from envs import GraphSamplingEnv
from agents import BaseAgent


def train_test_agent():
    # Build the graph sampling environment, train a baseline agent on it,
    # then evaluate the trained agent.
    M = 10  # maximum number of samples for the environment
    env = GraphSamplingEnv(max_samples=M)

    num_train_graphs = 10
    agent = BaseAgent(env=env)
    agent.learn(num_train_graphs)
    agent.test()
Example #2
def run(args):
    # Train and evaluate an agent with default hyperparameters, logging
    # under ./results/fixed_env/<timestamp>. args is accepted but unused here.
    M = 5
    env = GraphSamplingEnv(max_samples=M)

    agent = BaseAgent(env=env)
    now = datetime.now()
    # logger and TIMESTAMP_FORMAT are module-level names defined elsewhere
    # in the source file (see the sketch after this example).
    logger.configure(
        dir=f"./results/fixed_env/{now.strftime(TIMESTAMP_FORMAT)}")
    agent.learn()
    agent.test()
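Example #2 relies on module-level names (datetime, logger, TIMESTAMP_FORMAT) that are not shown in the snippet. The following is a minimal sketch of what the surrounding module might contain, assuming a baselines-style logger with a configure(dir=...) method and an arbitrary timestamp format; both are assumptions, not taken from the original source.

from datetime import datetime

from envs import GraphSamplingEnv
from agents import BaseAgent
# Assumption: a logger exposing configure(dir=...), e.g. the OpenAI
# baselines logger; the original module may import something else.
from baselines import logger

# Hypothetical value; the real TIMESTAMP_FORMAT is not shown in the snippet.
TIMESTAMP_FORMAT = "%Y-%m-%d_%H-%M-%S"

if __name__ == "__main__":
    run(args=None)  # args is accepted but unused in this version of run()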
Example #3
def run(args):
    # Train and evaluate an agent whose hyperparameters are read from the
    # args mapping; logs go to LOGDIR with a timestamp suffix.
    M = 3
    env = GraphSamplingEnv(max_samples=M)

    agent = BaseAgent(
        env=env,
        gamma=args["gamma"],
        learning_rate=args["learning_rate"],
        replay_buffer_size=args["replay_buffer_size"],
        exploration_schedule_steps=args["exploration_schedule_steps"],
        exploration_initial_prob=args["exploration_initial_prob"],
        exploration_final_prob=args["exploration_final_prob"],
        random_walk_sampling_args=SAMPLING_ARGS)
    now = datetime.now()
    # LOGDIR and SAMPLING_ARGS are module-level constants defined elsewhere
    # (see the sketch after this example).
    logger.configure(dir=LOGDIR + f"{now.strftime(TIMESTAMP_FORMAT)}")
    agent.learn()
    agent.test()
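This version additionally expects LOGDIR and SAMPLING_ARGS at module level and reads its hyperparameters from an args mapping. Below is a minimal driver sketch: the key names match the lookups in run() above, but every value, the LOGDIR path, and the contents of SAMPLING_ARGS are illustrative placeholders rather than values from the original project.

# Assumed module-level constants (placeholder values).
LOGDIR = "./results/"
SAMPLING_ARGS = {}  # extra keyword arguments for random-walk sampling

if __name__ == "__main__":
    # Placeholder hyperparameters; only the key names come from run() above.
    args = {
        "gamma": 0.99,
        "learning_rate": 1e-4,
        "replay_buffer_size": 10000,
        "exploration_schedule_steps": 10000,
        "exploration_initial_prob": 1.0,
        "exploration_final_prob": 0.05,
    }
    run(args)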
Example #4
from envs import GraphSamplingEnv
from agents import BaseAgent


def train_test_agent():
    print("here")  # debug output
    M = 10
    env = GraphSamplingEnv(max_samples=M)
    num_train_graphs = 10
    agent = BaseAgent(env=env)
    agent.learn(num_train_graphs)
    agent.test()


if __name__ == "__main__":
    train_test_agent()