Example #1
0
def update_NET_NUMBER_OF_DESKTOPS():
    global properties, xinerama

    # Remember the previous desktop count, then refresh it from the X root.
    previous = properties["_NET_NUMBER_OF_DESKTOPS"]
    current = ptxcb.XROOT.get_number_of_desktops()
    properties["_NET_NUMBER_OF_DESKTOPS"] = current

    if previous < current:
        # Desktops were added: create a workspace and its monitor(s)
        # for every new desktop id.
        for wsid in xrange(previous, current):
            Workspace.add(wsid)
            Monitor.add(wsid, xinerama)
    elif previous > current:
        # Desktops were removed: tear monitors down before their workspaces
        # (reverse of the creation order above).
        for wsid in xrange(current, previous):
            Monitor.remove(wsid)
            Workspace.remove(wsid)
Example #2
0
class Sniffer:
    """Sniff HTTP traffic on port 80 and report each request to a Monitor."""

    def __init__(self, **kwargs):
        # All keyword options are forwarded to the Monitor we report into.
        self._monitor = Monitor(**kwargs)

    def start(self):
        """Start the monitor, then block sniffing HTTP (TCP port 80)."""
        self._monitor.start()
        # sniff() invokes _read_packet once per captured packet.
        sniff(filter="tcp and port 80", prn=self._read_packet)

    def _read_packet(self, packet):
        """Record the Host/Path of an HTTP request on a worker thread.

        Bug fix: the original both called ``self._monitor.add(host, path)``
        directly AND ran the same call on a thread, so every request was
        recorded twice; the immediate ``join()`` also made the thread
        pointless.  Each request is now recorded exactly once, and the
        sniff callback is not blocked waiting for the monitor.
        """
        if packet.haslayer(http.HTTPRequest):
            layer = packet.getlayer(http.HTTPRequest)
            host = layer.fields["Host"].decode("UTF-8")
            path = layer.fields["Path"].decode("UTF-8")
            Thread(target=self._monitor.add, args=(host, path)).start()
Example #3
0
    ),
    plots=[
    [
        {
            "label":        "min train score",
            "line_width":   1.0
        },
        {
            "label":        "train score"
        },
        {
            "label":        "max train score",
            "line_width":   1.0
        }
    ]
]
)
# NOTE(review): `monitor`, `agent`, `env`, `num_episodes`, `Trainer` and
# `Smoothie` must be defined earlier in this file — confirm against context.
monitor.start_server(8080)  # serve live training plots over HTTP on port 8080
trainer = Trainer(agent, env, train_interval=1, mb_size=30, shuffle=False, reverse=False)
score_smoother = Smoothie(0.01)  # smoothing for per-episode scores

# Train for num_episodes episodes, logging the smoothed score envelope.
for t in range(num_episodes):
    score, _, actor_metrics, critic_metrics = trainer.run_episode(learn=True)
    # update() returns a (min, moving-average, max) envelope of the score.
    min_score, score_ma, max_score = score_smoother.update(score)
    print(t, score, min_score, score_ma, max_score, actor_metrics, critic_metrics)
    monitor.add(t, {
        "min train score":  min_score,
        "train score":      score_ma, 
        "max train score":  max_score
    })
Example #4
0
import gym
from ac import Agent
import numpy as np
from monitor import Monitor

# Actor-critic agent; the two arguments are presumably the actor and critic
# learning rates — confirm against the Agent constructor.
agent = Agent(0.00001, 0.00005)
env = gym.make("LunarLander-v2")
score_history = []                 # raw per-episode scores
num_episodes = 2000
monitor = Monitor("monitor.csv")   # logs metrics to a CSV file
monitor.start_server(8080)         # and serves live plots on port 8080

for t in range(num_episodes):
    done = False
    observation = env.reset()
    score = 0.0
    
    # Roll out one episode, learning online at every step.
    while not done:
        action = agent.choose_action(observation)
        observation_, reward, done, info = env.step(action)
        agent.learn(observation, action, reward, observation_, done)
        observation = observation_
        score += reward
    
    score_history.append(score)
    # Trailing 100-episode mean — the usual LunarLander benchmark metric.
    avg_score = np.mean(score_history[-100:])
    print("Episode:", t, "  score:", score, "  average score:", avg_score)
    monitor.add(t, average_score = avg_score)
Example #5
0
                         }, {
                             "label": "max train score",
                             "line_width": 1.0
                         }]])
# NOTE(review): `monitor`, `agent`, `env`, `SingleTrainer`, `Smoothie`,
# `test_interval`, `report_interval` and `render` must be defined earlier
# in this file — confirm against context.
monitor.start_server(8080)  # serve live training plots over HTTP on port 8080
trainer = SingleTrainer(agent, env)
score_smoother = Smoothie(0.01)  # smoothing for per-episode scores

next_test = test_interval  # episode count at which the next evaluation runs

# Train, periodically evaluating the agent.
for t, score in trainer.train(10000, report_interval=report_interval):
    # update() returns a (min, moving-average, max) envelope of the score.
    min_score, score_ma, max_score = score_smoother.update(score)
    print("Training: episodes=%4d score: %.3f" % (t, score))
    monitor.add(
        t, {
            "train score": score,
            "min train score": min_score,
            "max train score": max_score
        })
    if t >= next_test:
        # Evaluate over 10 episodes; returns (min, mean, max) test scores.
        min_test, mean_test, max_test = trainer.test(10, render=render)
        print("Test after %d train episodes:" % (t, ),
              "    min, mean, max score:", min_test, mean_test, max_test)
        monitor.add(
            t, {
                "min test score": min_test,
                "max test score": max_test,
                "average test score": mean_test
            })
        next_test += test_interval
Example #6
0
tester = Tester(agent, env, tau=10.0)    # evaluation harness for the agent
monitor = Monitor("monitor.csv")         # CSV metric log
mon_server = http_server(8080, monitor)  # HTTP server exposing the monitor
best_average = None
best_average_t = t = 0
next_display = display_interval = 100    # render a demo every 100 episodes

test_window = Window()       # sliding window of test scores
avg_score_window = Window()  # sliding window of average training scores

mon_server.start()

# NOTE(review): `num_episodes` is only defined *below* this loop in this
# file (L169) — confirm this fragment's ordering against the full source.
# trainer.train yields per-episode stats; tau presumably controls
# exploration temperature — confirm against the trainer implementation.
for num_trained, actor_metrics, critic_metrics, score, avg_score in trainer.train(
        num_episodes, tau=0.2):
    avg_score_window << avg_score
    # Run one evaluation episode per training episode.
    test_score = tester.test()
    test_window << test_score
    print("Episodes: %6d   score: %+8.2f   average score: %+8.2f   average score_window: %+8.2f:%+8.2f   test score: %+8.2f" % \
            (num_trained, score, avg_score, avg_score_window.Low, avg_score_window.High, test_score))

    monitor.add(num_trained,
                score=score,
                avg_score=avg_score,
                avg_low=avg_score_window.Low,
                avg_high=avg_score_window.High
                #test_score=test_score
                )
    if num_trained >= next_display:
        tester.display(tau=1.0)
        next_display += display_interval
# NOTE(review): `agent`, `score_smoother`, `monitor`, `gym` and
# `plotLearning` are not defined in this fragment — confirm they are
# provided earlier in the file.
env = gym.make('LunarLander-v2')
score_history = []   # raw per-episode scores
num_episodes = 3000

for t in range(num_episodes):
    done = False
    score = 0
    observation = env.reset()
    actor_metrics, critic_metrics = None, None
    # Roll out one episode, learning online at every step.
    while not done:
        action = agent.choose_action(observation)
        observation_, reward, done, info = env.step(action)
        actor_metrics, critic_metrics = agent.learn(observation, action,
                                                    reward, observation_, done)
        observation = observation_
        score += reward

    score_history.append(score)
    # update() returns a (min, moving-average, max) envelope of the score.
    min_score, score_ma, max_score = score_smoother.update(score)
    print(t, score, min_score, score_ma, max_score, actor_metrics,
          critic_metrics)
    monitor.add(t,
                score_MA=score_ma,
                min_score=min_score,
                max_score=max_score,
                score=score)

filename = 'LunarLander.png'
plotLearning(score_history, filename=filename, window=100)