Example #1
class RedisSubscriber(object):
    def __init__(self, topic, host='localhost', port=6379, db=0):
        self.rqueue = RedisQueue(topic, 1, host=host, port=port, db=db)

    def redis_recv_pyobj(self, blocking=True):
        item = self.rqueue.get(isBlocking=blocking)
        if item is None:
            return None
        return pkl.loads(item)

    def redis_recv(self, blocking=True):
        return self.rqueue.get(isBlocking=blocking)
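None of these examples ship the RedisQueue helper they rely on; in each project it is a small wrapper around a Redis list. As a rough sketch of the interface Example #1 assumes (a constructor taking a queue name plus connection details, a blocking get(isBlocking=...), and a matching put() on the producer side), something like the redis-py based class below would do. The class body, the meaning of the second positional argument, and the topic name are guesses, not the original implementation.

import pickle as pkl
import redis


class RedisQueue(object):
    """Minimal FIFO queue on top of a Redis list (illustrative sketch only)."""

    def __init__(self, name, maxsize=0, host='localhost', port=6379, db=0):
        self.key = 'queue:{}'.format(name)
        self.db = redis.Redis(host=host, port=port, db=db)

    def put(self, item):
        # append raw bytes/str to the tail of the list
        self.db.rpush(self.key, item)

    def get(self, isBlocking=True, timeout=0):
        if isBlocking:
            # blpop returns a (key, value) pair, or None on timeout
            item = self.db.blpop(self.key, timeout=timeout)
            return item[1] if item else None
        return self.db.lpop(self.key)


# Producer side matching RedisSubscriber.redis_recv_pyobj():
# RedisQueue('my_topic').put(pkl.dumps({'step': 1}))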
Example #2
class RedisQueueWorker(object):
    def __init__(self,
                 redis_mgr,
                 service_name,
                 custom_key,
                 func_name,
                 callback_to_main_thread=False):
        self.service_name = service_name
        self.func_name = func_name
        self.redis_queue = RedisQueue(redis_mgr)
        self.custom_key = custom_key
        self.redis_queue.subscribe(self.service_name, custom_key)
        self.callback_to_main_thread = callback_to_main_thread

    def _real_start(self):
        while True:
            try:
                item = self.redis_queue.get(self.custom_key)
                if self.callback_to_main_thread:
                    IOLoop.instance().add_callback(self.func_name, item)
                else:
                    self.func_name(item)
            except Exception as e:
                logger.warning("start_work error: %s", e)
                time.sleep(1)
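Example #2's _real_start() loops forever, so in practice it would be launched on a worker thread. A minimal way to do that could look like the sketch below; the start() helper, the service/key names, and the handle_item callback are placeholders, not part of the original code.

import threading


def start(worker):
    # run the blocking consume loop without tying up the calling thread
    t = threading.Thread(target=worker._real_start, name='redis-worker')
    t.daemon = True
    t.start()
    return t


# worker = RedisQueueWorker(redis_mgr, 'my_service', 'my_key', handle_item)
# start(worker)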
Example #3
class WebServerClass(BaseHTTPRequestHandler):
    def __init__(self, *args, **kwargs):
        self.singleton = Singleton()
        self.queue_chart = RedisQueue(name="data_chart",
                                      namespace="data_chart")
        super(WebServerClass, self).__init__(*args, **kwargs)

    def _set_headers(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):

        if self.path == "/":
            self.path = self.singleton.template_path

        self._set_headers()

        with open(self.singleton.template_path, "rb") as f:
            self.wfile.write(f.read())
        while True:
            # payloads use single quotes (Python repr style), so swap them
            # for double quotes before parsing as JSON
            data = self.queue_chart.get().decode("utf-8")
            data = data.replace("\'", "\"")
            data = json.loads(data)
            if data:
                self.wfile.write(
                    "<script type=\"text/javascript\">AddDataChart(chart_id={}, data={});</script>"
                    .format(  # noqa
                        data["chart_id"],
                        data["data"]).encode(encoding='utf_8'))
        return
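Example #3 only defines the request handler; wiring it into a server uses the standard HTTPServer pattern. The sketch below assumes Python 3 (http.server; BaseHTTPServer on Python 2), and the port is arbitrary. The Singleton providing template_path is assumed to be configured elsewhere.

from http.server import HTTPServer

if __name__ == '__main__':
    # A new WebServerClass instance (and hence a new RedisQueue handle)
    # is created for every incoming request.
    server = HTTPServer(('0.0.0.0', 8000), WebServerClass)
    server.serve_forever()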
Example #4
class A3C(object):

    def __init__(self):
        self.device = '/gpu:0' if USE_GPU else '/cpu:0'
        self.stop_requested = False
        self.global_t = 0
        if USE_LSTM:
            self.global_network = A3CLSTMNetwork(STATE_DIM, STATE_CHN, ACTION_DIM, self.device, -1)
        else:
            self.global_network = A3CFFNetwork(STATE_DIM, STATE_CHN, ACTION_DIM, self.device)
        self.global_network.create_loss(ENTROPY_BETA)

        self.initial_learning_rate = log_uniform(INITIAL_ALPHA_LOW, INITIAL_ALPHA_HIGH, INITIAL_ALPHA_LOG_RATE)
        print('initial_learning_rate:', self.initial_learning_rate)
        self.learning_rate_input = tf.placeholder('float')
        self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_input,
                                                   decay=RMSP_ALPHA, momentum=0.0, epsilon=RMSP_EPSILON)

        grads_and_vars = self.optimizer.compute_gradients(
            self.global_network.total_loss, self.global_network.get_vars())
        self.apply_gradients = self.optimizer.apply_gradients(grads_and_vars)

        self.actor_threads = []
        for i in range(PARALLEL_SIZE):
            actor_thread = A3CActorThread(i, self.global_network)
            self.actor_threads.append(actor_thread)

        self.sess = tf.InteractiveSession()
        self.sess.run(tf.initialize_all_variables())

        self.reward_input = tf.placeholder(tf.float32)
        tf.scalar_summary('reward', self.reward_input)

        self.time_input = tf.placeholder(tf.float32)
        tf.scalar_summary('living_time', self.time_input)

        self.summary_op = tf.merge_all_summaries()
        self.summary_writer = tf.train.SummaryWriter(LOG_FILE, self.sess.graph)

        self.saver = tf.train.Saver()
        self.restore()

        self.lock = threading.Lock()
        self.rq = RedisQueue(REDIS_QUEUE_NAME)
        self.train_count = 0
        return

    def restore(self):
        checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
        if checkpoint and checkpoint.model_checkpoint_path:
            self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
            print("checkpoint loaded:", checkpoint.model_checkpoint_path)
            tokens = checkpoint.model_checkpoint_path.split("-")
            # set global step
            self.global_t = int(tokens[1])
            print(">>> global step set: ", self.global_t)
        else:
            print("Could not find old checkpoint")
        return

    def backup(self):
        if not os.path.exists(CHECKPOINT_DIR):
            os.mkdir(CHECKPOINT_DIR)

        self.saver.save(self.sess, CHECKPOINT_DIR + '/' + 'checkpoint', global_step=self.global_t)
        return

    def predict_function(self, parallel_index, lock):
        actor_thread = self.actor_threads[parallel_index]
        while True:
            if self.stop_requested or (self.global_t > MAX_TIME_STEP):
                break
            diff_global_t = actor_thread.process(
                self.sess, self.global_t,
                self.summary_writer, self.summary_op,
                self.reward_input, self.time_input
            )

            self.global_t += diff_global_t
            if self.global_t % 1000000 < LOCAL_T_MAX:
                self.backup()
            # print 'global_t:', self.global_t
        return

    def train_function(self, index, lock):
        batch_state = []
        batch_action = []
        batch_td = []
        batch_R = []

        while True:
            if self.stop_requested or (self.global_t > MAX_TIME_STEP):
                break
            # each queue item is a pickled (state, action, td, R) training tuple
            data = self.rq.get()
            (state, action, td, R) = cPickle.loads(data)

            batch_state.append(state)
            batch_action.append(action)
            batch_td.append(td)
            batch_R.append(R)

            if len(batch_R) < BATCH_SIZE:
                continue

            lock.acquire()
            self.sess.run(self.apply_gradients, feed_dict={
                self.global_network.state_input: batch_state,
                self.global_network.action_input: batch_action,
                self.global_network.td: batch_td,
                self.global_network.R: batch_R,
                self.learning_rate_input: self.initial_learning_rate
            })
            self.train_count += 1
            lock.release()

            batch_state = []
            batch_action = []
            batch_td = []
            batch_R = []

            print('train_index:', index, 'train_count:', self.train_count)
        return

    def signal_handler(self, signal_, frame_):
        print('You pressed Ctrl+C!')
        self.stop_requested = True
        return

    def run(self):
        predict_threads = []
        for i in range(PARALLEL_SIZE):
            predict_threads.append(threading.Thread(target=self.predict_function, args=(i, self.lock)))

        signal.signal(signal.SIGINT, self.signal_handler)

        for t in predict_threads:
            t.start()

        train_threads = []
        for i in range(TRAIN_SIZE):
            train_threads.append(threading.Thread(target=self.train_function, args=(i, self.lock)))
            train_threads[i].start()

        print('Press Ctrl+C to stop')
        signal.pause()

        print('Now saving data...')
        for t in predict_threads:
            t.join()
        for t in train_threads:
            t.join()

        self.backup()
        return
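train_function() in Example #4 expects pickled (state, action, td, R) tuples on the Redis queue named REDIS_QUEUE_NAME. The producer lives in A3CActorThread, which is not shown here; a hedged sketch of what that push might look like:

import cPickle  # the snippet is Python 2; use pickle on Python 3


def push_training_sample(rq, state, action, td, R):
    # serialize one transition so train_function() can cPickle.loads() it
    rq.put(cPickle.dumps((state, action, td, R)))


# inside the actor thread, once a rollout has been scored (illustrative only):
# push_training_sample(RedisQueue(REDIS_QUEUE_NAME), state, action, td, R)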
Example #5
class Sender(threading.Thread):
    def __init__(self, status_queue, stop_event, config):
        super(Sender, self).__init__()

        self.normal_data_queue = RedisQueue('normal')
        self.retry_data_queue = RedisQueue('retry')
        self.status_queue = status_queue
        self.stop_event = stop_event

        self.base_url = config["api_url"]
        self.key = config["key"]
        self.store_energy_url = self.base_url + "/v2/energy"
        self.backup_file = "backup"
        self.console_mode = config["console_mode"] == "true"

        self.connected = False

    def run(self):
        self.send_message_to_listeners(Status.RUNNING,
                                       description="Sender has been started")

        while not self.stop_event.is_set():
            if not self.connected:
                self.connect_to_api()

            while self.connected:
                retry_data = self.read_messages_from_retry_queue()

                if len(retry_data) > 0:
                    self.send_data_to_api(retry_data)
                    break

                normal_data = self.read_messages_from_normal_queue()

                if len(normal_data) > 0:
                    self.send_data_to_api(normal_data)
                    break

                time.sleep(1)

            time.sleep(5)

        self.send_message_to_listeners(
            Status.STOPPED, description="Sender has been terminated")

    def read_messages_from_retry_queue(self):
        retry_data = []

        while not self.retry_data_queue.empty():
            retry_message = self.retry_data_queue.get()
            retry_data.append(json.loads(retry_message.decode('utf-8')))

            if len(retry_data) > 30:
                break

        return retry_data

    def read_messages_from_normal_queue(self):
        normal_data = []

        while not self.normal_data_queue.empty():
            normal_message = self.normal_data_queue.get()
            normal_data.append(json.loads(normal_message.decode('utf-8')))

            if len(normal_data) > 30:
                break

        return normal_data

    def connect_to_api(self):
        try:
            response = requests.get(self.base_url)
            self.connected = response.status_code == requests.codes.ok

            if self.connected:
                self.send_message_to_listeners(
                    Status.RUNNING,
                    description="Connected to server running on {}".format(
                        self.base_url))

        except requests.exceptions.ConnectionError as e:
            self.connected = False
            self.send_message_to_listeners(Status.RUNNING,
                                           Error.SERVER_UNREACHABLE,
                                           "Could not connect to the server")

    def send_data_to_api(self, messages):
        headers = {
            'Content-type': 'application/json',
            'Accept': 'application/json'
        }

        try:
            response = requests.post(self.store_energy_url,
                                     data=json.dumps({
                                         'data': messages,
                                         "rpi_key": self.key
                                     }),
                                     headers=headers)

            if response.status_code == requests.codes.created:
                if self.console_mode:
                    self.send_message_to_listeners(
                        Status.RUNNING,
                        description="Succesfully stored energy data")
                return

            if response.status_code == requests.codes.unauthorized:
                self.send_message_to_listeners(
                    Status.STOPPED, Error.UNAUTHORIZED,
                    "Could not authorize with given key")
                self.stop_event.set()

        except requests.exceptions.ConnectionError as e:
            self.send_message_to_listeners(Status.RUNNING,
                                           Error.SERVER_UNREACHABLE,
                                           "Could not reach the server")

            self.connected = False

            for message in messages:
                self.retry_data_queue.put(json.dumps(message))

    def send_message_to_listeners(self, status, error=None, description=None):
        message = dict()
        message["thread"] = Thread.SENDER
        message["status"] = status

        if error is not None:
            message["error"] = error

        if description is not None:
            message["description"] = description

        self.status_queue.put(message)
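Example #5's Sender needs a status queue, a stop event, and a config dict with at least api_url, key, and console_mode. A minimal, assumed wiring is sketched below; the endpoint and key are placeholders, and any object with a put() method would do for the status queue.

import threading
try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2

config = {
    "api_url": "http://localhost:5000",   # placeholder endpoint
    "key": "my-rpi-key",                  # placeholder API key
    "console_mode": "true",
}

status_queue = queue.Queue()
stop_event = threading.Event()

sender = Sender(status_queue, stop_event, config)
sender.start()        # runs Sender.run() on its own thread
# ... later, to shut down:
stop_event.set()
sender.join()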
Example #6
class MyTaskSet(CountResults):
    def __init__(self, time_execution_in_sec, chart_title, slave, *args,
                 **kwargs):
        super(MyTaskSet, self).__init__(time_execution_in_sec, chart_title,
                                        slave, *args, **kwargs)
        self.running = True
        self.slave = slave
        self.code = None

        self.queue_chart = RedisQueue(name="data_chart",
                                      namespace="data_chart")
        self.queue_tasks = RedisQueue(name="data_tasks",
                                      namespace="data_tasks")
        self.chart = ReportCharts(time_execution_in_sec, chart_title,
                                  self.slave)
        self.db = create_engine(self.config["database"]["db_string"])

    def purge_queues(self):
        self.queue_chart.purge()
        self.queue_tasks.purge()
        self.queue_data.purge()

    def set_tasks(self):
        while self.running:
            self.queue_tasks.put("heartbeat")

    def vacuum(self):
        try:
            self.db.execute("vacuum analyze films;")
            self.db.execute("vacuum films;")

        except InternalError:
            from table import Films
            films = Films()
            films.metadata.create_all(bind=self.db)
        return

    def run(self, thread=0):

        self.chart.update_chart(self.queue_chart, 5, "thread", data=1)
        while self.running and self.queue_tasks.get():
            try:
                self.read()
                self.write()
            except Exception as e:
                self.RESPONSE_TIME_AVERAGE["errors"] += 1

    def read(self):
        self.db.execute("SELECT * FROM films;".format(
            str(uuid.uuid4())[-5:], random.randint(0, self.LIMIT * 100)))

    def write(self):
        # INSERTS
        self.code = str(uuid.uuid4())[-5:]
        self.db.execute(
            "INSERT into films (code, title, did, kind) VALUES('{}', 'test', {}, 't');"
            .format(  # noqa
                self.code, random.randint(0, self.LIMIT * 100)))
        # UPDATES
        new_code = str(uuid.uuid4())[-5:]
        self.db.execute("UPDATE films set code='{}' where code='{}';".format(
            new_code, self.code))

    def on_finish(self):
        self.running = False
        time.sleep(5)
        print("Getting time here to wait all queue get empty")

        while self.queue_data.qsize() > 0 or self.queue_chart.qsize() > 0:
            print("Waiting finishing all pendents query")
            time.sleep(1)

        if not self.slave:
            table = PrettyTable([
                "Item", "Total", "Average Execution (sec)", "Total Errors",
                "Total Executed (sec)"
            ])

            table.add_row([
                "INSERTS", self.RESPONSE_TIME_AVERAGE["count"]["insert"],
                self.RESPONSE_TIME_AVERAGE["average"]["insert"], "", ""
            ])
            table.add_row([
                "UPDATES", self.RESPONSE_TIME_AVERAGE["count"]["update"],
                self.RESPONSE_TIME_AVERAGE["average"]["update"], "", ""
            ])
            table.add_row([
                "SELECTS", self.RESPONSE_TIME_AVERAGE["count"]["select"],
                self.RESPONSE_TIME_AVERAGE["average"]["select"], "", ""
            ])
            table.add_row([
                "", "", "", self.RESPONSE_TIME_AVERAGE["errors"],
                "Finished execution after {} seconds".format(self.TIMING)
            ])

            print(table)

        while self.queue_data.qsize() > 0 or self.queue_chart.qsize() > 0:
            print("Waiting finishing all pendents query")
            time.sleep(1)

        self.purge_queues()

        print("Finished! See http://localhost:9111/ to full report")
        os._exit(0)
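One design note on Example #6: read() and write() interpolate values directly into SQL strings, which is fragile and injection-prone. Since self.db comes from SQLAlchemy's create_engine, bound parameters are the usual alternative; below is a hedged sketch of write() rewritten that way, with the table and columns taken from the snippet and everything else illustrative.

import random
import uuid

from sqlalchemy import text


def write(self):
    self.code = str(uuid.uuid4())[-5:]
    # begin() commits the INSERT/UPDATE pair when the block exits cleanly
    with self.db.begin() as conn:
        conn.execute(
            text("INSERT INTO films (code, title, did, kind) "
                 "VALUES (:code, 'test', :did, 't')"),
            {"code": self.code, "did": random.randint(0, self.LIMIT * 100)})
        new_code = str(uuid.uuid4())[-5:]
        conn.execute(
            text("UPDATE films SET code = :new WHERE code = :old"),
            {"new": new_code, "old": self.code})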