Example #1
    def generate_paper(self, templates, dst_dir):
        t = TimeTable(self.timetable_source_file)
        timetable_headings, timetables = t.gen_paper_table()

        r = Room(self.room_source_file)
        room_headings, room_tables = r.gen_room_table()

        v = {
            'cover': {
                'sponsor': self.gen_sponsor_table_2(self.sponsor_source_file),
            },
            'timetable': {
                'timetable_headings': timetable_headings,
                'timetables': timetables,
            },
            'room': {
                'room_headings': room_headings,
                'room_tables': room_tables,
            },
            'ryokan': {
                'ryokan': self.md_converter('paper_ryokan')
            },
            'info': {
                'info': self.md_converter('paper_info')
            }
        }
        env = Environment(loader=FileSystemLoader('./', encoding='utf-8'))
        for page_name, template_path in templates.items():
            tmpl = env.get_template(template_path)
            with open(os.path.join(dst_dir, page_name + '.html'), 'w', encoding='utf-8') as f:
                f.write(tmpl.render(v[page_name]))
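The final loop renders one HTML file per key of templates, so the keys of templates must match the top-level keys of the context dict v. A hypothetical invocation (generator and the template paths are illustrative, not taken from the source) might look like:

    generator.generate_paper(
        templates={
            'cover': 'templates/paper_cover.html',
            'timetable': 'templates/paper_timetable.html',
            'room': 'templates/paper_room.html',
            'ryokan': 'templates/paper_ryokan.html',
            'info': 'templates/paper_info.html',
        },
        dst_dir='dist/paper',
    )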
Example #2
    def __init__(self, server_id, port=80):
        self.socket = socket.socket()
        self.host = socket.gethostname()
        self.socket.bind((self.host, port))
        self.data = replicated.ReplicatedDictionary()
        self.log = replicated.ReplicatedLog()
        self.server_id = int(server_id)
        self.time_table = TimeTable(self.server_id, 3)
        self.threads = []
        # self.run = True

        self.init_connection()
Example #3
    def __init__(self, process_name):
        super(Scheduler, self).__init__(process_name)
        self.logger.info('Starting %s' % self.process_name)
        self.publishers = PublishersPool(self.logger)
        self.thread_handlers = dict()
        self.lock = Lock()
        self.timetable = TimeTable(self.logger)
        self.regular_pipeline = RegularPipeline(self, self.timetable)
        self.hadoop_pipeline = HadoopPipeline(self, self.timetable)
        self.logger.info('Started %s' % self.process_name)
Example #5
    def generate_web(self, template, dst):
        timetable_headings, timetables = TimeTable(self.timetable_source_file).gen_tables()
        abst_headings, abst_tables = AbstTable(self.timetable_source_file).gen_tables()
        room_headings, room_tables = Room(self.room_source_file).gen_room_table_t()

        env = Environment(loader=FileSystemLoader('./', encoding='utf-8'))
        tmpl = env.get_template(template)
        v = {
            'timetables': timetables,
            'timetable_headings': timetable_headings,
            'abst_headings': abst_headings,
            'abst_tables': abst_tables,
            'info': self.md_converter('info'),
            'room_headings': room_headings,
            'room_tables': room_tables,
            'sponsor': self.gen_sponsor_table(self.sponsor_source_file),
            'ryokan': self.md_converter('ryokan'),
            'participant_table': Participant(self.participant_source_file).gen_table()
        }
        with open(dst, 'w', encoding='utf-8') as f:
            f.write(tmpl.render(v))
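Both generator methods follow the same Jinja2 flow: build an Environment, fetch a template by name, render it with a context dict. A minimal self-contained sketch of that flow, using a DictLoader in place of the FileSystemLoader so that no template files are needed:

    from jinja2 import Environment, DictLoader

    # In-memory template standing in for the real template files
    env = Environment(loader=DictLoader({
        'timetable.html': '<h1>{{ timetable_headings | join(", ") }}</h1>'
    }))
    tmpl = env.get_template('timetable.html')
    print(tmpl.render({'timetable_headings': ['Day 1', 'Day 2']}))
    # -> <h1>Day 1, Day 2</h1>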
Example #6
import pickle
import socket

# replicated, TimeTable, ClientHandler, server_addresses and column_min_vals
# are project-local modules and helpers that this page does not show.
class Server:
    def __init__(self, server_id, port=80):
        self.socket = socket.socket()
        self.host = socket.gethostname()
        self.socket.bind((self.host, port))
        self.data = replicated.ReplicatedDictionary()
        self.log = replicated.ReplicatedLog()
        self.server_id = int(server_id)
        self.time_table = TimeTable(self.server_id, 3)
        self.threads = []
        # self.run = True

        self.init_connection()

    def init_connection(self):
        self.socket.listen(5)
        print("Server with id=",
              self.server_id,
              " is running and listening for incoming connections",
              sep="")

        while True:
            # close = raw_input("Close server? y/n")
            # if close == "y":
            #     break
            c, addr = self.socket.accept()
            print("Connected to", addr)
            c.send("Connection to server was successful")
            client = ClientHandler(c, addr, self)
            client.start()
            self.threads.append(client)

        self.socket.close()
        for client in self.threads:
            client.join()

    def send_message(self, msg, ip):
        # A bound, listening stream socket cannot sendto(); open a short-lived
        # connection to the peer instead.
        data = pickle.dumps(msg)
        with socket.socket() as sock:
            sock.connect((ip, 80))
            sock.send(data)

    def post(self, msg, author):
        self.increment_time()
        entry = replicated.Entry(msg, author, self.time_table.get_self_clock(),
                                 self.server_id)
        self.data.add_post(entry)
        self.log.add_entry(entry)
        print("Post has been submitted at local time",
              self.time_table.get_self_clock())

    def lookup(self, c):
        package = pickle.dumps(self.data)
        c.send(package)

    def sync(self, sync_server):
        address = (server_addresses[sync_server], 80)
        sock = socket.socket()
        sock.connect(address)
        sock.recv(4096)  # consume the greeting the peer sends on accept

        sock.send(b"update_contents_on_my_server")
        recv = sock.recv(4096)
        (other_log, other_time_table) = pickle.loads(recv)

        sock.close()

        for e in other_log:
            not_in = self.data.is_not_in(e)
            if not_in:
                self.data.add_post(e)
                self.log.add_entry(e)

        # for e in other_log:
        #     for k in self.data.dict.values():
        #         if (e.get_time_stamp() != k.get_time_stamp()) and (e.get_parent_server() != k.get_parent_server()):
        #             self.data.add_post(e)
        #             self.log.add_entry(e)

        self.time_table.sync_tables(other_time_table)
        self.garbage_collect_log()

    def received_sync(self, client, other_server_id):
        # `id` would shadow the builtin, hence the longer parameter name
        log = self.compile_log(other_server_id=other_server_id)
        data = (log, self.time_table)
        data_pickled = pickle.dumps(data)
        client.send(data_pickled)

    # Compiles log to send when another server wants to sync with this server
    def compile_log(self, other_server_id):
        sub_log = []
        for e in self.log.get_log():
            # The id of the server where the entry was first posted
            entry_server_id = e.get_parent_server()

            # Ship only entries the other server cannot already know about
            if self.time_table.table[other_server_id][entry_server_id] < e.get_time_stamp():
                sub_log.append(e)

        return sub_log

    def garbage_collect_log(self):
        # Entry i is the clock time at server i, to which point this server knows
        # that all other servers know about events at server i
        max_common_clocks = column_min_vals(self.time_table.table)

        for e in list(self.log.get_log()):  # copy: entries are removed during the scan
            if e.get_time_stamp() <= max_common_clocks[e.get_parent_server()]:
                self.log.remove_entry(e)

    def increment_time(self):
        self.time_table.increment_self()

    def close_connection(self):
        self.socket.close()
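The Server class leans on two project-local pieces that this page does not show: the TimeTable matrix clock and the column_min_vals helper used by garbage_collect_log. A minimal sketch, assuming only the interface the class actually exercises (table, increment_self, get_self_clock, sync_tables), might look like:

    # Hedged sketch of the matrix clock; the real project classes may differ.
    class TimeTable:
        def __init__(self, server_id, n_servers):
            self.server_id = server_id
            # table[i][j]: this server's knowledge of server i's view of server j's clock
            self.table = [[0] * n_servers for _ in range(n_servers)]

        def increment_self(self):
            self.table[self.server_id][self.server_id] += 1

        def get_self_clock(self):
            return self.table[self.server_id][self.server_id]

        def sync_tables(self, other):
            n = len(self.table)
            # Element-wise maximum of the two tables ...
            for i in range(n):
                for j in range(n):
                    self.table[i][j] = max(self.table[i][j], other.table[i][j])
            # ... then fold the peer's own row into ours: we now know everything
            # the peer knew when it sent its table.
            for j in range(n):
                self.table[self.server_id][j] = max(self.table[self.server_id][j],
                                                    other.table[other.server_id][j])

    def column_min_vals(table):
        # The minimum of column j is the clock value at server j that every server
        # is known to have reached; entries at or below it are safe to drop.
        return [min(row[j] for row in table) for j in range(len(table))]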
Example #8
from datetime import datetime
from threading import Lock

# SynergyProcess, PublishersPool, TimeTable, the pipelines, ProcessContext,
# CollectionContext, RepeatTimer and the TYPE_* constants are project-local
# imports that this page does not show.
class Scheduler(SynergyProcess):
    """ Scheduler encapsulates the logic for starting the aggregators/alerts/other readers """

    def __init__(self, process_name):
        super(Scheduler, self).__init__(process_name)
        self.logger.info('Starting %s' % self.process_name)
        self.publishers = PublishersPool(self.logger)
        self.thread_handlers = dict()
        self.lock = Lock()
        self.timetable = TimeTable(self.logger)
        self.regular_pipeline = RegularPipeline(self, self.timetable)
        self.hadoop_pipeline = HadoopPipeline(self, self.timetable)
        self.logger.info('Started %s' % self.process_name)


    def __del__(self):
        # Iterate over the handler objects, not the dict keys
        for handler in self.thread_handlers.values():
            handler.cancel()
        self.thread_handlers.clear()
        super(Scheduler, self).__del__()


    def _log_message(self, level, process_name, time_record, msg):
        """ method performs logging into log file and TimeTable node"""
        self.timetable.add_log_entry(process_name, time_record, datetime.utcnow(), msg)
        self.logger.log(level, msg)


    # **************** Scheduler Methods ************************
    @with_reconnect
    def start(self):
        """ reading scheduler configurations and starting timers to trigger events """
        collection = CollectionContext.get_collection(self.logger, COLLECTION_SCHEDULER_CONFIGURATION)
        cursor = collection.find({})
        if cursor.count() == 0:
            raise LookupError('MongoDB has no scheduler configuration entries')

        for entry in cursor:
            document = SchedulerConfigurationEntry(entry)
            interval = document.get_interval()
            is_active = document.get_process_state() == SchedulerConfigurationEntry.STATE_ON
            # avoid shadowing the builtin `type`
            process_type = ProcessContext.get_type(document.get_process_name())
            parameters = [document.get_process_name(), document]

            if process_type == TYPE_ALERT:
                function = self.fire_alert
            elif process_type in (TYPE_HORIZONTAL_AGGREGATOR, TYPE_VERTICAL_AGGREGATOR):
                function = self.fire_worker
            elif process_type == TYPE_GARBAGE_COLLECTOR:
                function = self.fire_garbage_collector
            else:
                self.logger.error('Cannot start scheduler for %s since it has no processing function' % process_type)
                continue

            handler = RepeatTimer(interval, function, args=parameters)
            self.thread_handlers[document.get_process_name()] = handler

            if is_active:
                handler.start()
                self.logger.info('Started scheduler for %s:%s, triggering every %d seconds'
                                 % (process_type, document.get_process_name(), interval))
            else:
                self.logger.info('Handler for %s:%s registered in Scheduler. Idle until activated.'
                                 % (process_type, document.get_process_name()))

        # as Scheduler is now initialized and running - we can safely start its MX
        self.start_mx()


    def start_mx(self):
        """ method's only purpose: import MX module (which has back-reference import to scheduler) and start it """
        from mx.mx import MX
        self.mx = MX(self)
        self.mx.start_mx_thread()


    def fire_worker(self, *args):
        """ Requests a vertical aggregator (hourly site, daily variant, etc.) to start up """
        process_name = args[0]
        with self.lock:  # guarantees release even on exceptions
            try:
                self.logger.info('%s {' % process_name)
                time_record = self.timetable.get_next_timetable_record(process_name)
                time_qualifier = ProcessContext.get_time_qualifier(process_name)

                if time_qualifier == ProcessContext.QUALIFIER_HOURLY:
                    self.regular_pipeline.manage_pipeline_for_process(process_name, time_record)
                else:
                    self.hadoop_pipeline.manage_pipeline_for_process(process_name, time_record)
            except (AMQPException, IOError) as e:
                self.logger.error('AMQPException: %s' % str(e), exc_info=True)
                self.publishers.reset_all_publishers(suppress_logging=True)
            except Exception as e:
                self.logger.error('Exception: %s' % str(e), exc_info=True)
            finally:
                self.logger.info('}')


    def fire_alert(self, *args):
        """ Triggers the AlertWorker. Makes sure its <dependent on> trees have
            finalized the corresponding timeperiods prior to that """
        process_name = args[0]
        with self.lock:
            try:
                self.logger.info('%s {' % process_name)
                time_record = self.timetable.get_next_timetable_record(process_name)
                self.hadoop_pipeline.manage_pipeline_with_blocking_dependencies(process_name, time_record)
            except (AMQPException, IOError) as e:
                self.logger.error('AMQPException: %s' % str(e), exc_info=True)
                self.publishers.reset_all_publishers(suppress_logging=True)
            except Exception as e:
                self.logger.error('Exception: %s' % str(e), exc_info=True)
            finally:
                self.logger.info('}')


    def fire_garbage_collector(self, *args):
        """ Fires the garbage collector to re-run all invalid records """
        process_name = args[0]
        with self.lock:
            try:
                self.logger.info('%s {' % process_name)

                self.publishers.get_publisher(process_name).publish({})
                self.logger.info('Publishing trigger for garbage_collector')
                self.timetable.build_tree()
                self.timetable.validate()
                self.logger.info('Validated Timetable for all trees')
            except (AMQPException, IOError) as e:
                self.logger.error('AMQPException: %s' % str(e), exc_info=True)
                self.publishers.reset_all_publishers(suppress_logging=True)
            except Exception as e:
                self.logger.error('fire_garbage_collector: %s' % str(e))
            finally:
                self.logger.info('}')
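RepeatTimer is likewise project code not shown on this page. Judging from how start() uses it (a constructor taking interval, function and args, plus start and cancel), a minimal stand-in built on threading.Timer might be:

    import threading

    # Hypothetical stand-in for the project's RepeatTimer; the interface is
    # inferred from Scheduler.start() above.
    class RepeatTimer:
        def __init__(self, interval, function, args=None):
            self.interval = interval
            self.function = function
            self.args = args if args is not None else []
            self._timer = None
            self._cancelled = False

        def _run(self):
            if self._cancelled:
                return
            self.function(*self.args)
            self._schedule()  # re-arm after each firing

        def _schedule(self):
            self._timer = threading.Timer(self.interval, self._run)
            self._timer.daemon = True
            self._timer.start()

        def start(self):
            self._schedule()

        def cancel(self):
            self._cancelled = True
            if self._timer is not None:
                self._timer.cancel()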