Exemplo n.º 1
0
 def process_remote_event(self, src_curs, dst_curs, ev):
     """Handle one replication event.

     Row-change events ('I'/'U'/'D' and their type-prefixed 'I:'/'U:'/'D:'
     variants) go through the SQL batcher; all other known event types
     flush pending SQL first so they apply in the original order.
     Unknown types are delegated to CascadedWorker.
     """
     # Lazy %-args: the message is only formatted when debug logging
     # is actually enabled (the old "msg % args" form paid the cost always).
     self.log.debug("New event: id=%s / type=%s / data=%s / extra1=%s",
                    ev.id, ev.type, ev.data, ev.extra1)
     if ev.type in ('I', 'U', 'D'):
         self.handle_data_event(ev, dst_curs)
     elif ev.type[:2] in ('I:', 'U:', 'D:'):
         self.handle_data_event(ev, dst_curs)
     elif ev.type == "R":
         self.flush_sql(dst_curs)
         self.handle_truncate_event(ev, dst_curs)
     elif ev.type == 'EXECUTE':
         self.flush_sql(dst_curs)
         self.handle_execute_event(ev, dst_curs)
     elif ev.type == 'londiste.add-table':
         self.flush_sql(dst_curs)
         self.add_set_table(dst_curs, ev.data)
     elif ev.type == 'londiste.remove-table':
         self.flush_sql(dst_curs)
         self.remove_set_table(dst_curs, ev.data)
     elif ev.type == 'londiste.remove-seq':
         self.flush_sql(dst_curs)
         self.remove_set_seq(dst_curs, ev.data)
     elif ev.type == 'londiste.update-seq':
         self.flush_sql(dst_curs)
         self.update_seq(dst_curs, ev)
     else:
         CascadedWorker.process_remote_event(self, src_curs, dst_curs, ev)
Exemplo n.º 2
0
    def process_remote_batch(self, src_db, tick_id, ev_list, dst_db):
        """All work for a batch.  Entry point from SetConsumer.

        The first half may open/close transactions freely; the event
        replay at the end must happen in one open transaction on the
        destination so the cascade consumer can save the last tick and
        commit together with the data.
        """

        # this part can play freely with transactions

        self.sync_database_encodings(src_db, dst_db)

        # remember the tick window of this batch
        self.cur_tick = self._batch_info['tick_id']
        self.prev_tick = self._batch_info['prev_tick_id']

        dst_curs = dst_db.cursor()
        self.load_table_state(dst_curs)
        self.sync_tables(src_db, dst_db)

        # NOTE(review): presumably clears leftover state from snapshot
        # copy -- confirm against the helper's definition.
        self.copy_snapshot_cleanup(dst_db)

        # only main thread is allowed to restore fkeys
        if not self.copy_thread:
            self.restore_fkeys(dst_db)

        # now the actual event processing happens.
        # they must be done all in one tx in dst side
        # and the transaction must be kept open so that
        # the cascade-consumer can save last tick and commit.

        self.sql_list = []
        CascadedWorker.process_remote_batch(self, src_db, tick_id, ev_list, dst_db)
        self.flush_sql(dst_curs)

        # finalize table changes
        self.save_table_state(dst_curs)
Exemplo n.º 3
0
    def process_root_node(self, dst_db):
        """Root node: push pending sequence changes into the queue."""
        # Let the generic cascaded worker do its root-node duties first.
        CascadedWorker.process_root_node(self, dst_db)
        self.exec_cmd(dst_db,
                      "select * from londiste.root_check_seqs(%s)",
                      [self.queue_name])
Exemplo n.º 4
0
 def process_remote_event(self, src_curs, dst_curs, ev):
     """Handle one replication event.

     Data events are batched via handle_data_event(); other known event
     types flush pending SQL first so ordering is preserved.  Unknown
     types fall through to CascadedWorker.
     """
     # Lazy %-args instead of eager "msg % args": no formatting cost
     # when debug logging is disabled.
     self.log.debug("New event: id=%s / type=%s / data=%s / extra1=%s",
                    ev.id, ev.type, ev.data, ev.extra1)
     if ev.type in ('I', 'U', 'D'):
         self.handle_data_event(ev, dst_curs)
     elif ev.type[:2] in ('I:', 'U:', 'D:'):
         self.handle_data_event(ev, dst_curs)
     elif ev.type == "R":
         self.flush_sql(dst_curs)
         self.handle_truncate_event(ev, dst_curs)
     elif ev.type == 'EXECUTE':
         self.flush_sql(dst_curs)
         self.handle_execute_event(ev, dst_curs)
     elif ev.type == 'londiste.add-table':
         self.flush_sql(dst_curs)
         self.add_set_table(dst_curs, ev.data)
     elif ev.type == 'londiste.remove-table':
         self.flush_sql(dst_curs)
         self.remove_set_table(dst_curs, ev.data)
     elif ev.type == 'londiste.remove-seq':
         self.flush_sql(dst_curs)
         self.remove_set_seq(dst_curs, ev.data)
     elif ev.type == 'londiste.update-seq':
         self.flush_sql(dst_curs)
         self.update_seq(dst_curs, ev)
     else:
         CascadedWorker.process_remote_event(self, src_curs, dst_curs, ev)
Exemplo n.º 5
0
    def process_root_node(self, dst_db):
        """On the root node, send sequence changes to the queue."""
        CascadedWorker.process_root_node(self, dst_db)

        # Ask the database side to detect and publish seq changes.
        sql = "select * from londiste.root_check_seqs(%s)"
        self.exec_cmd(dst_db, sql, [self.queue_name])
Exemplo n.º 6
0
    def process_remote_batch(self, src_db, tick_id, ev_list, dst_db):
        """All work for a batch.  Entry point from SetConsumer.

        The early phase may commit freely; the event replay must run in
        one open destination transaction so the cascade consumer can
        save the last tick and commit atomically with the data.
        """

        # this part can play freely with transactions

        # one-time schema/code version check per process
        if not self.code_check_done:
            self.check_code(dst_db)
            self.code_check_done = 1

        self.sync_database_encodings(src_db, dst_db)

        # remember the tick window of this batch
        self.cur_tick = self.batch_info["tick_id"]
        self.prev_tick = self.batch_info["prev_tick_id"]

        dst_curs = dst_db.cursor()
        self.load_table_state(dst_curs)
        self.sync_tables(src_db, dst_db)

        self.copy_snapshot_cleanup(dst_db)

        # only main thread is allowed to restore fkeys
        if not self.copy_thread:
            self.restore_fkeys(dst_db)

        # drop handler plugins left over from a previous batch
        for p in self.used_plugins.values():
            p.reset()
        self.used_plugins = {}

        # now the actual event processing happens.
        # they must be done all in one tx in dst side
        # and the transaction must be kept open so that
        # the cascade-consumer can save last tick and commit.

        self.sql_list = []
        CascadedWorker.process_remote_batch(self, src_db, tick_id, ev_list, dst_db)
        self.flush_sql(dst_curs)

        # give each plugin used during replay a chance to finalize
        for p in self.used_plugins.values():
            p.finish_batch(self.batch_info, dst_curs)
        self.used_plugins = {}

        # finalize table changes
        self.save_table_state(dst_curs)

        # store event filter
        if self.cf.getboolean("local_only", False):
            # values are SQL-quoted with skytools.quote_literal before
            # being embedded in the filter expression
            if self.copy_thread:
                _filterlist = skytools.quote_literal(self.copy_table_name)
            else:
                _filterlist = ",".join(map(skytools.quote_literal, self.table_map.keys()))
            self.consumer_filter = (
                """
((ev_type like 'pgq%%' or ev_type like 'londiste%%')
or (ev_extra1 in (%s)))
"""
                % _filterlist
            )
        else:
            self.consumer_filter = None
Exemplo n.º 7
0
    def process_remote_batch(self, src_db, tick_id, ev_list, dst_db):
        """All work for a batch.  Entry point from SetConsumer.

        The setup phase may commit freely; the replay at the end must
        stay in one open destination transaction so the cascade
        consumer can record the tick together with the data.
        """

        # this part can play freely with transactions

        # one-time schema/code version check per process
        if not self.code_check_done:
            self.check_code(dst_db)
            self.code_check_done = 1

        self.sync_database_encodings(src_db, dst_db)

        # remember the tick window of this batch
        self.cur_tick = self.batch_info['tick_id']
        self.prev_tick = self.batch_info['prev_tick_id']

        dst_curs = dst_db.cursor()
        self.load_table_state(dst_curs)
        self.sync_tables(src_db, dst_db)

        self.copy_snapshot_cleanup(dst_db)

        # only main thread is allowed to restore fkeys
        if not self.copy_thread:
            self.restore_fkeys(dst_db)

        # drop handler plugins left over from a previous batch
        for p in self.used_plugins.values():
            p.reset()
        self.used_plugins = {}

        # now the actual event processing happens.
        # they must be done all in one tx in dst side
        # and the transaction must be kept open so that
        # the cascade-consumer can save last tick and commit.

        self.sql_list = []
        CascadedWorker.process_remote_batch(self, src_db, tick_id, ev_list,
                                            dst_db)
        self.flush_sql(dst_curs)

        # let each plugin used during replay finalize its work
        for p in self.used_plugins.values():
            p.finish_batch(self.batch_info, dst_curs)
        self.used_plugins = {}

        # finalize table changes
        self.save_table_state(dst_curs)

        # store event filter
        if self.cf.getboolean('local_only', False):
            # table names are SQL-quoted before being embedded in the filter
            if self.copy_thread:
                _filterlist = skytools.quote_literal(self.copy_table_name)
            else:
                _filterlist = ','.join(
                    map(skytools.quote_literal, self.table_map.keys()))
            self.consumer_filter = """
((ev_type like 'pgq%%' or ev_type like 'londiste%%')
or (ev_extra1 in (%s)))
""" % _filterlist
        else:
            self.consumer_filter = None
Exemplo n.º 8
0
    def process_remote_event(self, src_curs, dst_curs, ev):
        """Accumulate row-change events per table; delegate the rest."""
        # Anything that is not a typed data event goes to the generic worker.
        if ev.type[:2] not in ('I:', 'U:', 'D:'):
            CascadedWorker.process_remote_event(self, src_curs, dst_curs, ev)
            return

        table = ev.extra1
        if table not in self.table_state:
            self.init_state(table)
        self.table_state[table].add(dst_curs, ev, self._batch_info)
Exemplo n.º 9
0
    def process_remote_event(self, src_curs, dst_curs, ev):
        """Route data events into per-table state; others to the base class."""
        prefix = ev.type[:2]
        if prefix in ('I:', 'U:', 'D:'):
            # lazily create per-table state on first sight of the table
            tbl = ev.extra1
            if tbl not in self.table_state:
                self.init_state(tbl)
            self.table_state[tbl].add(dst_curs, ev, self._batch_info)
        else:
            CascadedWorker.process_remote_event(self, src_curs, dst_curs, ev)
Exemplo n.º 10
0
    def __init__(self, args):
        """Replication init.

        Sets up per-table bookkeeping and reads 'parallel_copies' from
        config; values below 1 are rejected.
        """
        CascadedWorker.__init__(self, 'londiste', 'db', args)

        # known tables: ordered list plus name -> info mapping
        self.table_list = []
        self.table_map = {}

        # 0 = not a copy thread by default
        self.copy_thread = 0
        self.set_name = self.queue_name

        self.parallel_copies = self.cf.getint('parallel_copies', 1)
        if self.parallel_copies < 1:
            # ValueError is more precise than bare Exception and is
            # still caught by any caller catching Exception.
            raise ValueError('Bad value for parallel_copies: %d' % self.parallel_copies)
Exemplo n.º 11
0
    def __init__(self, args):
        """Replication init.

        Sets up per-table bookkeeping, the handler-plugin cache, and
        validates the 'parallel_copies' config value.
        """
        CascadedWorker.__init__(self, "londiste3", "db", args)

        # known tables: ordered list plus name -> info mapping
        self.table_list = []
        self.table_map = {}

        # 0 = not a copy thread by default
        self.copy_thread = 0
        self.set_name = self.queue_name
        # handler plugins touched during the current batch
        self.used_plugins = {}

        self.parallel_copies = self.cf.getint("parallel_copies", 1)
        if self.parallel_copies < 1:
            # ValueError is more precise than bare Exception and is
            # still caught by any caller catching Exception.
            raise ValueError("Bad value for parallel_copies: %d" % self.parallel_copies)

        load_handlers(self.cf)
Exemplo n.º 12
0
    def __init__(self, args):
        """Replication init.

        Sets up per-table bookkeeping, the handler-plugin cache, and
        validates the 'parallel_copies' config value.
        """
        CascadedWorker.__init__(self, 'londiste3', 'db', args)

        # known tables: ordered list plus name -> info mapping
        self.table_list = []
        self.table_map = {}

        # 0 = not a copy thread by default
        self.copy_thread = 0
        self.set_name = self.queue_name
        # handler plugins touched during the current batch
        self.used_plugins = {}

        self.parallel_copies = self.cf.getint('parallel_copies', 1)
        if self.parallel_copies < 1:
            # ValueError is more precise than bare Exception and is
            # still caught by any caller catching Exception.
            raise ValueError('Bad value for parallel_copies: %d' % self.parallel_copies)

        load_handlers(self.cf)
Exemplo n.º 13
0
    def __init__(self, args):
        """Replication init.

        Sets up per-table bookkeeping, the handler-plugin cache, the
        consumer filter, and validates the 'parallel_copies' value.
        """
        CascadedWorker.__init__(self, 'londiste3', 'db', args)

        # known tables: ordered list plus name -> info mapping
        self.table_list = []
        self.table_map = {}

        # 0 = not a copy thread by default
        self.copy_thread = 0
        self.set_name = self.queue_name
        # handler plugins touched during the current batch
        self.used_plugins = {}

        self.parallel_copies = self.cf.getint('parallel_copies', 1)
        if self.parallel_copies < 1:
            # ValueError is more precise than bare Exception and is
            # still caught by any caller catching Exception.
            raise ValueError('Bad value for parallel_copies: %d' % self.parallel_copies)

        # no event filter until process_remote_batch computes one
        self.consumer_filter = None

        load_handler_modules(self.cf)
Exemplo n.º 14
0
    def process_remote_batch(self, src_db, tick_id, ev_list, dst_db):
        """All work for a batch.  Entry point from SetConsumer.

        The setup phase may commit freely; the replay at the end must
        stay in one open destination transaction so the cascade
        consumer can record the tick together with the data.
        """

        # this part can play freely with transactions

        self.sync_database_encodings(src_db, dst_db)

        # remember the tick window of this batch
        # NOTE(review): ticks come from self._batch_info here but
        # finish_batch below reads self.batch_info -- confirm both
        # attributes exist and agree.
        self.cur_tick = self._batch_info['tick_id']
        self.prev_tick = self._batch_info['prev_tick_id']

        dst_curs = dst_db.cursor()
        self.load_table_state(dst_curs)
        self.sync_tables(src_db, dst_db)

        self.copy_snapshot_cleanup(dst_db)

        # only main thread is allowed to restore fkeys
        if not self.copy_thread:
            self.restore_fkeys(dst_db)


        # drop handler plugins left over from a previous batch
        for p in self.used_plugins.values():
            p.reset()
        self.used_plugins = {}

        # now the actual event processing happens.
        # they must be done all in one tx in dst side
        # and the transaction must be kept open so that
        # the cascade-consumer can save last tick and commit.

        self.sql_list = []
        CascadedWorker.process_remote_batch(self, src_db, tick_id, ev_list, dst_db)
        self.flush_sql(dst_curs)

        # NOTE(review): sibling versions of this method call
        # p.finish_batch(self.batch_info, dst_curs); here dst_curs is
        # omitted -- verify against the plugin API.
        for p in self.used_plugins.values():
            p.finish_batch(self.batch_info)
        self.used_plugins = {}

        # finalize table changes
        self.save_table_state(dst_curs)
Exemplo n.º 15
0
    def process_remote_event(self, src_curs, dst_curs, ev):
        """Handle one replication event.

        Data events are batched via handle_data_event(); other known
        event types flush pending SQL first so ordering is preserved.
        Unknown types fall through to CascadedWorker.
        """

        # Lazy %-args instead of eager "msg % args": no formatting
        # cost when debug logging is disabled.
        self.log.debug("New event: id=%s / type=%s / data=%s / extra1=%s",
                       ev.id, ev.type, ev.data, ev.extra1)

        # set current_event only if processing them one-by-one
        if self.work_state < 0:
            self.current_event = ev

        if ev.type in ('I', 'U', 'D'):
            self.handle_data_event(ev, dst_curs)
        elif ev.type[:2] in ('I:', 'U:', 'D:'):
            self.handle_data_event(ev, dst_curs)
        elif ev.type == "R":
            self.flush_sql(dst_curs)
            self.handle_truncate_event(ev, dst_curs)
        elif ev.type == 'EXECUTE':
            self.flush_sql(dst_curs)
            self.handle_execute_event(ev, dst_curs)
        elif ev.type == 'londiste.add-table':
            self.flush_sql(dst_curs)
            self.add_set_table(dst_curs, ev.data)
        elif ev.type == 'londiste.remove-table':
            self.flush_sql(dst_curs)
            self.remove_set_table(dst_curs, ev.data)
        elif ev.type == 'londiste.remove-seq':
            self.flush_sql(dst_curs)
            self.remove_set_seq(dst_curs, ev.data)
        elif ev.type == 'londiste.update-seq':
            self.flush_sql(dst_curs)
            self.update_seq(dst_curs, ev)
        else:
            CascadedWorker.process_remote_event(self, src_curs, dst_curs, ev)

        # no point keeping it around longer
        self.current_event = None
Exemplo n.º 16
0
    def process_remote_event(self, src_curs, dst_curs, ev):
        """handle one event"""

        self.log.debug("New event: id=%s / type=%s / data=%s / extra1=%s", ev.id, ev.type, ev.data, ev.extra1)

        # set current_event only if processing them one-by-one
        if self.work_state < 0:
            self.current_event = ev

        kind = ev.type
        if kind in ('I', 'U', 'D') or kind[:2] in ('I:', 'U:', 'D:'):
            # row change: goes through the SQL batcher, no flush needed
            self.handle_data_event(ev, dst_curs)
        elif kind in ('R', 'EXECUTE', 'londiste.add-table',
                      'londiste.remove-table', 'londiste.remove-seq',
                      'londiste.update-seq'):
            # admin events must see all previously batched data applied
            self.flush_sql(dst_curs)
            if kind == 'R':
                self.handle_truncate_event(ev, dst_curs)
            elif kind == 'EXECUTE':
                self.handle_execute_event(ev, dst_curs)
            elif kind == 'londiste.add-table':
                self.add_set_table(dst_curs, ev.data)
            elif kind == 'londiste.remove-table':
                self.remove_set_table(dst_curs, ev.data)
            elif kind == 'londiste.remove-seq':
                self.remove_set_seq(dst_curs, ev.data)
            else:
                self.update_seq(dst_curs, ev)
        else:
            CascadedWorker.process_remote_event(self, src_curs, dst_curs, ev)

        # no point keeping it around longer
        self.current_event = None
Exemplo n.º 17
0
 def copy_event(self, dst_curs, ev, filtered_copy):
     """Forward events downstream; in filtered mode drop admin events."""
     # Equivalent to the ev.type[:9] slice test: prefix match for
     # 'londiste.' and exact match for the fixed-length names.
     if filtered_copy and (ev.type.startswith('londiste.')
                           or ev.type in ('EXECUTE', 'TRUNCATE')):
         return
     CascadedWorker.copy_event(self, dst_curs, ev, filtered_copy)
Exemplo n.º 18
0
 def reset(self):
     """Error-recovery hook: forget cached per-table state."""
     # Fresh empty cache, then let the base worker clear its own state.
     self.table_state = dict()
     CascadedWorker.reset(self)
Exemplo n.º 19
0
 def finish_remote_batch(self, src_db, dst_db, tick_id):
     """Flush buffered per-table work, then finish the batch normally."""
     cursor = dst_db.cursor()
     for state in self.table_state.values():
         state.flush(cursor)
     CascadedWorker.finish_remote_batch(self, src_db, dst_db, tick_id)
Exemplo n.º 20
0
 def reset(self):
     """Drop cached table state on error, then reset the base worker."""
     self.table_state = dict()
     CascadedWorker.reset(self)
Exemplo n.º 21
0
 def copy_event(self, dst_curs, ev, filtered_copy):
     """Forward events to the target queue, optionally filtered.

     In filtered mode londiste administrative events are skipped so
     only the remaining events travel down.
     """
     # startswith('londiste.') is equivalent to ev.type[:9] == 'londiste.'
     if filtered_copy and ev.type.startswith('londiste.'):
         return
     CascadedWorker.copy_event(self, dst_curs, ev, filtered_copy)
Exemplo n.º 22
0
 def finish_remote_batch(self, src_db, dst_db, tick_id):
     """Write out all buffered per-table changes before finishing."""
     dst_cursor = dst_db.cursor()
     for tbl_state in self.table_state.values():
         tbl_state.flush(dst_cursor)
     CascadedWorker.finish_remote_batch(self, src_db, dst_db, tick_id)