Example #1
    def process_remote_batch(self, src_db, tick_id, ev_list, dst_db):
        "All work for a batch.  Entry point from SetConsumer."

        # this part can play freely with transactions

        self.sync_database_encodings(src_db, dst_db)

        self.cur_tick = self._batch_info['tick_id']
        self.prev_tick = self._batch_info['prev_tick_id']

        dst_curs = dst_db.cursor()
        self.load_table_state(dst_curs)
        self.sync_tables(src_db, dst_db)

        self.copy_snapshot_cleanup(dst_db)

        # only main thread is allowed to restore fkeys
        if not self.copy_thread:
            self.restore_fkeys(dst_db)

        # now the actual event processing happens.
        # they must be done all in one tx in dst side
        # and the transaction must be kept open so that
        # the cascade-consumer can save last tick and commit.

        self.sql_list = []
        CascadedWorker.process_remote_batch(self, src_db, tick_id, ev_list, dst_db)
        self.flush_sql(dst_curs)

        # finalize table changes
        self.save_table_state(dst_curs)
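
The comments in example #1 describe a buffer-then-flush pattern: statements for the batch are collected in self.sql_list and executed inside the single open destination transaction. Below is a minimal sketch of that pattern under assumed names (SqlBuffer, add_sql, max_bytes); it is an illustration, not the Londiste implementation of flush_sql.

    class SqlBuffer:
        """Illustrative buffer: queue SQL for a batch, run it in one go."""

        def __init__(self, max_bytes=1024 * 1024):
            self.sql_list = []          # statements queued for the destination tx
            self.sql_bytes = 0          # rough size of the queued SQL
            self.max_bytes = max_bytes  # flush early if the buffer grows too large

        def add_sql(self, sql, dst_curs):
            self.sql_list.append(sql)
            self.sql_bytes += len(sql)
            if self.sql_bytes >= self.max_bytes:
                self.flush_sql(dst_curs)

        def flush_sql(self, dst_curs):
            if not self.sql_list:
                return
            # everything runs inside the already-open destination transaction;
            # the commit is left to the cascade consumer, as the comments note
            dst_curs.execute(";\n".join(self.sql_list))
            self.sql_list = []
            self.sql_bytes = 0
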
Example #2
    def process_remote_batch(self, src_db, tick_id, ev_list, dst_db):
        "All work for a batch.  Entry point from SetConsumer."

        # this part can play freely with transactions

        if not self.code_check_done:
            self.check_code(dst_db)
            self.code_check_done = 1

        self.sync_database_encodings(src_db, dst_db)

        self.cur_tick = self.batch_info["tick_id"]
        self.prev_tick = self.batch_info["prev_tick_id"]

        dst_curs = dst_db.cursor()
        self.load_table_state(dst_curs)
        self.sync_tables(src_db, dst_db)

        self.copy_snapshot_cleanup(dst_db)

        # only main thread is allowed to restore fkeys
        if not self.copy_thread:
            self.restore_fkeys(dst_db)

        for p in self.used_plugins.values():
            p.reset()
        self.used_plugins = {}

        # now the actual event processing happens.
        # they must be done all in one tx in dst side
        # and the transaction must be kept open so that
        # the cascade-consumer can save last tick and commit.

        self.sql_list = []
        CascadedWorker.process_remote_batch(self, src_db, tick_id, ev_list, dst_db)
        self.flush_sql(dst_curs)

        for p in self.used_plugins.values():
            p.finish_batch(self.batch_info, dst_curs)
        self.used_plugins = {}

        # finalize table changes
        self.save_table_state(dst_curs)

        # store event filter
        if self.cf.getboolean("local_only", False):
            if self.copy_thread:
                _filterlist = skytools.quote_literal(self.copy_table_name)
            else:
                _filterlist = ",".join(map(skytools.quote_literal, self.table_map.keys()))
            self.consumer_filter = (
                """
((ev_type like 'pgq%%' or ev_type like 'londiste%%')
or (ev_extra1 in (%s)))
"""
                % _filterlist
            )
        else:
            self.consumer_filter = None
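
The local_only branch in example #2 builds a SQL event filter out of quoted table names. The snippet below replays that construction with made-up tables to show what the resulting filter looks like; skytools.quote_literal is the real library call, everything else (table names, printing the result) is illustrative.

    import skytools

    # hypothetical contents; in the worker this comes from the loaded table state
    table_map = {"public.orders": None, "public.customers": None}

    _filterlist = ",".join(map(skytools.quote_literal, table_map.keys()))
    consumer_filter = """
    ((ev_type like 'pgq%%' or ev_type like 'londiste%%')
    or (ev_extra1 in (%s)))
    """ % _filterlist

    # keeps pgq/londiste control events plus events whose ev_extra1
    # (the table name) is one of the locally tracked tables
    print(consumer_filter)
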
Example #3
    def process_remote_batch(self, src_db, tick_id, ev_list, dst_db):
        "All work for a batch.  Entry point from SetConsumer."

        # this part can play freely with transactions

        if not self.code_check_done:
            self.check_code(dst_db)
            self.code_check_done = 1

        self.sync_database_encodings(src_db, dst_db)

        self.cur_tick = self.batch_info['tick_id']
        self.prev_tick = self.batch_info['prev_tick_id']

        dst_curs = dst_db.cursor()
        self.load_table_state(dst_curs)
        self.sync_tables(src_db, dst_db)

        self.copy_snapshot_cleanup(dst_db)

        # only main thread is allowed to restore fkeys
        if not self.copy_thread:
            self.restore_fkeys(dst_db)

        for p in self.used_plugins.values():
            p.reset()
        self.used_plugins = {}

        # now the actual event processing happens.
        # they must be done all in one tx in dst side
        # and the transaction must be kept open so that
        # the cascade-consumer can save last tick and commit.

        self.sql_list = []
        CascadedWorker.process_remote_batch(self, src_db, tick_id, ev_list,
                                            dst_db)
        self.flush_sql(dst_curs)

        for p in self.used_plugins.values():
            p.finish_batch(self.batch_info, dst_curs)
        self.used_plugins = {}

        # finalize table changes
        self.save_table_state(dst_curs)

        # store event filter
        if self.cf.getboolean('local_only', False):
            if self.copy_thread:
                _filterlist = skytools.quote_literal(self.copy_table_name)
            else:
                _filterlist = ','.join(
                    map(skytools.quote_literal, self.table_map.keys()))
            self.consumer_filter = """
((ev_type like 'pgq%%' or ev_type like 'londiste%%')
or (ev_extra1 in (%s)))
""" % _filterlist
        else:
            self.consumer_filter = None
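
Examples #2 and #3 reset every used plugin before event processing and call finish_batch(batch_info, dst_curs) once the batch SQL is flushed. The stub below is a hypothetical plugin written against that lifecycle; it is not the real Londiste handler interface, and the batch_stats table is invented.

    class ExamplePlugin:
        """Hypothetical plugin following the reset()/finish_batch() lifecycle."""

        def __init__(self):
            self.rows_seen = 0

        def reset(self):
            # drop any per-batch state before the next batch starts
            self.rows_seen = 0

        def process_event(self, ev):
            # called while the worker walks the event list (illustrative)
            self.rows_seen += 1

        def finish_batch(self, batch_info, dst_curs):
            # last chance to emit SQL inside the still-open destination tx
            dst_curs.execute(
                "insert into batch_stats (tick_id, rows) values (%s, %s)",
                (batch_info["tick_id"], self.rows_seen),
            )
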
Example #4
    def process_remote_batch(self, src_db, tick_id, ev_list, dst_db):
        "All work for a batch.  Entry point from SetConsumer."

        # this part can play freely with transactions

        self.sync_database_encodings(src_db, dst_db)

        self.cur_tick = self._batch_info['tick_id']
        self.prev_tick = self._batch_info['prev_tick_id']

        dst_curs = dst_db.cursor()
        self.load_table_state(dst_curs)
        self.sync_tables(src_db, dst_db)

        self.copy_snapshot_cleanup(dst_db)

        # only main thread is allowed to restore fkeys
        if not self.copy_thread:
            self.restore_fkeys(dst_db)

        for p in self.used_plugins.values():
            p.reset()
        self.used_plugins = {}

        # now the actual event processing happens.
        # they must be done all in one tx in dst side
        # and the transaction must be kept open so that
        # the cascade-consumer can save last tick and commit.

        self.sql_list = []
        CascadedWorker.process_remote_batch(self, src_db, tick_id, ev_list, dst_db)
        self.flush_sql(dst_curs)

        for p in self.used_plugins.values():
            p.finish_batch(self.batch_info)
        self.used_plugins = {}

        # finalize table changes
        self.save_table_state(dst_curs)
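
All four examples stress the same contract: the whole batch runs in one destination-side transaction that stays open so the cascade consumer can save the last tick and commit. A rough sketch of that calling pattern, with invented names (run_one_batch, completed_tick) standing in for the real pgq bookkeeping:

    def run_one_batch(worker, src_db, tick_id, ev_list, dst_db):
        # all batch work happens inside one destination transaction
        worker.process_remote_batch(src_db, tick_id, ev_list, dst_db)

        # still inside that same transaction: record the tick, then commit once
        dst_curs = dst_db.cursor()
        dst_curs.execute(
            "update completed_tick set last_tick = %s where consumer = %s",
            (tick_id, "londiste_worker"),
        )
        dst_db.commit()   # one commit finalizes events, table state and the tick
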