def start_sync(self, broker: Broker):
    """Stream MySQL binlog events into *broker*, persisting the replication position.

    Resolves the starting binlog coordinates (saved slave position, then the
    configured initial position, then the server's current position), marks
    tables without a usable single-column primary key as delete/update-skipped,
    and then forwards each binlog event for a watched schema/table pair to the
    broker while advancing the saved slave position.
    """
    cfg = self.settings
    binlog_file, binlog_pos = self.pos_handler.get_log_pos()
    if not (binlog_file and binlog_pos):
        # No saved slave position: fall back to the configured start, and if
        # that is unset too, ask the server for its current coordinates.
        binlog_file = cfg.init_binlog_file
        binlog_pos = cfg.init_binlog_pos
        if not (binlog_file and binlog_pos):
            binlog_file, binlog_pos = self.get_binlog_pos()
        self.pos_handler.set_log_pos_slave(binlog_file, binlog_pos)
    binlog_pos = int(binlog_pos)
    logger.info(f"mysql binlog: {binlog_file}:{binlog_pos}")

    sent_count = 0
    window_start = 0
    watched = cfg.schema_table
    all_tables = []
    for schema_name, table_list in watched.items():
        for tbl in table_list:
            primary = self.get_primary_key(schema_name, tbl)
            if not primary or isinstance(primary, tuple):
                # skip delete and update when no pk and composite pk
                cfg.skip_delete_tables.add(f"{schema_name}.{tbl}")
        all_tables.extend(table_list)
    only_schemas = list(watched.keys())
    only_tables = list(set(all_tables))

    for schema, table, event, cur_file, cur_pos in self._binlog_reading(
        only_tables=only_tables,
        only_schemas=only_schemas,
        log_file=binlog_file,
        log_pos=binlog_pos,
        server_id=cfg.mysql_server_id,
        skip_dmls=cfg.skip_dmls,
        skip_delete_tables=cfg.skip_delete_tables,
        skip_update_tables=cfg.skip_update_tables,
    ):
        allowed = watched.get(schema)
        # Drop events for schemas/tables that are not configured for syncing.
        if not allowed or (table and table not in allowed):
            continue
        broker.send(msg=event, schema=schema)
        self.pos_handler.set_log_pos_slave(cur_file, cur_pos)
        logger.debug(f"send to queue success: key:{schema},event:{event}")
        logger.debug(f"success set binlog pos:{cur_file}:{cur_pos}")

        # Periodically report throughput over each insert_interval window.
        now = int(time.time())
        sent_count += 1
        if window_start == 0:
            window_start = now
        if now - window_start >= cfg.insert_interval:
            logger.info(f"success send {sent_count} events in {cfg.insert_interval} seconds")
            window_start = sent_count = 0
def start_sync(self, broker: Broker):
    """Stream MySQL binlog events into *broker*, persisting the replication position.

    Resolves the starting binlog coordinates (saved slave position, then the
    configured initial position, then the server's current position), builds
    the schema→tables map from the source_db config, marks tables without a
    usable single-column primary key as delete/update-skipped, then forwards
    each watched binlog event to the broker and advances the saved position.
    """
    log_file, log_pos = self.pos_handler.get_log_pos()
    if not (log_file and log_pos):
        # No saved slave position: fall back to the configured start, and if
        # that is unset too, ask the server for its current coordinates.
        log_file = self.init_binlog_file
        log_pos = self.init_binlog_pos
        if not (log_file and log_pos):
            log_file, log_pos = self.get_binlog_pos()
        self.pos_handler.set_log_pos_slave(log_file, log_pos)
    log_pos = int(log_pos)
    logger.info(f"mysql binlog: {log_file}:{log_pos}")

    tables = []
    schema_tables = {}
    for database in self.source_db.get("databases"):
        database_name = database.get("database")
        for table in database.get("tables"):
            table_name = table.get("table")
            schema_tables.setdefault(database_name, []).append(table_name)
            pk = self.get_primary_key(database_name, table_name)
            if not pk or isinstance(pk, tuple):
                # skip delete and update when no pk and composite pk
                database_table = f"{database_name}.{table_name}"
                # Bugfix: previously tested `database_table not in database_table`
                # (a string always contains itself), so the append never ran and
                # pk-less / composite-pk tables were not actually skipped.
                if database_table not in self.skip_delete_tables:
                    self.skip_delete_tables.append(database_table)
            tables.append(table_name)
    only_schemas = self.databases
    only_tables = list(set(tables))

    for schema, table, event, file, pos in self._binlog_reading(
        only_tables=only_tables,
        only_schemas=only_schemas,
        log_file=log_file,
        log_pos=log_pos,
        server_id=self.server_id,
        skip_dmls=self.skip_dmls,
        skip_delete_tables=self.skip_delete_tables,
        skip_update_tables=self.skip_update_tables,
    ):
        # Guard the lookup: an event from an untracked schema previously made
        # `schema_tables.get(schema)` return None and the `in` test raise TypeError.
        watched = schema_tables.get(schema)
        if not watched or (table and table not in watched):
            continue
        event["values"] = self.deep_decode_dict(event["values"])
        broker.send(msg=event, schema=schema)
        self.pos_handler.set_log_pos_slave(file, pos)
        logger.debug(f"send to queue success: key:{schema},event:{event}")
        logger.debug(f"success set binlog pos:{file}:{pos}")
        self.after_send(schema, table)
def _consumer(self, broker: Broker, database: str, msg: ReplicationMessage):
    """Translate one wal2json replication message into broker events.

    Updates are emitted as a delete (action_seq 1) followed by an insert
    (action_seq 2); deletes use the old key values from ``oldkeys``.  Events
    for tables listed in the skip_* settings are dropped.  After sending, the
    consumed LSN is flushed back to the replication slot and recorded.
    """
    payload = json.loads(msg.payload)
    change = payload.get("change")
    if not change:
        return
    change = change[0]
    kind = change.get("kind")
    table = change.get("table")
    columnnames = change.get("columnnames")
    columnvalues = change.get("columnvalues")
    oldkeys = change.get("oldkeys")
    skip_dml_table_name = f"{database}.{table}"
    delete_event = event = None
    if kind == "update":
        values = dict(zip(columnnames, columnvalues))
        if (
            "update" not in self.skip_dmls
            and skip_dml_table_name not in self.skip_update_tables
        ):
            delete_event = {
                "table": table,
                "schema": database,
                "action": "delete",
                "values": values,
                "event_unixtime": int(time.time() * 10 ** 6),
                "action_seq": 1,
            }
            event = {
                "table": table,
                "schema": database,
                "action": "insert",
                "values": values,
                "event_unixtime": int(time.time() * 10 ** 6),
                "action_seq": 2,
            }
    elif kind == "delete":
        # delete changes carry only the old key values, not full column lists
        values = dict(zip(oldkeys.get("keynames"), oldkeys.get("keyvalues")))
        if (
            "delete" not in self.skip_dmls
            and skip_dml_table_name not in self.skip_delete_tables
        ):
            event = {
                "table": table,
                "schema": database,
                "action": "delete",
                "values": values,
                "event_unixtime": int(time.time() * 10 ** 6),
                "action_seq": 1,
            }
    elif kind == "insert":
        values = dict(zip(columnnames, columnvalues))
        event = {
            "table": table,
            "schema": database,
            "action": "insert",
            "values": values,
            "event_unixtime": int(time.time() * 10 ** 6),
            "action_seq": 2,
        }
    else:
        return
    # Bugfix: when the event was filtered out by skip_dmls/skip_*_tables,
    # `event` is None and the unconditional decode raised TypeError.
    # (In the update case delete_event shares the same values dict, so one
    # decode covers both events, as before.)
    if event:
        event["values"] = self.deep_decode_dict(event["values"])
    if delete_event:
        broker.send(msg=delete_event, schema=database)
    if event:
        broker.send(msg=event, schema=database)
    msg.cursor.send_feedback(flush_lsn=msg.data_start)
    logger.debug(f"send to queue success: key:{database},event:{event}")
    logger.debug(f"success flush lsn: {msg.data_start}")
    with self.lock:
        self.lsn = msg.data_start
    self.after_send(database, table)
def __init__(self, alias: str):
    # Initialize both base classes explicitly (not via super()) so each base's
    # __init__ receives exactly the arguments it expects: Broker takes the
    # broker alias, Redis takes none.
    # NOTE(review): explicit dual init bypasses cooperative MRO dispatch —
    # presumably intentional here; confirm neither base relies on super() chaining.
    Broker.__init__(self, alias)
    Redis.__init__(self)
def _consumer(self, broker: Broker, database: str, msg: ReplicationMessage):
    """Translate one wal2json replication message into broker events.

    Updates are emitted as a delete (action_core "1") followed by an insert
    (action_core "2"); deletes use the old key values from ``oldkeys``.  Events
    for tables listed in the skip_* settings are dropped.  After sending, the
    consumed LSN is flushed back to the replication slot, recorded under the
    lock, and throughput is reported once per insert_interval window.
    """
    payload = json.loads(msg.payload)
    change = payload.get("change")
    if not change:
        return
    change = change[0]
    kind = change.get("kind")
    table = change.get("table")
    columnnames = change.get("columnnames")
    columnvalues = change.get("columnvalues")
    oldkeys = change.get("oldkeys")
    skip_dml_table_name = f"{database}.{table}"
    delete_event = event = None
    if kind == "update":
        values = dict(zip(columnnames, columnvalues))
        if ("update" not in self.settings.skip_dmls
                and skip_dml_table_name not in self.settings.skip_update_tables):
            delete_event = {
                "table": table,
                "schema": database,
                "action": "delete",
                "values": values,
                "event_unixtime": int(time.time() * 10**6),
                "action_core": "1",
            }
            event = {
                "table": table,
                "schema": database,
                "action": "insert",
                "values": values,
                "event_unixtime": int(time.time() * 10**6),
                "action_core": "2",
            }
    elif kind == "delete":
        # Bugfix: wal2json "delete" changes have no columnnames/columnvalues
        # (both None), so the previous unconditional dict(zip(columnnames,
        # columnvalues)) raised TypeError for every delete.  Deletes carry
        # their identifying values in "oldkeys" instead.
        values = dict(zip(oldkeys.get("keynames"), oldkeys.get("keyvalues")))
        if ("delete" not in self.settings.skip_dmls
                and skip_dml_table_name not in self.settings.skip_delete_tables):
            event = {
                "table": table,
                "schema": database,
                "action": "delete",
                "values": values,
                "event_unixtime": int(time.time() * 10**6),
                "action_core": "1",
            }
    elif kind == "insert":
        values = dict(zip(columnnames, columnvalues))
        event = {
            "table": table,
            "schema": database,
            "action": "insert",
            "values": values,
            "event_unixtime": int(time.time() * 10**6),
            "action_core": "2",
        }
    else:
        return
    if delete_event:
        broker.send(msg=delete_event, schema=database)
    if event:
        broker.send(msg=event, schema=database)
    msg.cursor.send_feedback(flush_lsn=msg.data_start)
    logger.debug(f"send to queue success: key:{database},event:{event}")
    logger.debug(f"success flush lsn: {msg.data_start}")
    with self.lock:
        self.lsn = msg.data_start
    # Periodically report throughput over each insert_interval window.
    now = int(time.time())
    self.count += 1
    if self.last_time == 0:
        self.last_time = now
    if now - self.last_time >= self.settings.insert_interval:
        logger.info(
            f"success send {self.count} events in {self.settings.insert_interval} seconds"
        )
        self.last_time = self.count = 0