Code Example #1
File: sync.py Project: xhook7/py-mysql-es
	def proc_binlog(self):
		stream = BinLogStreamReader(
			connection_settings = self.config['mysql'],
			server_id = self.config['slave']['server_id'],
			log_file = self.log_file,
			log_pos = self.log_pos,
			only_schemas = self.config['slave']['schemas'],
			blocking = True,
			resume_stream = bool(self.log_file and self.log_pos),
			only_events=[WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent]
		)
		for binlogevent in stream:
			#binlogevent.dump()
			self.log_file = stream.log_file
			self.log_pos  = stream.log_pos
			for row in binlogevent.rows:
				pk = binlogevent.primary_key
				table = binlogevent.table
				schema = binlogevent.schema
				if isinstance(binlogevent, WriteRowsEvent):
					yield self.es.index_op(self._format(row['values']), doc_type=table, index=schema, id=row['values'][pk])
				elif isinstance(binlogevent, UpdateRowsEvent):
					yield self.es.update_op(self._format(row['after_values']), doc_type=table, index=schema, id=row['after_values'][pk])
				elif isinstance(binlogevent, DeleteRowsEvent):
					yield self.es.delete_op(doc_type=table, index=schema, id=row['values'][pk])
				else:
					continue

		stream.close()
Code Example #2
def main():
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent])

    for binlogevent in stream:
        for row in binlogevent.rows:
            event = {}
            event["schema"] = binlogevent.schema
            event["table"] = binlogevent.table

            if isinstance(binlogevent, DeleteRowsEvent):
                event["action"] = "delete"
                event.update(row["values"])
            elif isinstance(binlogevent, UpdateRowsEvent):
                event["action"] = "update"
                event.update(row["after_values"])
            elif isinstance(binlogevent, WriteRowsEvent):
                event["action"] = "insert"
                event.update(row["values"])
            print(json.dumps(event))
            sys.stdout.flush()


    stream.close()
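Note: many of the standalone examples in this listing reference MYSQL_SETTINGS and the row-event classes without defining them. Below is a minimal sketch of the setup they assume; the host, user, and password are placeholders, not values taken from any of the projects.

# Minimal setup assumed by the standalone examples (placeholder credentials).
import json
import sys

from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (
    DeleteRowsEvent,
    UpdateRowsEvent,
    WriteRowsEvent,
)

MYSQL_SETTINGS = {
    "host": "127.0.0.1",
    "port": 3306,
    "user": "repl_user",   # needs REPLICATION SLAVE and REPLICATION CLIENT privileges
    "passwd": "",
}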
Code Example #3
File: cdc.py Project: dlf412/mysql-cdc-redis
def main():
    rclient = redis.from_url(redis_url)
    cache = rcache.Rcache(cache_url, server_id)
 
    log_file = rclient.get("log_file")
    log_pos = rclient.get("log_pos")
    log_pos = int(log_pos) if log_pos else None
 
    only_events = _trans_events(events)
    only_events.append(RotateEvent)
 
    stream = BinLogStreamReader(
        connection_settings=mysql_settings,
        server_id=server_id,
        blocking=blocking,
        only_events=only_events,
        only_tables=tables,
        only_schemas=schemas,
        resume_stream=True,   # resume from the saved position
        freeze_schema=False,  # ALTER TABLE events are not supported, for speed
        log_file=log_file,
        log_pos=log_pos)
    row_count = 0
 
    for binlogevent in stream:
        if int(time.time()) - binlogevent.timestamp > binlog_max_latency:
            logger.warn("latency[{}] too large".format(
                int(time.time()) - binlogevent.timestamp))
        logger.debug("catch {}".format(binlogevent.__class__.__name__))
        if isinstance(binlogevent, RotateEvent):  # track binlog file rotation
            rclient.set("log_file", binlogevent.next_binlog)
            rclient.set("log_pos", binlogevent.position)
            logger.info("log_file:{}, log_position:{}".format(
                binlogevent.next_binlog, binlogevent.position))
        else:
            row_count += 1
            table = "%s.%s" % (binlogevent.schema, binlogevent.table)
            vals_lst = _get_row_values(binlogevent)
            if not binlogevent.primary_key:
                tables_without_primary_key.get(table, None)
            try:
                cache.save(table, binlogevent.primary_key, vals_lst)
                logger.debug("save {} {} rows to cache".format(
                    table, len(vals_lst)))
            except rcache.SaveIgnore as err:
                logger.warning(str(err))
            except rcache.FullError as err:
                logger.info("cache OOM occured: {}.trigger dump command".format(
                    str(err)))
                dump_code = _trigger_dumping()
                cache.save(table, binlogevent.primary_key, vals_lst)
            if cache_max_rows and cache.size > cache_max_rows:
                logger.info("cache size:{} >= {}, trigger dumping".format(
                   cache.size, cache_max_rows))
                _trigger_dumping()
            rclient.set("log_pos", binlogevent.packet.log_pos)
        if row_count % 1000 == 0:
            logger.info("save {} changed rows".format(row_count))
 
    stream.close()
Code Example #4
def main():
    # connect rethinkdb
    rethinkdb.connect("localhost", 28015, "mysql").repl()  # repl() sets the default connection for run()
    try:
        rethinkdb.db_drop("mysql").run()
    except:
        pass
    rethinkdb.db_create("mysql").run()

    tables = ["dept_emp", "dept_manager", "titles",
              "salaries", "employees", "departments"]
    for table in tables:
        rethinkdb.db("mysql").table_create(table).run()

    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        blocking=True,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
    )

    # process Feed
    for binlogevent in stream:
        if not isinstance(binlogevent, WriteRowsEvent):
            continue

        for row in binlogevent.rows:
            if not binlogevent.schema == "employees":
                continue

            vals = {str(k): str(v) for k, v in row["values"].items()}
            rethinkdb.table(binlogevent.table).insert(vals).run()

    stream.close()
Code Example #5
File: listener.py Project: JianfuLi/hamal
class Listener(object):
    def __init__(self, connection_settings, server_id, blocking=True, resume_stream=True):
        self._stream = BinLogStreamReader(
            connection_settings=connection_settings,
            server_id=server_id,
            only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
            blocking=blocking,
            resume_stream=resume_stream
        )

    def __del__(self):
        self._stream.close()

    def accept(self, callback):
        for log in self._stream:
            for row in log.rows:
                fields = {}
                method = ''
                if isinstance(log, DeleteRowsEvent):
                    fields = row["values"]
                    method = 'DELETE'
                elif isinstance(log, UpdateRowsEvent):
                    fields = row["after_values"]
                    method = 'UPDATE'
                elif isinstance(log, WriteRowsEvent):
                    method = 'INSERT'
                    fields = row["values"]

                logger.debug(
                    "captured mysql %r event, values: %r",
                    method, json.dumps(fields)
                )
                callback(log.schema, log.table, method, fields)
Code Example #6
File: zabbix_bridge.py Project: OpenTSDB/tcollector
def main():
    utils.drop_privileges()
    if BinLogStreamReader is None:
        utils.err("error: Python module `pymysqlreplication' is missing")
        return 1
    settings = zabbix_bridge_conf.get_settings()

    # Set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=settings['mysql'],
                                server_id=settings['slaveid'],
                                only_events=[WriteRowsEvent],
                                resume_stream=True,
                                blocking=True)

    db_filename = settings['sqlitedb']
    dbcache = sqlite3.connect(':memory:')
    cachecur = dbcache.cursor()
    cachecur.execute("ATTACH DATABASE '%s' as 'dbfile'" % (db_filename,))
    cachecur.execute('CREATE TABLE zabbix_cache AS SELECT * FROM dbfile.zabbix_cache')
    cachecur.execute('CREATE UNIQUE INDEX uniq_zid on zabbix_cache (id)')

    # tcollector.zabbix_bridge namespace for internal Zabbix bridge metrics.
    log_pos = 0
    key_lookup_miss = 0
    sample_last_ts = int(time.time())
    last_key_lookup_miss = 0

    for binlogevent in stream:
        if binlogevent.schema == settings['mysql']['db']:
            table = binlogevent.table
            log_pos = binlogevent.packet.log_pos
            if table == 'history' or table == 'history_uint':
                for row in binlogevent.rows:
                    r = row['values']
                    itemid = r['itemid']
                    cachecur.execute('SELECT id, key, host, proxy FROM zabbix_cache WHERE id=?', (itemid,))
                    row = cachecur.fetchone()
                    if (row is not None):
                        print("zbx.%s %d %s host=%s proxy=%s" % (row[1], r['clock'], r['value'], row[2], row[3]))
                        if ((int(time.time()) - sample_last_ts) > settings['internal_metric_interval']): # Sample internal metrics @ 10s intervals
                            sample_last_ts = int(time.time())
                            print("tcollector.zabbix_bridge.log_pos %d %s" % (sample_last_ts, log_pos))
                            print("tcollector.zabbix_bridge.key_lookup_miss %d %s" % (sample_last_ts, key_lookup_miss))
                            print("tcollector.zabbix_bridge.timestamp_drift %d %s" % (sample_last_ts, (sample_last_ts - r['clock'])))
                            if ((key_lookup_miss - last_key_lookup_miss) > settings['dbrefresh']):
                                print("tcollector.zabbix_bridge.key_lookup_miss_reload %d %s" % (sample_last_ts, (key_lookup_miss - last_key_lookup_miss)))
                                cachecur.execute('DROP TABLE zabbix_cache')
                                cachecur.execute('CREATE TABLE zabbix_cache AS SELECT * FROM dbfile.zabbix_cache')
                                cachecur.execute('CREATE UNIQUE INDEX uniq_zid on zabbix_cache (id)')
                                last_key_lookup_miss = key_lookup_miss
                    else:
                        # TODO: Consider https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
                        utils.err("error: Key lookup miss for %s" % (itemid))
                        key_lookup_miss += 1
                sys.stdout.flush()

    dbcache.close()
    stream.close()
Code Example #7
File: mysql.py Project: njordr/mymongo
def mysql_stream(conf, mongo, queue_out):
    logger = logging.getLogger(__name__)

    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    mysql_settings = {
        "host": conf['host'],
        "port": conf.getint('port'),
        "user": conf['user'],
        "passwd": conf['password']
    }

    last_log = mongo.get_log_pos()
    if last_log['log_file'] == 'NA':
        log_file = None
        log_pos = None
        resume_stream = False
    else:
        log_file = last_log['log_file']
        log_pos = int(last_log['log_pos'])
        resume_stream = True

    stream = BinLogStreamReader(connection_settings=mysql_settings,
                                server_id=conf.getint('slaveid'),
                                only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
                                blocking=True,
                                resume_stream=resume_stream,
                                log_file=log_file,
                                log_pos=log_pos,
                                only_schemas=conf['databases'].split(','))

    for binlogevent in stream:
        binlogevent.dump()
        schema = "%s" % binlogevent.schema
        table = "%s" % binlogevent.table

        for row in binlogevent.rows:
            if isinstance(binlogevent, DeleteRowsEvent):
                vals = row["values"]
                event_type = 'delete'
            elif isinstance(binlogevent, UpdateRowsEvent):
                vals = dict()
                vals["before"] = row["before_values"]
                vals["after"] = row["after_values"]
                event_type = 'update'
            elif isinstance(binlogevent, WriteRowsEvent):
                vals = row["values"]
                event_type = 'insert'

            seqnum = mongo.write_to_queue(event_type, vals, schema, table)
            mongo.write_log_pos(stream.log_file, stream.log_pos)
            queue_out.put({'seqnum': seqnum})
            logger.debug(row)
            logger.debug(stream.log_pos)
            logger.debug(stream.log_file)

    stream.close()
Code Example #8
def main():
  global repLogFile
  global repLogPosition
  global repLogConfig

  graphiteConfig = readGraphiteConfig()

  try:
    print "Start"
    sock = socket.socket()
    sock.connect((CARBON_SERVER, CARBON_PORT))
    print('Carbon socket opened.')
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        server_id=2, #server id needs to be unique
        only_events=[WriteRowsEvent,DeleteRowsEvent,UpdateRowsEvent],
        blocking=True,
        log_file=repLogFile,
        log_pos=repLogPosition,
        resume_stream=repLogPosition is not None)
    print("Binlog stream opened")

    for binlogevent in stream:
      #put replication log file and position in variables so we can save them later
      repLogFile = stream.log_file
      repLogPosition = stream.log_pos
      #also check for changes in graphite configuration and read again if needed
      if binlogevent.schema == "weather" and binlogevent.table == "graphite":
        graphiteConfig = readGraphiteConfig()
      #this is the data we are interested in
      if binlogevent.schema == "weather" and binlogevent.table == "data":

        for row in binlogevent.rows:
          #we only care about inserts
          if isinstance(binlogevent, WriteRowsEvent):
            vals = row["values"]
            #check if the sensor is one that we have configuration for
            if vals["sensorid"] in graphiteConfig:
              conf = graphiteConfig[vals["sensorid"]]
              value = float(vals["value"])
              #do a conversion if needed
              if conf["formula"]!=None and conf["formula"]!="":
                value=eval(conf["formula"], {"__builtins__": {}}, {"value":value,"round":round})
              # construct the message and send it to carbon
              message = '%s %f %d\n' % (conf["graphitepath"], value, round((vals["time"] - _EPOCH).total_seconds()))
              sock.sendall(message.encode())
              print(str(vals["sensorid"]), str(vals["time"]), str(value))
              print(message)

  except KeyboardInterrupt:
    #close open connections
    stream.close()
    sock.close()
    #save replication log position
    repLogConfig.set('replicationlog','file',repLogFile)
    repLogConfig.set('replicationlog','position',str(repLogPosition))
    with open('replogposition.ini', 'w') as f:
      repLogConfig.write(f)
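Note: the example above reads and saves the repLogFile / repLogPosition globals through repLogConfig and replogposition.ini but never shows how they are initialized. A minimal sketch of that bootstrap, assuming the same [replicationlog] section the code writes back:

# Sketch (assumption): initialize the replication-log globals from replogposition.ini.
import configparser

repLogConfig = configparser.ConfigParser()
repLogConfig.read('replogposition.ini')

if repLogConfig.has_section('replicationlog'):
    repLogFile = repLogConfig.get('replicationlog', 'file')
    repLogPosition = repLogConfig.getint('replicationlog', 'position')
else:
    repLogConfig.add_section('replicationlog')
    repLogFile = None
    repLogPosition = None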
Code Example #9
class TestCTLConnectionSettings(base.PyMySQLReplicationTestCase):

    def setUp(self):
        super(TestCTLConnectionSettings, self).setUp()
        self.stream.close()
        ctl_db = copy.copy(self.database)
        ctl_db["db"] = None
        ctl_db["port"] = 3307
        self.ctl_conn_control = pymysql.connect(**ctl_db)
        self.ctl_conn_control.cursor().execute("DROP DATABASE IF EXISTS pymysqlreplication_test")
        self.ctl_conn_control.cursor().execute("CREATE DATABASE pymysqlreplication_test")
        self.ctl_conn_control.close()
        ctl_db["db"] = "pymysqlreplication_test"
        self.ctl_conn_control = pymysql.connect(**ctl_db)
        self.stream = BinLogStreamReader(
            self.database,
            ctl_connection_settings=ctl_db,
            server_id=1024,
            only_events=(WriteRowsEvent,),
            fail_on_table_metadata_unavailable=True
        )

    def tearDown(self):
        super(TestCTLConnectionSettings, self).tearDown()
        self.ctl_conn_control.close()

    def test_seperate_ctl_settings_table_metadata_unavailable(self):
        self.execute("CREATE TABLE test (id INTEGER(11))")
        self.execute("INSERT INTO test VALUES (1)")
        self.execute("COMMIT")

        had_error = False
        try:
            event = self.stream.fetchone()
        except TableMetadataUnavailableError as e:
            had_error = True
            assert "test" in e.args[0]
        finally:
            self.resetBinLog()
            assert had_error

    def test_seperate_ctl_settings_no_error(self):
        self.execute("CREATE TABLE test (id INTEGER(11))")
        self.execute("INSERT INTO test VALUES (1)")
        self.execute("DROP TABLE test")
        self.execute("COMMIT")
        self.ctl_conn_control.cursor().execute("CREATE TABLE test (id INTEGER(11))")
        self.ctl_conn_control.cursor().execute("INSERT INTO test VALUES (1)")
        self.ctl_conn_control.cursor().execute("COMMIT")
        try:
            self.stream.fetchone()
        except Exception as e:
            self.fail("raised unexpected exception: {exception}".format(exception=e))
        finally:
            self.resetBinLog()
Code Example #10
def main():
    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=MYSQL_SETTINGS,
                                server_id=3,
                                blocking=True)

    for binlogevent in stream:
        binlogevent.dump()

    stream.close()
Code Example #11
def main():
    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=MYSQL_SETTINGS,
                                server_id=3,
                                log_file="mysql-bin.000002",
                                blocking=True,
                                only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent])

    for binlogevent in stream:
        binlogevent.dump()

    stream.close()
Code Example #12
def consume_events():
    stream = BinLogStreamReader(connection_settings=database,
                                server_id=3,
                                resume_stream=False,
                                blocking=True,
                                only_events = [UpdateRowsEvent],
                                only_tables = ['test'] )
    start = time.perf_counter()
    i = 0.0
    for binlogevent in stream:
        i += 1.0
        if i % 1000 == 0:
            print("%d events per second (%d total)" % (i / (time.perf_counter() - start), i))
    stream.close()
Code Example #13
    def test_log_pos_handles_disconnects(self):
        self.stream = BinLogStreamReader(
            connection_settings=self.database,
            resume_stream=True
        )

        query = "CREATE TABLE test (id INT  PRIMARY KEY AUTO_INCREMENT, data VARCHAR (50) NOT NULL)"
        self.execute(query)
        query = "INSERT INTO test (data) VALUES('Hello')"
        self.execute(query)
        self.execute("COMMIT")

        self.assertIsInstance(self.stream.fetchone(), RotateEvent)

        self.assertIsInstance(self.stream.fetchone(), FormatDescriptionEvent)
        self.assertGreater(self.stream.log_pos, 0)

        self.assertIsInstance(self.stream.fetchone(), QueryEvent)
        self.assertIsInstance(self.stream.fetchone(), QueryEvent)
        self.assertIsInstance(self.stream.fetchone(), TableMapEvent)
        self.assertIsInstance(self.stream.fetchone(), WriteRowsEvent)
        self.assertIsInstance(self.stream.fetchone(), XidEvent)

        self.assertIsNone(self.stream.fetchone())
        self.assertIsInstance(self.stream.fetchone(), RotateEvent)

        self.assertIsInstance(self.stream.fetchone(), FormatDescriptionEvent)
        self.assertGreater(self.stream.log_pos, 0)
Code Example #14
    def __init__(self, mysql_settings, server_id, dump_file_path,
                 log_file=None, log_pos=None,
                 gtid_set=None, table_filters=None):
        # TODO: gtid mode support
        # https://dev.mysql.com/doc/refman/en/replication-gtids.html
        # TODO: wild chars in table_filters
        self.mysql_settings = mysql_settings
        self.server_id = server_id
        self.log_file = log_file
        self.log_pos = log_pos
        self.dump_file_path = dump_file_path

        if table_filters:
            self.table_filters = {schema:frozenset(tables) for schema, tables in table_filters.items()}
            only_schemas = [schema for schema in table_filters]
        else:
            self.table_filters = None
            only_schemas = None

        self.binlog_stream_reader = BinLogStreamReader(
            connection_settings=self.mysql_settings,
            server_id=self.server_id,
            log_file=self.log_file,
            log_pos=self.log_pos,
            resume_stream=True,
            blocking=False,
            freeze_schema=True,
            only_schemas=only_schemas,
        )
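Note: the constructor above only builds self.table_filters; the loop that applies it is not shown. A sketch of how the filter might be used when iterating the reader (the method name read_events is an assumption, not part of the original class):

    # Sketch (assumption): apply table_filters while draining the reader.
    def read_events(self):
        for event in self.binlog_stream_reader:
            schema = getattr(event, "schema", None)
            table = getattr(event, "table", None)
            if self.table_filters is not None:
                if schema not in self.table_filters or table not in self.table_filters[schema]:
                    continue
            yield event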
Code Example #15
    def test_alter_column(self):
        self.stream.close()
        self.execute("CREATE TABLE test_alter_column (id INTEGER(11), data VARCHAR(50))")
        self.execute("INSERT INTO test_alter_column VALUES (1, 'A value')")
        self.execute("COMMIT")
        # this is a problem only when column is added in position other than at the end
        self.execute("ALTER TABLE test_alter_column ADD COLUMN another_data VARCHAR(50) AFTER id")
        self.execute("INSERT INTO test_alter_column VALUES (2, 'Another value', 'A value')")
        self.execute("COMMIT")

        self.stream = BinLogStreamReader(
            self.database,
            server_id=1024,
            only_events=(WriteRowsEvent,),
            )
        event = self.stream.fetchone()  # insert with two values
        # both of these asserts fail because of the underlying problem described in issue #118:
        # because it got the table schema info after the ALTER TABLE, it wrongly assumes the second
        # column of the first insert is 'another_data'
        # ER: {'id': 1, 'data': 'A value'}
        # AR: {'id': 1, 'another_data': 'A value'}
        self.assertIn("data", event.rows[0]["values"])
        self.assertNotIn("another_data", event.rows[0]["values"])
        self.assertEqual(event.rows[0]["values"]["data"], 'A value')
        self.stream.fetchone()  # insert with three values
Code Example #16
    def test_log_pos_handles_disconnects(self):
        self.stream.close()
        self.stream = BinLogStreamReader(
            self.database,
            server_id=1024,
            resume_stream=False,
            only_events=[FormatDescriptionEvent, QueryEvent, TableMapEvent, WriteRowsEvent, XidEvent],
        )

        query = "CREATE TABLE test (id INT  PRIMARY KEY AUTO_INCREMENT, data VARCHAR (50) NOT NULL)"
        self.execute(query)
        query = "INSERT INTO test (data) VALUES('Hello')"
        self.execute(query)
        self.execute("COMMIT")

        self.assertIsInstance(self.stream.fetchone(), FormatDescriptionEvent)
        self.assertGreater(self.stream.log_pos, 0)
        self.assertIsInstance(self.stream.fetchone(), QueryEvent)

        self.assertIsInstance(self.stream.fetchone(), QueryEvent)
        self.assertIsInstance(self.stream.fetchone(), TableMapEvent)
        self.assertIsInstance(self.stream.fetchone(), WriteRowsEvent)

        self.assertIsInstance(self.stream.fetchone(), XidEvent)

        self.assertGreater(self.stream.log_pos, 0)
Code Example #17
    def test_log_pos(self):
        query = "CREATE TABLE test (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query)
        query = "INSERT INTO test (data) VALUES('Hello')"
        self.execute(query)
        self.execute("COMMIT")

        for i in range(6):
            self.stream.fetchone()
        # record position after insert
        log_file, log_pos = self.stream.log_file, self.stream.log_pos

        query = "UPDATE test SET data = 'World' WHERE id = 1"
        self.execute(query)
        self.execute("COMMIT")

        # resume stream from previous position
        if self.stream is not None:
            self.stream.close()
        self.stream = BinLogStreamReader(
            connection_settings=self.database,
            resume_stream=True,
            log_file=log_file,
            log_pos=log_pos
        )

        self.assertIsInstance(self.stream.fetchone(), RotateEvent)
        self.assertIsInstance(self.stream.fetchone(), FormatDescriptionEvent)
        self.assertIsInstance(self.stream.fetchone(), XidEvent)
        # QueryEvent for the BEGIN
        self.assertIsInstance(self.stream.fetchone(), QueryEvent)
        self.assertIsInstance(self.stream.fetchone(), TableMapEvent)
        self.assertIsInstance(self.stream.fetchone(), UpdateRowsEvent)
        self.assertIsInstance(self.stream.fetchone(), XidEvent)
Code Example #18
    def test_position_gtid(self):
        query = "CREATE TABLE test (id INT NOT NULL, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query)
        query = "BEGIN;"
        self.execute(query)
        query = "INSERT INTO test (id, data) VALUES(1, 'Hello');"
        self.execute(query)
        query = "COMMIT;"
        self.execute(query)

        query = "CREATE TABLE test2 (id INT NOT NULL, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query)
        query = "SELECT @@global.gtid_executed;"
        gtid = self.execute(query).fetchone()[0]

        self.stream.close()
        self.stream = BinLogStreamReader(self.database, server_id=1024, blocking=True, auto_position=gtid)

        self.assertIsInstance(self.stream.fetchone(), RotateEvent)
        self.assertIsInstance(self.stream.fetchone(), FormatDescriptionEvent)
        self.assertIsInstance(self.stream.fetchone(), GtidEvent)
        event = self.stream.fetchone()

        self.assertEqual(
            event.query, "CREATE TABLE test2 (id INT NOT NULL, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        )
Code Example #19
    def test_connection_stream_lost_event(self):
        self.stream.close()
        self.stream = BinLogStreamReader(
            self.database, server_id=1024, blocking=True, ignored_events=self.ignoredEvents()
        )

        query = "CREATE TABLE test (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query)
        query2 = "INSERT INTO test (data) VALUES('a')"
        for i in range(0, 10000):
            self.execute(query2)
        self.execute("COMMIT")

        self.assertIsInstance(self.stream.fetchone(), RotateEvent)
        self.assertIsInstance(self.stream.fetchone(), FormatDescriptionEvent)

        event = self.stream.fetchone()

        self.assertIsInstance(event, QueryEvent)
        self.assertEqual(event.query, query)

        self.conn_control.kill(self.stream._stream_connection.thread_id())
        for i in range(0, 1000):
            event = self.stream.fetchone()
            self.assertIsNotNone(event)
Code Example #20
File: binlog.py Project: ycg/Python-Tools
def binlog_process(args):
    file = None
    stream = None
    sql_list = []
    try:
        file = open(args.out_file, "w+")
        stream = BinLogStreamReader(connection_settings=connection_settings, log_file=args.log_file, log_pos=args.start_pos,
                                    resume_stream=True, only_schemas=args.databases, only_tables=args.tables, server_id=args.server_id)

        for binlogevent in stream:
            if(args.log_file != stream.log_file):
                break

            if(args.end_pos != None):
                if(binlogevent.packet.log_pos > args.end_pos):
                    break

            if(args.start_datetime != None):
                if(datetime.datetime.fromtimestamp(binlogevent.timestamp) < args.start_datetime):
                    continue

            if(args.end_datetime != None):
                if(datetime.datetime.fromtimestamp(binlogevent.timestamp) > args.end_datetime):
                    break

            if (isinstance(binlogevent, WriteRowsEvent)):
                for row in binlogevent.rows:
                    if(args.flashback):
                        sql_list.append(delete_to_sql(row, binlogevent) + "\n")
                    else:
                        sql_list.append(insert_to_sql(row, binlogevent) + "\n")
            elif (isinstance(binlogevent, DeleteRowsEvent)):
                for row in binlogevent.rows:
                    if(args.flashback):
                        sql_list.append(insert_to_sql(row, binlogevent) + "\n")
                    else:
                        sql_list.append(delete_to_sql(row, binlogevent) + "\n")
            elif (isinstance(binlogevent, UpdateRowsEvent)):
                for row in binlogevent.rows:
                    sql_list.append(update_to_sql(row, binlogevent, args.flashback) + "\n")
        file.writelines(sql_list)
    finally:
        if(stream != None):
            stream.close()
        if(file != None):
            file.close()
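Note: binlog_process above relies on insert_to_sql, delete_to_sql and update_to_sql helpers that are not part of this snippet. A hypothetical sketch of insert_to_sql, only to illustrate the expected shape (values are rendered naively with repr(); real code would need proper SQL quoting):

# Hypothetical helper (not from the original project): render a WriteRowsEvent row as an INSERT.
def insert_to_sql(row, binlogevent):
    values = row["values"]
    columns = ", ".join("`%s`" % col for col in values)
    rendered = ", ".join(repr(val) for val in values.values())
    return "INSERT INTO `%s`.`%s` (%s) VALUES (%s);" % (
        binlogevent.schema, binlogevent.table, columns, rendered)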
Code Example #21
File: listener.py Project: JianfuLi/hamal
 def __init__(self, connection_settings, server_id, blocking=True, resume_stream=True):
     self._stream = BinLogStreamReader(
         connection_settings=connection_settings,
         server_id=server_id,
         only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
         blocking=blocking,
         resume_stream=resume_stream
     )
Code Example #22
    def _binlog_loader(self):
        """
        read row from binlog
        """
        if self.is_binlog_sync:
            resume_stream = True
            logging.info("Resume from binlog_file: {file}  binlog_pos: {pos}".format(file=self.log_file,
                                                                                     pos=self.log_pos))
        else:
            resume_stream = False

        stream = BinLogStreamReader(connection_settings=self.binlog_conf,
                                    server_id=self.config['mysql']['server_id'],
                                    only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
                                    only_tables=[self.config['mysql']['table']],
                                    resume_stream=resume_stream,
                                    blocking=True,
                                    log_file=self.log_file,
                                    log_pos=self.log_pos)
        for binlogevent in stream:
            self.log_file = stream.log_file
            self.log_pos = stream.log_pos
            for row in binlogevent.rows:
                if isinstance(binlogevent, DeleteRowsEvent):
                    rv = {
                        'action': 'delete',
                        'doc': row['values']
                    }
                elif isinstance(binlogevent, UpdateRowsEvent):
                    rv = {
                        'action': 'update',
                        'doc': row['after_values']
                    }
                elif isinstance(binlogevent, WriteRowsEvent):
                    rv = {
                        'action': 'index',
                        'doc': row['values']
                    }
                else:
                    logging.error('unknown action type in binlog')
                    raise TypeError('unknown action type in binlog')
                yield rv
                # print(rv)
        stream.close()
        raise IOError('mysql connection closed')
Code Example #23
    def test_filtering_events(self):
        self.stream.close()
        self.stream = BinLogStreamReader(self.database, server_id=1024, only_events=[QueryEvent])
        query = "CREATE TABLE test (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query)

        event = self.stream.fetchone()
        self.assertIsInstance(event, QueryEvent)
        self.assertEqual(event.query, query)
Code Example #24
def doRep(logPosConObj, MYSQL_SETTINGS):
	key = MYSQL_SETTINGS["host"]+":"+str(MYSQL_SETTINGS["port"])
	try:
		stream = BinLogStreamReader(
			connection_settings=MYSQL_SETTINGS,server_id=100,
			only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, RotateEvent,QueryEvent],blocking=True,
			log_file=logPosConObj["log_file"],log_pos=logPosConObj["log_pos"])
		for binlogevent in stream:
			#prefix = "%s:%s:" % (binlogevent.schema, binlogevent.table)
		
			if isinstance(binlogevent, RotateEvent):
				#pprint (vars(binlogevent.packet))
				logPosConObj["log_file"]=binlogevent.packet.event.next_binlog
				logPosConObj["log_pos"]=binlogevent.packet.log_pos
				#logPosObject.setData(logPosConObj)
				continue
			if isinstance(binlogevent, QueryEvent):
				#pprint (vars(binlogevent.packet))
				sendMsg(key,binlogevent.query,binlogevent.timestamp)
				#logPosObject.setData(logPosConObj)
				continue
			for row in binlogevent.rows:
				#dbtable = binlogevent.schema+"_"+binlogevent.table
				if isinstance(binlogevent, DeleteRowsEvent):
					#print 'DeleteRowsEvent'
					sendMsg(key,row.get("values",object),binlogevent.timestamp)
					#func(row.get("values",object))
				elif isinstance(binlogevent, UpdateRowsEvent):
					#print 'UpdateRowsEvent'
					#print row
					sendMsg(key,row,binlogevent.timestamp)
					#func(row.get("after_values",object))
				elif isinstance(binlogevent, WriteRowsEvent):
					#print 'WriteRowsEvent'
					#print row
					sendMsg(key,row.get("values",object),binlogevent.timestamp)
					#func(row.get("values",object))
				#logPosConObj["log_pos"]=binlogevent.packet.log_pos
				#logPosObject.setData(logPosConObj)
		
		stream.close()
	except BaseException as e:
		print(e)
		return
Code Example #25
def main():
    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=MYSQL_SETTINGS,
                                server_id=3,
                                blocking=True)

    for binlogevent in stream:
        #print binlogevent
        #if isinstance(binlogevent, QueryEvent):
        #    print binlogevent.query
        if isinstance(binlogevent, WriteRowsEvent):
            for rows in binlogevent.rows:
                print(rows)
                #print binlogevent.query
        #binlogevent.dump()

    stream.close()
Code Example #26
    def test_filtering_table_event_with_ignored_tables(self):
        self.stream.close()
        self.assertEqual(self.bin_log_format(), "ROW")
        self.stream = BinLogStreamReader(self.database,
                                         server_id=1024,
                                         only_events=[WriteRowsEvent],
                                         ignored_tables=["test_2"])

        query = "CREATE TABLE test_2 (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query)
        query = "CREATE TABLE test_3 (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query)

        self.execute("INSERT INTO test_2 (data) VALUES ('alpha')")
        self.execute("INSERT INTO test_3 (data) VALUES ('alpha')")
        self.execute("INSERT INTO test_2 (data) VALUES ('beta')")
        self.execute("COMMIT")
        event = self.stream.fetchone()
        self.assertEqual(event.table, "test_3")
Code Example #27
File: scan_binlog.py Project: xuclachina/dbatools
    def process_binlog(self):
        stream = BinLogStreamReader(connection_settings=self.conn_setting, server_id=self.server_id,
                                    log_file=self.start_file, log_pos=self.start_pos, only_schemas=self.only_schemas,
                                    only_tables=self.only_tables, resume_stream=True, blocking=True)

        flag_last_event = False
        e_start_pos, last_pos = stream.log_pos, stream.log_pos
        count_dict = {"insert": 0, "delete": 0, "update": 0}
        # to simplify code, we do not use flock for tmp_file.
        tmp_file = create_unique_file('%s.%s' % (
            self.conn_setting['host'], self.conn_setting['port']))
        with open(tmp_file, "w") as f_tmp, self.connection as cursor:
            for binlog_event in stream:
                if isinstance(binlog_event, QueryEvent) and binlog_event.query == 'BEGIN':
                    e_start_pos = last_pos
                    trans_start_time = binlog_event.timestamp
                    count_dict = {"insert": 0, "delete": 0, "update": 0}

                if isinstance(binlog_event, QueryEvent) and not self.only_dml:
                    count_dict = update_dml_count(cursor=cursor, binlog_event=binlog_event,
                                                              flashback=False, no_pk=self.no_pk, count_dict=count_dict)
                elif is_dml_event(binlog_event) and event_type(binlog_event) in self.sql_type:
                    for row in binlog_event.rows:
                        count_dict = update_dml_count(cursor=cursor, binlog_event=binlog_event, no_pk=self.no_pk,
                                                                  row=row, flashback=False, e_start_pos=e_start_pos, count_dict=count_dict)

                elif isinstance(binlog_event, XidEvent):
                    last_pos = binlog_event.packet.log_pos
                    trans_size = last_pos - e_start_pos
                    trans_end_time = binlog_event.timestamp
                    duration = trans_end_time - trans_start_time
                    print("trans_size: %s insert_count:%s update_count%s delete_count%s duration:%s" % (trans_size, count_dict["insert"], count_dict["update"], count_dict["delete"], duration))

                if isinstance(binlog_event, RotateEvent):
                    last_pos = 4
                elif not (isinstance(binlog_event, RotateEvent) or isinstance(binlog_event, FormatDescriptionEvent)):
                    last_pos = binlog_event.packet.log_pos

                if flag_last_event:
                    break
            stream.close()
            f_tmp.close()
        return True
Code Example #28
def sync_binlog_stream(mysql_conn, config, binlog_streams, state):
    binlog_streams_map = generate_streams_map(binlog_streams)

    for tap_stream_id in binlog_streams_map.keys():
        common.whitelist_bookmark_keys(BOOKMARK_KEYS, tap_stream_id, state)

    log_file, log_pos = calculate_bookmark(mysql_conn, binlog_streams_map,
                                           state)

    verify_log_file_exists(mysql_conn, log_file, log_pos)

    if config.get("server_id"):
        server_id = int(config.get("server_id"))
        LOGGER.info("Using provided server_id=%s", server_id)
    else:
        server_id = fetch_server_id(mysql_conn)
        LOGGER.info("No server_id provided, will use global server_id=%s",
                    server_id)

    connection_wrapper = make_connection_wrapper(config)

    try:
        reader = BinLogStreamReader(
            connection_settings={},
            server_id=server_id,
            slave_uuid="stitch-slave-{}".format(server_id),
            log_file=log_file,
            log_pos=log_pos,
            resume_stream=True,
            only_events=[
                RotateEvent, WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent
            ],
            pymysql_wrapper=connection_wrapper,
        )
        LOGGER.info("Starting binlog replication with log_file=%s, log_pos=%s",
                    log_file, log_pos)
        _run_binlog_sync(mysql_conn, reader, binlog_streams_map, state)
    finally:
        # BinLogStreamReader doesn't implement the `with` methods
        # So, try/finally will close the chain from the top
        reader.close()

    singer.write_message(singer.StateMessage(value=copy.deepcopy(state)))
Code Example #29
def main():
    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=MYSQL_SETTINGS,
                                server_id=3,
                                blocking=True)

    for binlogevent in stream:
        #print binlogevent
        #if isinstance(binlogevent, QueryEvent):
        #    print binlogevent.query
        if isinstance(binlogevent, WriteRowsEvent):
            for rows in binlogevent.rows:
                print(rows)
                #print binlogevent.query
        #binlogevent.dump()

    stream.close()
Code Example #30
 def setUp(self):
     super(TestCTLConnectionSettings, self).setUp()
     self.stream.close()
     ctl_db = copy.copy(self.database)
     ctl_db["db"] = None
     ctl_db["port"] = 3307
     self.ctl_conn_control = pymysql.connect(**ctl_db)
     self.ctl_conn_control.cursor().execute("DROP DATABASE IF EXISTS pymysqlreplication_test")
     self.ctl_conn_control.cursor().execute("CREATE DATABASE pymysqlreplication_test")
     self.ctl_conn_control.close()
     ctl_db["db"] = "pymysqlreplication_test"
     self.ctl_conn_control = pymysql.connect(**ctl_db)
     self.stream = BinLogStreamReader(
         self.database,
         ctl_connection_settings=ctl_db,
         server_id=1024,
         only_events=(WriteRowsEvent,),
         fail_on_table_metadata_unavailable=True
     )
Code Example #31
    def test_skip_to_timestamp(self):
        self.stream.close()
        query = "CREATE TABLE test_1 (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query)
        time.sleep(1)
        query = "SELECT UNIX_TIMESTAMP();"
        timestamp = self.execute(query).fetchone()[0]
        query2 = "CREATE TABLE test_2 (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query2)

        self.stream = BinLogStreamReader(
            self.database,
            server_id=1024,
            skip_to_timestamp=timestamp,
            ignored_events=self.ignoredEvents(),
        )
        event = self.stream.fetchone()
        self.assertIsInstance(event, QueryEvent)
        self.assertEqual(event.query, query2)
Code Example #32
def main():
    with utils.lower_privileges(self._logger):
        if BinLogStreamReader is None:
            utils.err("error: Python module `pymysqlreplication' is missing")
            return 1
        if pymysql is None:
            utils.err("error: Python module `pymysql' is missing")
            return 1
        settings = zabbix_bridge_conf.get_settings()

        # Set blocking to True if you want to block and wait for the next event at
        # the end of the stream
        stream = BinLogStreamReader(connection_settings=settings['mysql'],
                                    server_id=settings['slaveid'],
                                    only_events=[WriteRowsEvent],
                                    resume_stream=True,
                                    blocking=True)

        hostmap = gethostmap(settings)  # Prime initial hostmap
        for binlogevent in stream:
            if binlogevent.schema == settings['mysql']['db']:
                table = binlogevent.table
                log_pos = binlogevent.packet.log_pos
                if table == 'history' or table == 'history_uint':
                    for row in binlogevent.rows:
                        r = row['values']
                        itemid = r['itemid']
                        try:
                            hm = hostmap[itemid]
                            print "zbx.%s %d %s host=%s proxy=%s" % (
                                hm['key'], r['clock'], r['value'], hm['host'],
                                hm['proxy'])
                        except KeyError:
                            # TODO: Consider https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
                            hostmap = gethostmap(settings)
                            utils.err("error: Key lookup miss for %s" %
                                      (itemid))
                    sys.stdout.flush()
                    # if n seconds old, reload
                    # settings['gethostmap_interval']

        stream.close()
Code Example #33
    def test_log_pos(self):
        query = "CREATE TABLE test (id INT NOT NULL AUTO_INCREMENT, data VARCHAR (50) NOT NULL, PRIMARY KEY (id))"
        self.execute(query)
        query = "INSERT INTO test (data) VALUES('Hello')"
        self.execute(query)
        self.execute("COMMIT")

        for i in range(6):
            self.stream.fetchone()
        # record position after insert
        log_file, log_pos = self.stream.log_file, self.stream.log_pos

        query = "UPDATE test SET data = 'World' WHERE id = 1"
        self.execute(query)
        self.execute("COMMIT")

        # resume stream from previous position
        if self.stream is not None:
            self.stream.close()
        self.stream = BinLogStreamReader(connection_settings=self.database,
                                         resume_stream=True,
                                         log_file=log_file,
                                         log_pos=log_pos)

        # RotateEvent
        self.stream.fetchone()
        # FormatDescription
        self.stream.fetchone()
        # XidEvent
        self.stream.fetchone()
        # QueryEvent for the BEGIN
        self.stream.fetchone()

        event = self.stream.fetchone()
        self.assertIsInstance(event, TableMapEvent)

        event = self.stream.fetchone()
        self.assertIsInstance(event, UpdateRowsEvent)

        self.assertIsInstance(self.stream.fetchone(), XidEvent)

        self.assertIsNone(self.stream.fetchone())
Code Example #34
    def __init__(self, config,
                 server_id,
                 blocking,
                 resume_stream,
                 log_file=None,
                 log_pos=None,
                 auto_position=None):

        self.event_stream = BinLogStreamReader(
            connection_settings=config.BINLOG_MYSQL,
            server_id=server_id,
            blocking=blocking,
            resume_stream=resume_stream,
            log_file=log_file,
            log_pos=log_pos,
            auto_position=auto_position
        )
        self._SKIP_SCHEMAS = config.SKIP_SCHEMAS
        self._ALLOW_TABLES = config.ALLOW_TABLES
        self._IGNORE_DDL = config.IGNORE_DDL
Code Example #35
def main():
    print('Replicator Started')
    io = Emitter(dict(
        host=settings.SOCKETIO_SETTINGS['host'],
        port=settings.SOCKETIO_SETTINGS['port']
        )).Of(settings.SOCKETIO_SETTINGS['namespace'])
    stream = BinLogStreamReader(connection_settings=settings.MYSQL_SETTINGS,
                                server_id=3,
                                blocking=True,
                                resume_stream=True,
                                only_events=[UpdateRowsEvent],
                                only_tables=[settings.DB_SETTINGS['source_table']])

    for binlogevent in stream:
         for row in binlogevent.rows:
             vals = row["after_values"]
             print('Updated rows for ' + json.dumps(vals))
             io.Emit('update', json.dumps(vals))

    stream.close()
Code Example #36
    def run(self):
        while True:
            self.stream = BinLogStreamReader(
                connection_settings=MYSQL_SETTINGS,
                only_events=[WriteRowsEvent],  # [DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
                server_id=3,
                slave_heartbeat=1
                ,skip_to_timestamp = self.skip_to_timestamp)

            # self.idx = 0
            # self.show_slave_status()
            for binlogevent in self.stream:
                prefix = "%s:%s:" % (binlogevent.schema, binlogevent.table)
                if prefix == self.target_schema + ":" + self.target_table + ":":
                    # print(binlogevent.rows)
                    for new_update_row in binlogevent.rows:
                        logging.info(" >>> find new row {} time: {}".format(new_update_row, binlogevent.timestamp))
                        # format for a row: {'values':
                        #       {'video_id': 1, 'frame_id': 4, 'insert_time': datetime.datetime(2008, 6, 19, 0, 0),
                        #       'frame_loc': 'RANDOM_LL', 'detect_flag': boolean, 'result_loc': None}}

                        # if new_update_row["values"]["detect_flag"] == 0 and  \
                        #         (new_update_row["values"]['insert_time'] is not None or new_update_row["values"]['insert_time'] != '' )\
                        #         and  ((new_update_row["values"]['frame_id'] > 170 and new_update_row["values"]['frame_id' ] < 200)
                        #               or (new_update_row["values"]['frame_id'] > 1010 and new_update_row["values"]['frame_id' ] < 1035)
                        #               or (new_update_row["values"]['frame_id'] > 1155 and new_update_row["values"]['frame_id' ] < 1177)
                        #               or (new_update_row["values"]['frame_id'] > 1312 and new_update_row["values"]['frame_id' ] < 1316)
                        #               or (new_update_row["values"]['frame_id'] > 2127 and new_update_row["values"]['frame_id' ] < 2150)):

                        if new_update_row["values"]["detect_flag"] == 0 and  \
                                (new_update_row["values"]['insert_time'] is not None or new_update_row["values"]['insert_time'] != ''
                                 ):
                            #logging.info(" >>> for this row, the flag is {}".format(new_update_row['detect_flag']))
                            try:
                                self.share_image_queue.put(new_update_row["values"],True,1)
                                logging.info(" >>> adding 1 image to queue {}".format(new_update_row))
                            except Exception as e:
                                logging.error(e)
                            self.skip_to_timestamp = convert_to_second_int(datetime.datetime.now())

            time.sleep(2)
Code Example #37
File: sync_delta.py Project: yxb0926/mysql-cmd-tools
def main():
    ## a few initialization steps
    parse_conf()
    set_mysql_setting()
    parse_binlog_conf()

    stream = BinLogStreamReader(
        connection_settings= MYSQL_SETTINGS,
        server_id          = serverid,
        resume_stream      = True,
        log_file           = binlog_info["binlog"],
        log_pos            = int(binlog_info["position"]),
        only_events        = [DeleteRowsEvent, 
                              WriteRowsEvent, 
                              UpdateRowsEvent,
                              RotateEvent,
                              XidEvent],
        blocking           = True)

    currentbinlogfilename = ""
    currentbinlogposition = ""
    for binlogevent in stream:
        if isinstance(binlogevent, RotateEvent):
            currentbinlogfilename = binlogevent.next_binlog
            currentbinlogposition = binlogevent.position
            print(currentbinlogfilename, currentbinlogposition)
            #sync_binlog_to_file(currentbinlogfilename,currentbinlogposition)
        elif isinstance(binlogevent, XidEvent):
            currentbinlogposition = binlogevent.packet.log_pos
            print(currentbinlogfilename, currentbinlogposition)
            #sync_binlog_to_file(currentbinlogfilename,currentbinlogposition)
        elif (binlogevent.schema == sourcedb_conf["db_dbname"] and 
                binlogevent.table == sourcedb_conf["db_table"]):
            for row in binlogevent.rows:
                if isinstance(binlogevent, DeleteRowsEvent):
                    syncDelete(row)
                elif isinstance(binlogevent, UpdateRowsEvent):
                    syncUpdate(row)
                elif isinstance(binlogevent, WriteRowsEvent):
                    syncInsert(row)
    stream.close()
Code Example #38
 def factory(self, binlog_metadata: BinLogMetadata) -> BinLogStreamReader:
     if binlog_metadata is None:
         return BinLogStreamReader(
             connection_settings=mysql_settings,
             server_id=server_id,
             only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
             freeze_schema=True
             #,only_tables=[event_table]
         )
     else:
         return BinLogStreamReader(
             connection_settings=mysql_settings,
             server_id=server_id,
             log_file=binlog_metadata.log_file,
             log_pos=binlog_metadata.log_pos,
             resume_stream=True,
             auto_position=False,
             only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
             freeze_schema=True
             #,only_tables=[event_table]
         )
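Note: the factory above only reads log_file and log_pos from its binlog_metadata argument. A minimal sketch of what the BinLogMetadata type might look like (an assumption; the original definition is not shown):

# Sketch (assumption): minimal carrier matching how the factory uses it.
from dataclasses import dataclass

@dataclass
class BinLogMetadata:
    log_file: str
    log_pos: int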
Code Example #39
File: binlog_parse.py Project: zyt325/DailyDocker
def binlog_dump():
    mysql_settings = {
        'host': 'docker03.base-fx.com',
        'port': 3306,
        'user': '******',
        'passwd': 'mysql325'
    }
    stream = BinLogStreamReader(connection_settings=mysql_settings,
                                server_id=100,
                                blocking=True)
    for binlogevent in stream:
        binlogevent.dump()
Code Example #40
    def test_drop_table_tablemetadata_unavailable(self):
        self.stream.close()
        self.execute("CREATE TABLE test (id INTEGER(11))")
        self.execute("INSERT INTO test VALUES (1)")
        self.execute("DROP TABLE test")
        self.execute("COMMIT")

        self.stream = BinLogStreamReader(
            self.database,
            server_id=1024,
            only_events=(WriteRowsEvent, ),
            fail_on_table_metadata_unavailable=True)
        had_error = False
        try:
            event = self.stream.fetchone()
        except TableMetadataUnavailableError as e:
            had_error = True
            assert "test" in e.args[0]
        finally:
            self.resetBinLog()
            assert had_error
Code Example #41
File: utils.py Project: MarkWh1te/mysql2kafka
def create_stream(log_file=None, log_pos=None):
    return BinLogStreamReader(
        connection_settings=mysql_settings,
        server_id=1,
        blocking=True,
        log_file=log_file,
        # only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent,RotateEvent,QueryEvent],
        only_events=[
            DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, QueryEvent
        ],
        log_pos=log_pos,
        resume_stream=True)
Code Example #42
    def test_drop_column(self):
        self.stream.close()
        self.execute(
            "CREATE TABLE test_drop_column (id INTEGER(11), data VARCHAR(50))")
        self.execute("INSERT INTO test_drop_column VALUES (1, 'A value')")
        self.execute("COMMIT")
        self.execute("ALTER TABLE test_drop_column DROP COLUMN data")
        self.execute("INSERT INTO test_drop_column VALUES (2)")
        self.execute("COMMIT")

        self.stream = BinLogStreamReader(self.database,
                                         server_id=1024,
                                         only_events=(WriteRowsEvent, ))
        try:
            self.stream.fetchone()  # insert with two values
            self.stream.fetchone()  # insert with one value
        except Exception as e:
            self.fail(
                "raised unexpected exception: {exception}".format(exception=e))
        finally:
            self.resetBinLog()
Code Example #43
 def __init__(self,
              mysql_settings,
              server_id=1,
              blocking=False,
              resume_stream=True,
              log_file=None,
              log_pos=None,
              slave_heartbeat=None):
     self.__stream = BinLogStreamReader(
         connection_settings=mysql_settings,
         server_id=server_id,
         blocking=blocking,
         resume_stream=resume_stream,
         only_events=[
             DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent,
             TableMapEvent, BeginLoadQueryEvent, ExecuteLoadQueryEvent,
             QueryEvent
         ],  # RotateEvent, QueryEvent, HeartbeatLogEvent
         log_file=log_file,
         log_pos=log_pos,
         slave_heartbeat=slave_heartbeat)
Code Example #44
def main():
    print('Replicator Started')
    io = Emitter(
        dict(host=settings.SOCKETIO_SETTINGS['host'],
             port=settings.SOCKETIO_SETTINGS['port'])).Of(
                 settings.SOCKETIO_SETTINGS['namespace'])
    stream = BinLogStreamReader(
        connection_settings=settings.MYSQL_SETTINGS,
        server_id=3,
        blocking=True,
        resume_stream=True,
        only_events=[UpdateRowsEvent],
        only_tables=[settings.DB_SETTINGS['source_table']])

    for binlogevent in stream:
        for row in binlogevent.rows:
            vals = row["after_values"]
            print 'Updated rows for ' + json.dumps(vals)
            io.Emit('update', json.dumps(vals))

    stream.close()
コード例 #45
0
 def _seek(self, source_database_config, tracker_database_config,
           allowed_event_types, position, only_tables):
     self.stream = BinLogStreamReader(
         connection_settings=source_database_config,
         ctl_connection_settings=tracker_database_config,
         server_id=self.get_unique_server_id(),
         blocking=True,
         only_events=allowed_event_types,
         resume_stream=config.env_config.resume_stream,
         only_tables=only_tables,
         fail_on_table_metadata_unavailable=True,
         **position.to_replication_dict())
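
position.to_replication_dict() is not defined in this excerpt; a minimal sketch of what such an object might look like, assuming it only maps saved coordinates onto BinLogStreamReader keyword arguments:

class Position(object):
    def __init__(self, log_file=None, log_pos=None):
        self.log_file = log_file
        self.log_pos = log_pos

    def to_replication_dict(self):
        # Only pass coordinates when they are actually known.
        if self.log_file is None or self.log_pos is None:
            return {}
        return {'log_file': self.log_file, 'log_pos': self.log_pos}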
コード例 #46
0
    def _binlog_sync(self):
        if not self.sync_binlog_open:
            return

        dbs, tables = self.all_dbs_and_tables()
        stream = BinLogStreamReader(connection_settings=self.mysql_conn_d,
                                    server_id=self.mysql_conf['server_id'],
                                    only_events=[
                                        DeleteRowsEvent,
                                        WriteRowsEvent,
                                        UpdateRowsEvent,
                                        RotateEvent,
                                    ],
                                    only_schemas=dbs,
                                    only_tables=tables,
                                    resume_stream=True,
                                    blocking=True,
                                    log_file=self.log_file,
                                    log_pos=self.log_pos)

        for be in stream:
            self.log_file = stream.log_file
            self.log_pos = stream.log_pos

            if isinstance(be, RotateEvent):
                self._save_binlog_record()
                continue

            for row in be.rows:
                schema = be.schema
                table = be.table
                if isinstance(be, WriteRowsEvent):
                    self._handle_sync('create', schema, table, row['values'])
                elif isinstance(be, UpdateRowsEvent):
                    self._handle_sync('update', schema, table,
                                      row['after_values'])
                elif isinstance(be, DeleteRowsEvent):
                    self._handle_sync('delete', schema, table, row['values'])

        stream.close()
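
self._save_binlog_record() is not shown above; a minimal checkpoint helper along these lines could persist self.log_file and self.log_pos so the next run can pass them back to BinLogStreamReader (the JSON file format is an assumption, not the project's code):

import json

def save_binlog_record(path, log_file, log_pos):
    # Persist the last seen binlog coordinates for the next run to resume from.
    with open(path, 'w') as f:
        json.dump({'log_file': log_file, 'log_pos': log_pos}, f)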
コード例 #47
0
def main_binlog(only_schemas=only_schemas,
                server_id=server_id,
                hi_table_map=None):
    stream = BinLogStreamReader(
        connection_settings=mysql_settings,
        server_id=server_id,
        blocking=blocking,
        resume_stream=resume_stream,
        only_events=only_events,
        ignored_events=ignored_events,
        auto_position=auto_position,
        only_tables=only_tables,
        ignored_tables=ignored_tables,
        only_schemas=only_schemas,
        ignored_schemas=ignored_schemas,
        log_file=get_new_binlog_pos(binlogfile_Label_file)[0],
        log_pos=int(get_new_binlog_pos(binlogfile_Label_file)[1]))
    try:
        for binlogevent in stream:
            info = binlogevent.dump()
            safety_shutdown()
            batch_sql.cntrast_insert_class_tab(info, hi_table_map)
            if info['class'] == 'RotateEvent':
                analysis_rotate_event(info)
                init_binlog_file_name = "%s\n" % info["Next binlog file"]
            elif info['class'] == 'FormatDescriptionEvent':
                analysis_format_description_event(info)
            elif info['class'] == "GtidEvent":
                analysis_gtid_event(info, init_binlog_file_name)
            elif info['class'] == "QueryEvent":
                analysis_query_event(info, init_binlog_file_name)
            elif info['class'] == "TableMapEvent":
                table_map, hi_table_map = analysis_table_map_event(
                    info, init_binlog_file_name)
            elif info['class'] == "UpdateRowsEvent":
                analysis_update_rows_event(info, init_binlog_file_name,
                                           table_map, hi_table_map)
            elif info['class'] == "WriteRowsEvent":
                analysis_write_rows_event(info, init_binlog_file_name,
                                          table_map, hi_table_map)
            elif info['class'] == "XidEvent":
                analysis_xid_event(info, init_binlog_file_name)
            elif info['class'] == "DeleteRowsEvent":
                analysis_delete_rows_event(info, init_binlog_file_name,
                                           hi_table_map, table_map)
            elif info['class'] == "StopEvent":
                analysis_stop_event(info, init_binlog_file_name)
            else:
                loging.warning(info)
    except Exception as er:
        loging.critical("The connection source DB has an exception, "
                        "please check the configuration information:%s " % er)
コード例 #48
0
    def process_binlog(self):
        """
        伪装 slave 获取数据库中的 binlog
        :return:
        """
        stream = BinLogStreamReader(
            connection_settings=self.coon_settings,
            server_id=100,
            log_file=self.log_file,
            log_pos=self.log_pos,
            only_schemas=self.only_schemas,
            only_tables=self.only_tables,
            # blocking=True,
            resume_stream=self.resume_stream,
            only_events=[
                DeleteRowsEvent,
                UpdateRowsEvent,
                WriteRowsEvent
            ],
        )

        tem_file = TemporaryFile(mode='w+')
        for binlog_event in stream:
            for row in binlog_event.rows:
                # print(binlog_event.extra_data_length)
                # print(binlog_event.packet.log_pos)
                try:
                    event_time = datetime.datetime.fromtimestamp(binlog_event.timestamp)
                except OSError:
                    event_time = datetime.datetime(1980, 1, 1, 0, 0)

                event = {"schema": binlog_event.schema, "table": binlog_event.table,
                         "event_time": event_time.strftime("%Y-%m-%d %H:%M:%S")}

                if isinstance(binlog_event, DeleteRowsEvent):
                    event["action"] = "delete"
                    event["data"] = clear_event_type(row["values"])

                elif isinstance(binlog_event, UpdateRowsEvent):
                    event["action"] = "update"
                    event["before_data"] = clear_event_type(row["before_values"])
                    event["after_data"] = clear_event_type(row["after_values"])

                elif isinstance(binlog_event, WriteRowsEvent):
                    event["action"] = "insert"
                    event["data"] = clear_event_type(row["values"])

                tem_file.write(self.reverse_sql(event)+'\n')

        tem_file.seek(0)
        for x in tem_file.readlines()[::-1]:
            print(x)
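
clear_event_type() is referenced above but not shown; one plausible implementation (an assumption, not the author's helper) normalises row values that are not JSON/SQL friendly:

import datetime
import decimal

def clear_event_type(row_values):
    # Convert datetime, Decimal and bytes values into plain strings.
    cleaned = {}
    for column, value in row_values.items():
        if isinstance(value, (datetime.datetime, datetime.date)):
            cleaned[column] = value.isoformat()
        elif isinstance(value, decimal.Decimal):
            cleaned[column] = str(value)
        elif isinstance(value, bytes):
            cleaned[column] = value.decode('utf-8', errors='replace')
        else:
            cleaned[column] = value
    return cleaned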
コード例 #49
0
def main():
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent])

    for binlogevent in stream:
        for row in binlogevent.rows:
            event = {"schema": binlogevent.schema, "table": binlogevent.table}

            if isinstance(binlogevent, DeleteRowsEvent):
                event["action"] = "delete"
                event = dict(event.items() + row["values"].items())
            elif isinstance(binlogevent, UpdateRowsEvent):
                event["action"] = "update"
                event = dict(event.items() + row["after_values"].items())
            elif isinstance(binlogevent, WriteRowsEvent):
                event["action"] = "insert"
                event = dict(event.items() + row["values"].items())
            print json.dumps(event)
            sys.stdout.flush()

    stream.close()
コード例 #50
0
def main():
    utils.drop_privileges()
    if BinLogStreamReader is None:
        utils.err("error: Python module `pymysqlreplication' is missing")
        return 1
    if pymysql is None:
        utils.err("error: Python module `pymysql' is missing")
        return 1
    settings = zabbix_bridge_conf.get_settings()

    # Set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=settings['mysql'],
                                server_id=settings['slaveid'],
                                only_events=[WriteRowsEvent],
                                resume_stream=True,
                                blocking=True)

    hostmap = gethostmap(settings) # Prime initial hostmap
    for binlogevent in stream:
        if binlogevent.schema == settings['mysql']['db']:
            table = binlogevent.table
            log_pos = binlogevent.packet.log_pos
            if table == 'history' or table == 'history_uint':
                for row in binlogevent.rows:
                    r = row['values']
                    itemid = r['itemid']
                    try:
                        hm = hostmap[itemid]
                        print "zbx.%s %d %s host=%s proxy=%s" % (hm['key'], r['clock'], r['value'], hm['host'], hm['proxy'])
                    except KeyError:
                        # TODO: Consider https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
                        hostmap = gethostmap(settings)
                        utils.err("error: Key lookup miss for %s" % (itemid))
                sys.stdout.flush()
                # if n seconds old, reload
                # settings['gethostmap_interval']

    stream.close()
コード例 #51
0
def main():
    stream = BinLogStreamReader(
        connection_settings={
            "host": '127.0.0.1',
            "port": 3306,
            "user": '******',
            "passwd": '12345Aa.',
        },
        server_id=1,
        blocking=True,
        resume_stream=True,
        only_events=[DeleteRowsEvent, UpdateRowsEvent, WriteRowsEvent],
        auto_position=0,
    )

    for bin_log_event in stream:
        for row in bin_log_event.rows:
            event = {
                "schema": bin_log_event.schema,
                "table": bin_log_event.table,
                "log_pos": bin_log_event.packet.log_pos
            }
            if isinstance(bin_log_event, DeleteRowsEvent):
                event["action"] = "delete"
                event["values"] = dict(row["values"].items())
                event = dict(event.items())
            elif isinstance(bin_log_event, UpdateRowsEvent):
                event["action"] = "update"
                event["before_values"] = dict(row["before_values"].items())
                event["after_values"] = dict(row["after_values"].items())
                event = dict(event.items())
            elif isinstance(bin_log_event, WriteRowsEvent):
                event["action"] = "insert"
                event["values"] = dict(row["values"].items())
                event = dict(event.items())
            print(json.dumps(event))
            sys.stdout.flush()

    stream.close()
コード例 #52
0
def main():
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        server_id=220,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent]

    )

    for binlogevent in stream:

        for row in binlogevent.rows:
            event = {"schema": binlogevent.schema, "table": binlogevent.table}
            # print row
            # print "row values"
            # print row["values"]
            # print row['values'].items()
            # print "end row values"
            if isinstance(binlogevent, DeleteRowsEvent):
                event["action"] = "delete"
                event = dict(event.items() + row["values"].items())
                template = 'DELETE FROM `{0}`.`{1}` WHERE {2} LIMIT 1;'.format(
                    binlogevent.schema, binlogevent.table,
                    ' AND '.join(map(compare_items, row['values'].items()))
                )
                values = map(fix_object, row['values'].values())
                print template
                print "deleteing "
                print values
            elif isinstance(binlogevent, UpdateRowsEvent):
                event["action"] = "update"
                event = dict(event.items() + row["after_values"].items())
            elif isinstance(binlogevent, WriteRowsEvent):
                event["action"] = "insert"
                event = dict(event.items() + row["values"].items())
            # print json.dumps(event,cls=DjangoJSONEncoder)
            # sys.stdout.flush()


    stream.close()
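
compare_items() and fix_object() are not defined in this excerpt; a plausible sketch, modelled only on how the DELETE template is assembled above (these are assumptions, not the original helpers):

def compare_items(item):
    # Turn a (column, value) pair into a WHERE fragment with a placeholder;
    # NULL values need "IS" rather than "=".
    column, value = item
    if value is None:
        return '`{0}` IS %s'.format(column)
    return '`{0}`=%s'.format(column)

def fix_object(value):
    # Decode bytes so the values can be bound to the %s placeholders later.
    if isinstance(value, bytes):
        return value.decode('utf-8', errors='replace')
    return value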
コード例 #53
0
def main():
    r = redis.Redis()

    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent])

    for binlogevent in stream:
        prefix = "%s:%s:" % (binlogevent.schema, binlogevent.table)

        for row in binlogevent.rows:
            if isinstance(binlogevent, DeleteRowsEvent):
                vals = row["values"]
                r.delete(prefix + str(vals["id"]))
            elif isinstance(binlogevent, UpdateRowsEvent):
                vals = row["after_values"]
                r.hmset(prefix + str(vals["id"]), vals)
            elif isinstance(binlogevent, WriteRowsEvent):
                vals = row["values"]
                r.hmset(prefix + str(vals["id"]), vals)

    stream.close()
コード例 #54
0
    def run_by_rows(self):
        try:
            stream = BinLogStreamReader(connection_settings=self.mysql_setting,
                                        server_id=101213112,
                                        only_events=[
                                            DeleteRowsEvent, WriteRowsEvent,
                                            UpdateRowsEvent, GtidEvent
                                        ],
                                        resume_stream=True,
                                        blocking=False,
                                        log_file=self.binlog_file,
                                        log_pos=self.start_pos,
                                        only_schemas=self.only_schemas,
                                        only_tables=self.only_tables)
            rows = []
            for binlogevent in stream:
                log_pos = stream.log_pos
                if log_pos >= self.end_pos:
                    print('binlog syncer exit...')
                    stream.close()
                    break
                else:
                    if binlogevent.event_type == GTID_LOG_EVENT:
                        # Capture the GTID of each transaction here;
                        # nothing further is done with it.
                        gtid = binlogevent.gtid
                        rows.append(gtid)
                    else:
                        # Check whether the number of rows affected under the current
                        # transaction's GTID equals the supplied row count. pymysql cannot
                        # obtain the GTID of the current transaction and pymysqlreplication
                        # cannot obtain the binlog thread_id, so exact positioning is not
                        # possible; this approach may pick up more rows than were actually
                        # backed up.
                        for row in binlogevent.rows:
                            binlog = {
                                'database': binlogevent.schema,
                                'table': binlogevent.table,
                                'primary_key': binlogevent.primary_key
                            }
                            if self.sql_type == 'DELETE':
                                if isinstance(binlogevent, DeleteRowsEvent):
                                    binlog['values'] = row["values"]
                                    binlog['type'] = 'DELETE'
                                    rows.append(binlog)
                            if self.sql_type == 'UPDATE':
                                if isinstance(binlogevent, UpdateRowsEvent):
                                    binlog["before"] = row["before_values"]
                                    binlog["after"] = row["after_values"]
                                    binlog['type'] = 'UPDATE'
                                    rows.append(binlog)
                            if self.sql_type == 'INSERT':
                                if isinstance(binlogevent, WriteRowsEvent):
                                    binlog['values'] = row["values"]
                                    binlog['type'] = 'INSERT'
                                    rows.append(binlog)
            stream.close()
            result = {'status': 'success', 'data': self._filter_gtid(rows)}
        except Exception as err:
            result = {'status': 'fail', 'msg': str(err)}

        return result
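
self._filter_gtid() is not shown in this excerpt; a minimal sketch of one way to group the collected entries by the GTID string that precedes them (an assumption about its behaviour, not the project's code):

def _filter_gtid(self, rows):
    # GTIDs were appended as plain strings, row events as dicts; group the
    # dicts under the most recently seen GTID.
    grouped = {}
    current_gtid = None
    for item in rows:
        if isinstance(item, str):
            current_gtid = item
            grouped.setdefault(current_gtid, [])
        else:
            grouped.setdefault(current_gtid, []).append(item)
    return grouped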
コード例 #55
0
def main():
    ## A few initialization steps
    parse_conf()
    set_mysql_setting()
    parse_binlog_conf()

    stream = BinLogStreamReader(connection_settings=MYSQL_SETTINGS,
                                server_id=serverid,
                                resume_stream=True,
                                log_file=binlog_info["binlog"],
                                log_pos=int(binlog_info["position"]),
                                only_events=[
                                    DeleteRowsEvent, WriteRowsEvent,
                                    UpdateRowsEvent, RotateEvent, XidEvent
                                ],
                                blocking=True)

    currentbinlogfilename = ""
    currentbinlogposition = ""
    for binlogevent in stream:
        if isinstance(binlogevent, RotateEvent):
            currentbinlogfilename = binlogevent.next_binlog
            currentbinlogposition = binlogevent.position
            print currentbinlogfilename, currentbinlogposition
            #sync_binlog_to_file(currentbinlogfilename,currentbinlogposition)
        elif isinstance(binlogevent, XidEvent):
            currentbinlogposition = binlogevent.packet.log_pos
            print currentbinlogfilename, currentbinlogposition
            #sync_binlog_to_file(currentbinlogfilename,currentbinlogposition)
        elif (binlogevent.schema == sourcedb_conf["db_dbname"]
              and binlogevent.table == sourcedb_conf["db_table"]):
            for row in binlogevent.rows:
                if isinstance(binlogevent, DeleteRowsEvent):
                    syncDelete(row)
                elif isinstance(binlogevent, UpdateRowsEvent):
                    syncUpdate(row)
                elif isinstance(binlogevent, WriteRowsEvent):
                    syncInsert(row)
    stream.close()
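
sync_binlog_to_file() only appears in the commented-out calls above; a minimal sketch of such a checkpoint helper (the path and format are assumptions):

def sync_binlog_to_file(binlog_filename, binlog_position,
                        path='binlog_position.txt'):
    # Record the latest binlog coordinates so parse_binlog_conf() can pick
    # them up on the next start.
    with open(path, 'w') as f:
        f.write('%s,%s\n' % (binlog_filename, binlog_position))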
コード例 #56
0
    def __init__(self, master_connect_setting,
                 server_id, log_file, log_pos,
                 outputer,
                 only_tables=None,
                 used_update_sql_tables=[]):
        '''
        used_update_sql_tables: tables for which `UPDATE` SQL is used instead of `REPLACE INTO` SQL.
        '''

        self.server_id = server_id
        self._master_connect_setting = master_connect_setting
        self._output = outputer
        assert callable(getattr(outputer, 'write', None)
                        ), 'outputer must have interface `write(event)`'

        self._start_file = log_file
        self._start_pos = log_pos

        self.reading_file = self._start_file
        self.reading_pos = self._start_pos
        self.reading_timestamp = 0
        self.reading_event = 0

        self._master = sqlpool.SqlPool(**master_connect_setting)
        self._master_log_file = None
        self._master_log_pos = None
        self._stop = False
        self._running = False

        self._tables_desc = {}

        self._used_update_sql_tables = dict(
            [(t, True) for t in used_update_sql_tables])
        m = master_connect_setting
        hostname, user, password, port = socket.gethostname(), m['user'], m[
            'passwd'], m['port']

        self._stream = BinLogStreamReader(connection_settings=master_connect_setting,
                                          server_id=self.server_id,
                                          log_pos=self._start_pos, log_file=self._start_file,
                                          blocking=True, resume_stream=True,
                                          freeze_schema=True,
                                          only_tables=only_tables,
                                          report_slave=(
                                              hostname, user, password, port),
                                          slave_uuid=uuid.uuid4(),
                                          only_events=[QueryEvent,
                                                       WriteRowsEvent,
                                                       UpdateRowsEvent,
                                                       DeleteRowsEvent,
                                                       RotateEvent])
コード例 #57
0
 def run(self, ):
     stream = BinLogStreamReader(connection_settings=self.source_db_info,
                             server_id=3,
                             resume_stream=True,log_file=self.logfile, log_pos=self.logpos,
                             only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
                             only_tables=self.tables,only_schemas=self.schemas,
                             blocking=True)
     for binlogevent in stream:
         #prefix = "%s:%s" % (binlogevent.schema, binlogevent.table)
         for row in binlogevent.rows:
             if isinstance(binlogevent, DeleteRowsEvent):
                 vals = row["values"]
                 sql = '''delete from %s where %s = '%s' ''' % (binlogevent.table,self.columns[0],vals[self.columns[0]])
                 self.data_queue.put(sql)
                 self.log.info(sql)
             elif isinstance(binlogevent, UpdateRowsEvent):
                 vals = row["after_values"]
                 sql = ''' update %s set  ''' % binlogevent.table
                 for i in range(len(self.columns)):
                     if i == (len(self.columns)-1):
                         sql = sql + self.columns[i] + ' = ' +'\'' + str(vals[self.columns[i]]) +'\''
                     else:
                         sql = sql + self.columns[i] + ' = ' + '\'' + str(vals[self.columns[i]]) + '\'' + ' , '
                 sql = sql + " where %s =  '%s'" % (self.columns[0],vals[self.columns[0]])
                 self.data_queue.put(sql)
                 self.log.info(sql)
             elif isinstance(binlogevent, WriteRowsEvent):
                 vals = row["values"]
                 sql = '''insert into %s values (''' % binlogevent.table
                 for i in range(len(self.columns)):  
                     if i == (len(self.columns)-1):  
                         sql = sql + '\'' + str(vals[self.columns[i]]) + '\''
                     else:
                         sql = sql + '\'' + str(vals[self.columns[i]]) + '\'' + ','
                 sql = sql + ')'  
                 self.data_queue.put(sql)  
                 self.log.info(sql)
     stream.close()
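
The INSERT/UPDATE strings above are built by concatenating quoted values, which breaks as soon as a value contains a quote. A parameterised sketch (not the original project's code) keeps the statement and its values separate so the consumer can run cursor.execute(sql, params):

def build_insert(table, columns, vals):
    # Produce "INSERT INTO t (c1, c2) VALUES (%s, %s)" plus the bound values.
    placeholders = ', '.join(['%s'] * len(columns))
    sql = 'INSERT INTO %s (%s) VALUES (%s)' % (
        table, ', '.join(columns), placeholders)
    params = [vals[c] for c in columns]
    return sql, params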
コード例 #58
0
    def get_binlog_startime(self):
        stream = BinLogStreamReader(connection_settings=self.conn_setting,
                                    server_id=self.server_id,
                                    log_file=self.start_file,
                                    log_pos=self.start_pos,
                                    only_schemas=self.only_schemas,
                                    only_tables=self.only_tables,
                                    resume_stream=True,
                                    blocking=True)
        try:
            for binlog_event in stream:

                if binlog_event.timestamp:
                    event_time = datetime.datetime.fromtimestamp(
                        binlog_event.timestamp)
                    break

            stream.close()
            # event_time = datetime.datetime.fromtimestamp(stream[0].timestamp)
        except OSError:
            event_time = datetime.datetime(2015, 1, 1, 0, 0)

        return event_time
コード例 #59
0
ファイル: mysql-replication.py プロジェクト: hzy0809/code
def sync_sql_to_es():
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
        only_schemas=['bee'],
        only_tables=['user_article_model'],
        server_id=83,
        skip_to_timestamp=1628074154,
        blocking=True)

    for binlogevent in stream:
        prefix = "%s:%s:" % (binlogevent.schema, binlogevent.table)
        logger.info(f'SYNC {prefix.upper()}')
        timestamp = binlogevent.timestamp
        func = event_func_map[type(binlogevent)]
        for row in binlogevent.rows:
            data = func(row, timestamp)
            logger.info(
                f'timestamp[{timestamp}]<--->{func.__name__.upper()}<--->article[{data["id"]}]'
            )
            # print(data)
            write_to_es(func, data)
    stream.close()
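
event_func_map and write_to_es are not included in this excerpt; a plausible sketch of the event-to-handler mapping, with handler signatures inferred from how func(row, timestamp) is called above (the sync_ts field name is an assumption):

def handle_insert(row, timestamp):
    data = dict(row['values'])
    data['sync_ts'] = timestamp  # hypothetical field carrying the event time
    return data

def handle_update(row, timestamp):
    data = dict(row['after_values'])
    data['sync_ts'] = timestamp
    return data

def handle_delete(row, timestamp):
    data = dict(row['values'])
    data['sync_ts'] = timestamp
    return data

event_func_map = {
    WriteRowsEvent: handle_insert,
    UpdateRowsEvent: handle_update,
    DeleteRowsEvent: handle_delete,
}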