def _execute_command(self, command, sql):
    """Coroutine (``yield from`` style): send a COM_* *command* packet
    carrying *sql* as payload, splitting payloads larger than
    MAX_PACKET_LEN across several MySQL packets.

    :param command: COM_* command byte (e.g. COM_QUERY)
    :param sql: payload, ``str`` (encoded with self._encoding) or ``bytes``
    :raises InterfaceError: if the connection is not open
    """
    if not self._writer:
        raise InterfaceError("(0, 'Not connected')")

    # If the last query was unbuffered, make sure it finishes before
    # sending new commands
    if self._result is not None and self._result.unbuffered_active:
        yield from self._result._finish_unbuffered_query()

    if isinstance(sql, str):
        sql = sql.encode(self._encoding)

    chunk_size = min(MAX_PACKET_LEN, len(sql) + 1)  # +1 is for command

    # First packet: since chunk_size < 2**24, the 4th byte of '<i' is 0
    # and doubles as the sequence-id byte of the packet header.
    prelude = struct.pack('<i', chunk_size) + int2byte(command)
    self._write_bytes(prelude + sql[:chunk_size - 1])
    # logger.debug(dump_packet(prelude + sql))

    if chunk_size < MAX_PACKET_LEN:
        return

    # Remaining payload goes out in continuation packets with an
    # incrementing sequence id (wrapping at 256).
    seq_id = 1
    sql = sql[chunk_size - 1:]
    while True:
        chunk_size = min(MAX_PACKET_LEN, len(sql))
        prelude = struct.pack('<i', chunk_size)[:3]
        data = prelude + int2byte(seq_id % 256) + sql[:chunk_size]
        self._write_bytes(data)
        # logger.debug(dump_packet(data))
        sql = sql[chunk_size:]
        # A final chunk shorter than MAX_PACKET_LEN terminates the
        # multi-packet sequence.
        if not sql and chunk_size < MAX_PACKET_LEN:
            break
        seq_id += 1
def send_data(self):
    """Send data packets from the local file to the server.

    Coroutine used by LOAD DATA LOCAL INFILE handling: streams the file
    in MAX_PACKET_LEN chunks and always terminates with an empty packet,
    even on error, so the server knows the transfer is over.
    """
    if not self.connection._writer:
        raise InterfaceError("(0, '')")
    # sequence id is 2 as we already sent a query packet
    seq_id = 2
    try:
        yield from self._open_file()
        chunk_size = MAX_PACKET_LEN
        while True:
            chunk = yield from self._file_read(chunk_size)
            if not chunk:
                break
            # packet = 3-byte little-endian length + sequence byte + data
            packet = (struct.pack('<i', len(chunk))[:3] +
                      int2byte(seq_id))
            format_str = '!{0}s'.format(len(chunk))
            packet += struct.pack(format_str, chunk)
            self.connection._write_bytes(packet)
            # NOTE(review): seq_id is not wrapped with % 256 here;
            # presumably local files never span 254+ packets — confirm.
            seq_id += 1
    finally:
        # send the empty packet to signify we are done sending data
        packet = struct.pack('<i', 0)[:3] + int2byte(seq_id)
        self.connection._write_bytes(packet)
def __connect_to_stream(self):
    """Open a pymysql connection and switch it into binlog streaming
    mode by hand-crafting a COM_BINLOG_DUMP request."""
    self._stream_connection = pymysql.connect(**self.__connection_settings)
    cur = self._stream_connection.cursor()
    cur.execute("SHOW MASTER STATUS")
    (log_file, log_pos) = cur.fetchone()[:2]
    cur.close()

    # binlog_pos (4) -- position in the binlog-file to start the stream with
    # flags (2) BINLOG_DUMP_NON_BLOCK (0 or 1)
    # server_id (4) -- server id of this slave
    # binlog-filename (string.EOF) -- filename of the binlog on the master
    command = COM_BINLOG_DUMP

    # 11 = 1 (command) + 4 (position) + 2 (flags) + 4 (server_id)
    prelude = struct.pack('<i', len(log_file) + 11) \
        + int2byte(command)
    if self.__log_pos is None:
        if self.__resume_stream:
            prelude += struct.pack('<I', log_pos)
        else:
            # presumably 4 == first event offset in a binlog file
            # (after the file magic) — confirm
            prelude += struct.pack('<I', 4)
    else:
        prelude += struct.pack('<I', self.__log_pos)

    # flags: 0 = blocking dump, 1 = BINLOG_DUMP_NON_BLOCK
    if self.__blocking:
        prelude += struct.pack('<h', 0)
    else:
        prelude += struct.pack('<h', 1)

    prelude += struct.pack('<I', self.__server_id)
    self._stream_connection.wfile.write(prelude + log_file.encode())
    self._stream_connection.wfile.flush()
    self.__connected = True
def test_datatypes(connection, cursor, datatype_table):
    """Round-trip one value of every supported column type and check
    the driver's decoding (pytest fixtures provide connection/cursor
    and the pre-created table)."""
    # insert values
    v = (
        True, -3, 123456789012, 5.7, "hello'\" world", u"Espa\xc3\xb1ol",
        "binary\x00data".encode(connection.charset),
        datetime.date(1988, 2, 2),
        # microseconds stripped up front: MySQL discards them anyway
        datetime.datetime.now().replace(microsecond=0),
        datetime.timedelta(5, 6), datetime.time(16, 32),
        time.localtime())
    yield from cursor.execute(
        "INSERT INTO test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) "
        "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v)
    yield from cursor.execute(
        "select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
    r = yield from cursor.fetchone()
    # BIT column comes back as a single byte
    assert util.int2byte(1) == r[0]
    # assert v[1:8] == r[1:8])
    assert v[1:9] == r[1:9]
    # mysql throws away microseconds so we need to check datetimes
    # specially. additionally times are turned into timedeltas.
    # self.assertEqual(datetime.datetime(*v[8].timetuple()[:6]), r[8])
    # TODO: figure out why this assert fails
    # assert [9] == r[9]  # just timedeltas
    expected = datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute))
    assert expected == r[10]
    # time.struct_time round-trips as a datetime (seconds precision)
    assert datetime.datetime(*v[-1][:6]) == r[-1]
def connect_to_stream(self, custom_log_pos=None):
    """Open the replication stream by sending a COM_BINLOG_DUMP request.

    :param custom_log_pos: optional binlog position; when given it
        overrides ``self.log_pos`` before the dump request is built
    """
    self._stream_connection = pymysql.connect(**self.__connection_settings)
    (log_file, log_pos) = self.get_master_binlog_pos()

    # flags (2) BINLOG_DUMP_NON_BLOCK (0 or 1)
    # server_id (4) -- server id of this slave
    # binlog-filename (string.EOF) -- filename of the binlog on the master
    if custom_log_pos is not None:
        self.log_pos = custom_log_pos

    command = COM_BINLOG_DUMP

    # 11 = 1 (command) + 4 (position) + 2 (flags) + 4 (server_id)
    prelude = struct.pack("<i", len(log_file) + 11) + int2byte(command)
    if self.log_pos is None:
        if self.__resume_stream:
            prelude += struct.pack("<I", log_pos)
        else:
            prelude += struct.pack("<I", self.starting_binlog_pos)
    else:
        prelude += struct.pack("<I", self.log_pos)

    # flags: 0 = blocking dump, 1 = BINLOG_DUMP_NON_BLOCK
    if self.__blocking:
        prelude += struct.pack("<h", 0)
    else:
        prelude += struct.pack("<h", 1)

    prelude += struct.pack("<I", self.__server_id)
    self._stream_connection.wfile.write(prelude + log_file.encode())
    self._stream_connection.wfile.flush()
    self.__connected = True
def __connect_to_stream(self):
    """Connect and issue COM_BINLOG_DUMP, resolving the start position
    from the master when no complete position was configured."""
    # log_pos (4) -- position in the binlog-file to start the stream with
    # flags (2) BINLOG_DUMP_NON_BLOCK (0 or 1)
    # server_id (4) -- server id of this slave
    # log_file (string.EOF) -- filename of the binlog on the master
    self._stream_connection = pymysql.connect(**self.__connection_settings)

    # only when log_file and log_pos both provided, the position info is
    # valid, if not, get the current position from master
    if self.log_file is None or self.log_pos is None:
        cur = self._stream_connection.cursor()
        cur.execute("SHOW MASTER STATUS")
        self.log_file, self.log_pos = cur.fetchone()[:2]
        cur.close()

    # 11 = 1 (command) + 4 (position) + 2 (flags) + 4 (server_id)
    prelude = struct.pack('<i', len(self.log_file) + 11) \
        + int2byte(COM_BINLOG_DUMP)
    if self.__resume_stream:
        prelude += struct.pack('<I', self.log_pos)
    else:
        # presumably 4 == first event offset in a binlog file — confirm
        prelude += struct.pack('<I', 4)

    # flags: 0 = blocking dump, 1 = BINLOG_DUMP_NON_BLOCK
    if self.__blocking:
        prelude += struct.pack('<h', 0)
    else:
        prelude += struct.pack('<h', 1)

    prelude += struct.pack('<I', self.__server_id)
    prelude += self.log_file.encode()

    self._stream_connection.wfile.write(prelude)
    self._stream_connection.wfile.flush()
    self.__connected_stream = True
def test_datatypes(self):
    """ test every data type """
    conn = self.connections[0]
    c = conn.cursor()
    c.execute("create table test_datatypes (b bit, i int, l bigint, f real, s varchar(32), u varchar(32), bb blob, d date, dt datetime, ts timestamp, td time, t time, st datetime)")
    try:
        # insert values
        v = (True, -3, 123456789012, 5.7, "hello'\" world", u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset), datetime.date(1988,2,2), datetime.datetime(2014, 5, 15, 7, 45, 57), datetime.timedelta(5,6), datetime.time(16,32), time.localtime())
        c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v)
        c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = c.fetchone()
        # BIT column is returned as a single byte
        self.assertEqual(util.int2byte(1), r[0])
        self.assertEqual(v[1:10], r[1:10])
        # TIME columns come back as timedeltas
        self.assertEqual(datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)), r[10])
        # time.struct_time round-trips as a datetime (seconds precision)
        self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])
        c.execute("delete from test_datatypes")

        # check nulls
        c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", [None] * 12)
        c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = c.fetchone()
        self.assertEqual(tuple([None] * 12), r)
        c.execute("delete from test_datatypes")

        # check sequence type
        c.execute("insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)")
        c.execute("select l from test_datatypes where i in %s order by i", ((2,6),))
        r = c.fetchall()
        self.assertEqual(((4,),(8,)), r)
    finally:
        c.execute("drop table test_datatypes")
def test_datatypes(self):
    """ test every data type """
    conn = self.connections[0]
    c = yield from conn.cursor()
    yield from c.execute(
        "create table test_datatypes (b bit, i int, l bigint, f real, s "
        "varchar(32), u varchar(32), bb blob, d date, dt datetime, "
        "ts timestamp, td time, t time, st datetime)")
    try:
        # insert values
        v = (True, -3, 123456789012, 5.7, "hello'\" world",
             u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset),
             datetime.date(1988, 2, 2),
             # microseconds stripped up front: MySQL discards them anyway
             datetime.datetime.now().replace(microsecond=0),
             datetime.timedelta(5, 6), datetime.time(16, 32),
             time.localtime())
        yield from c.execute(
            "insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) "
            "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v)
        yield from c.execute(
            "select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = yield from c.fetchone()
        # BIT column is returned as a single byte
        self.assertEqual(util.int2byte(1), r[0])
        # self.assertEqual(v[1:8], r[1:8])
        self.assertEqual(v[1:9], r[1:9])
        # mysql throws away microseconds so we need to check datetimes
        # specially. additionally times are turned into timedeltas.
        # self.assertEqual(datetime.datetime(*v[8].timetuple()[:6]), r[8])
        self.assertEqual(v[9], r[9])
        # just timedeltas
        self.assertEqual(
            datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)),
            r[10])
        self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])
        yield from c.execute("delete from test_datatypes")

        # check nulls
        yield from c.execute(
            "insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) "
            "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", [None] * 12)
        yield from c.execute(
            "select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = yield from c.fetchone()
        self.assertEqual(tuple([None] * 12), r)
        yield from c.execute("delete from test_datatypes")

        # check sequence type
        yield from c.execute(
            "insert into test_datatypes (i, l) values (2,4), (6,8), "
            "(10,12)")
        yield from c.execute(
            "select l from test_datatypes where i in %s order by i",
            ((2, 6), ))
        r = yield from c.fetchall()
        self.assertEqual(((4, ), (8, )), r)
    finally:
        yield from c.execute("drop table test_datatypes")
def _request_authentication(self):
    """Coroutine: send the handshake-response (auth) packet.

    Builds the client-flags/charset header, appends the scrambled
    credentials and optional default database, and falls back to the
    legacy 3.23-style scramble when the server answers with an EOF
    packet (old_passwords mode).

    :raises ValueError: if no username was configured
    """
    self.client_flag |= CAPABILITIES
    if self.server_version.startswith('5'):
        self.client_flag |= MULTI_RESULTS

    if self._user is None:
        raise ValueError("Did not specify a username")

    charset_id = charset_by_name(self._charset).id
    user = self._user
    if isinstance(self._user, str):
        user = self._user.encode(self._encoding)

    # client flags (4) + max packet size (4) + charset (1) + filler (23)
    data_init = (struct.pack('<i', self.client_flag) +
                 struct.pack("<I", 1) +
                 int2byte(charset_id) + int2byte(0) * 23)

    next_packet = 1

    data = data_init + user + b'\0' + _scramble(
        self._password.encode('latin1'), self.salt)

    if self._db:
        db = self._db
        if isinstance(self._db, str):
            db = self._db.encode(self._encoding)
        data += db + int2byte(0)

    data = pack_int24(len(data)) + int2byte(next_packet) + data
    next_packet += 2
    # logger.debug(dump_packet(data))
    self._write_bytes(data)

    auth_packet = yield from self._read_packet()

    # if old_passwords is enabled the packet will be 1 byte long and
    # have the octet 254
    if auth_packet.is_eof_packet():
        # send legacy handshake
        data = _scramble_323(self._password.encode('latin1'),
                             self.salt) + b'\0'
        data = pack_int24(len(data)) + int2byte(next_packet) + data
        self._write_bytes(data)
        # BUGFIX: _read_packet is a coroutine; without ``yield from``
        # this assigned a generator object instead of the reply packet.
        auth_packet = yield from self._read_packet()
def ensure_closed(self):
    """Gracefully terminate: send COM_QUIT, drain the writer, close."""
    if self._writer is None:
        return  # already closed, nothing to do
    quit_packet = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
    self._writer.write(quit_packet)
    yield from self._writer.drain()
    self.close()
def write_packet(self, payload):
    """Frame *payload* as one MySQL packet (3-byte length + sequence id)
    and push it onto the wire, then advance the sequence counter.

    Internal note: code that builds packets by hand and calls
    _write_bytes() directly must keep self._next_seq_id in sync itself.
    """
    header = pack_int24(len(payload)) + int2byte(self._next_seq_id)
    self._write_bytes(header + payload)
    self._next_seq_id = (self._next_seq_id + 1) % 256
async def ensure_closed(self):
    """Shut the connection down politely: COM_QUIT, drain, then close."""
    if self._writer is None:
        # connection already gone
        return
    quit_cmd = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
    self._writer.write(quit_cmd)
    await self._writer.drain()
    self.close()
def write_packet(self, payload):
    """Emit *payload* to the server framed as a single MySQL packet:
    3-byte little-endian length, one sequence byte, then the payload.

    Internal note: when building packets manually and calling
    _write_bytes() yourself, maintain self._next_seq_id on your own.
    """
    frame = pack_int24(len(payload)) + int2byte(self._next_seq_id)
    self._write_bytes(frame + payload)
    self._next_seq_id = (self._next_seq_id + 1) % 256
def _request_authentication(self):
    """Coroutine: send the handshake-response (auth) packet.

    Same flow as the legacy variant but with the fixed-layout
    ``'<iIB23s'`` header; falls back to the 3.23 scramble when the
    server replies with an EOF packet (old_passwords mode).

    :raises ValueError: if no username was configured
    """
    self.client_flag |= CAPABILITIES
    if int(self.server_version.split('.', 1)[0]) >= 5:
        self.client_flag |= MULTI_RESULTS

    if self._user is None:
        raise ValueError("Did not specify a username")

    charset_id = charset_by_name(self._charset).id
    user = self._user
    if isinstance(self._user, str):
        user = self._user.encode(self._encoding)

    # client flags (4) + max packet size (4) + charset (1) + filler (23)
    data_init = struct.pack('<iIB23s', self.client_flag, 1,
                            charset_id, b'')

    next_packet = 1

    data = data_init + user + b'\0' + _scramble(
        self._password.encode('latin1'), self.salt)

    if self._db:
        db = self._db
        if isinstance(self._db, str):
            db = self._db.encode(self._encoding)
        data += db + int2byte(0)

    data = pack_int24(len(data)) + int2byte(next_packet) + data
    next_packet += 2
    # logger.debug(dump_packet(data))
    self._write_bytes(data)

    auth_packet = yield from self._read_packet()

    # if old_passwords is enabled the packet will be 1 byte long and
    # have the octet 254
    if auth_packet.is_eof_packet():
        # send legacy handshake
        data = _scramble_323(self._password.encode('latin1'),
                             self.salt) + b'\0'
        data = pack_int24(len(data)) + int2byte(next_packet) + data
        self._write_bytes(data)
        # BUGFIX: _read_packet is a coroutine; without ``yield from``
        # this assigned a generator object instead of the reply packet.
        auth_packet = yield from self._read_packet()
def test_datatypes(self):
    """ test every data type """
    conn = self.connections[0]
    c = conn.cursor()
    c.execute(
        "create table test_datatypes (b bit, i int, l bigint, f real, s varchar(32), u varchar(32), bb blob, d date, dt datetime, ts timestamp, td time, t time, st datetime)"
    )
    try:
        # insert values
        v = (True, -3, 123456789012, 5.7, "hello'\" world",
             u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset),
             datetime.date(1988, 2, 2),
             datetime.datetime(2014, 5, 15, 7, 45, 57),
             datetime.timedelta(5, 6), datetime.time(16, 32),
             time.localtime())
        c.execute(
            "insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
            v)
        c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = c.fetchone()
        # BIT column is returned as a single byte
        self.assertEqual(util.int2byte(1), r[0])
        self.assertEqual(v[1:10], r[1:10])
        # TIME columns come back as timedeltas
        self.assertEqual(
            datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)),
            r[10])
        # time.struct_time round-trips as a datetime (seconds precision)
        self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])
        c.execute("delete from test_datatypes")

        # check nulls
        c.execute(
            "insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
            [None] * 12)
        c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = c.fetchone()
        self.assertEqual(tuple([None] * 12), r)
        c.execute("delete from test_datatypes")

        # check sequences type: every sequence flavour must work for
        # the "in %s" expansion
        for seq_type in (tuple, list, set, frozenset):
            c.execute(
                "insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)"
            )
            seq = seq_type([2, 6])
            c.execute(
                "select l from test_datatypes where i in %s order by i",
                (seq, ))
            r = c.fetchall()
            self.assertEqual(((4, ), (8, )), r)
            c.execute("delete from test_datatypes")
    finally:
        c.execute("drop table test_datatypes")
def _execute_command(self, command, sql):
    """Coroutine: send a COM_* *command* packet carrying *sql*,
    draining any still-active previous result set first.

    :param command: COM_* command byte (e.g. COM_QUERY)
    :param sql: payload, ``str`` (encoded with self._encoding) or ``bytes``
    :raises InterfaceError: if the connection is not open
    """
    if not self._writer:
        raise InterfaceError("(0, 'Not connected')")

    # If the last query was unbuffered, make sure it finishes before
    # sending new commands
    if self._result is not None:
        if self._result.unbuffered_active:
            warnings.warn("Previous unbuffered result was left incomplete")
            # BUGFIX: _finish_unbuffered_query is a coroutine; without
            # ``yield from`` it was created but never executed.
            yield from self._result._finish_unbuffered_query()
        while self._result.has_next:
            yield from self.next_result()
        self._result = None

    if isinstance(sql, str):
        sql = sql.encode(self._encoding)

    chunk_size = min(MAX_PACKET_LEN, len(sql) + 1)  # +1 is for command

    prelude = struct.pack('<i', chunk_size) + int2byte(command)
    self._write_bytes(prelude + sql[:chunk_size - 1])
    # logger.debug(dump_packet(prelude + sql))

    if chunk_size < MAX_PACKET_LEN:
        return

    # Payloads >= MAX_PACKET_LEN continue in follow-up packets with an
    # incrementing (mod 256) sequence id.
    seq_id = 1
    sql = sql[chunk_size - 1:]
    while True:
        chunk_size = min(MAX_PACKET_LEN, len(sql))
        prelude = struct.pack('<i', chunk_size)[:3]
        data = prelude + int2byte(seq_id % 256) + sql[:chunk_size]
        self._write_bytes(data)
        # logger.debug(dump_packet(data))
        sql = sql[chunk_size:]
        if not sql and chunk_size < MAX_PACKET_LEN:
            break
        seq_id += 1
def encoded(self, server_id, master_id=0):
    """
    server_id: the slave server-id
    master_id: usually 0. Appears as "master id" in SHOW SLAVE HOSTS
               on the master. Unknown what else it impacts.
    """
    # COM_REGISTER_SLAVE payload layout:
    #   1            [15] COM_REGISTER_SLAVE
    #   4            server-id
    #   1 + N        length-prefixed slave hostname
    #   1 + N        length-prefixed slave user
    #   1 + N        length-prefixed slave password
    #   2            slave mysql-port
    #   4            replication rank
    #   4            master-id
    host = self.hostname.encode()
    user = self.username.encode()
    passwd = self.password.encode()

    # command + server-id + three (len byte + string) fields
    # + port + replication rank + master-id
    packet_len = (1 + 4 +
                  1 + len(host) +
                  1 + len(user) +
                  1 + len(passwd) +
                  2 + 4 + 4)

    MAX_STRING_LEN = 257  # one byte for length + 256 chars

    def pascal(value):
        # 'p' = Pascal string: one length byte then the data,
        # truncated to 256 bytes.
        return struct.pack('<%dp' % min(MAX_STRING_LEN, len(value) + 1),
                           value)

    return (struct.pack('<i', packet_len) +
            int2byte(COM_REGISTER_SLAVE) +
            struct.pack('<L', server_id) +
            pascal(host) +
            pascal(user) +
            pascal(passwd) +
            struct.pack('<H', self.port) +
            struct.pack('<l', 0) +
            struct.pack('<l', master_id))
def encoded(self, server_id, master_id=0):
    """
    server_id: the slave server-id
    master_id: usually 0. Appears as "master id" in SHOW SLAVE HOSTS
               on the master. Unknown what else it impacts.
    """
    # 1              [15] COM_REGISTER_SLAVE
    # 4              server-id
    # 1              slaves hostname length
    # string[$len]   slaves hostname
    # 1              slaves user len
    # string[$len]   slaves user
    # 1              slaves password len
    # string[$len]   slaves password
    # 2              slaves mysql-port
    # 4              replication rank
    # 4              master-id

    # BUGFIX: struct's 'p' (Pascal string) format requires bytes on
    # Python 3, and the length fields must count encoded bytes rather
    # than characters -- encode the strings first (consistent with the
    # sibling implementation of this method).
    lhostname = len(self.hostname.encode())
    lusername = len(self.username.encode())
    lpassword = len(self.password.encode())

    packet_len = (1 +  # command
                  4 +  # server-id
                  1 +  # hostname length
                  lhostname +
                  1 +  # username length
                  lusername +
                  1 +  # password length
                  lpassword +
                  2 +  # slave mysql port
                  4 +  # replication rank
                  4)  # master-id

    MAX_STRING_LEN = 257  # one byte for length + 256 chars

    return (struct.pack('<i', packet_len) +
            int2byte(COM_REGISTER_SLAVE) +
            struct.pack('<L', server_id) +
            struct.pack('<%dp' % min(MAX_STRING_LEN, lhostname + 1),
                        self.hostname.encode()) +
            struct.pack('<%dp' % min(MAX_STRING_LEN, lusername + 1),
                        self.username.encode()) +
            struct.pack('<%dp' % min(MAX_STRING_LEN, lpassword + 1),
                        self.password.encode()) +
            struct.pack('<H', self.port) +
            struct.pack('<l', 0) +
            struct.pack('<l', master_id))
def __connect_to_stream(self):
    """Connect, negotiate binlog checksums if needed, and issue
    COM_BINLOG_DUMP to start streaming."""
    # log_pos (4) -- position in the binlog-file to start the stream with
    # flags (2) BINLOG_DUMP_NON_BLOCK (0 or 1)
    # server_id (4) -- server id of this slave
    # log_file (string.EOF) -- filename of the binlog on the master
    self._stream_connection = pymysql.connect(**self.__connection_settings)

    self.__use_checksum = self.__checksum_enabled()

    # If checksum is enabled we need to inform the server that we
    # support it
    if self.__use_checksum:
        cur = self._stream_connection.cursor()
        cur.execute(
            "set @master_binlog_checksum= @@global.binlog_checksum")
        cur.close()

    # only when log_file and log_pos both provided, the position info is
    # valid, if not, get the current position from master
    if self.log_file is None or self.log_pos is None:
        cur = self._stream_connection.cursor()
        cur.execute("SHOW MASTER STATUS")
        self.log_file, self.log_pos = cur.fetchone()[:2]
        cur.close()

    # 11 = 1 (command) + 4 (position) + 2 (flags) + 4 (server_id)
    prelude = struct.pack('<i', len(self.log_file) + 11) \
        + int2byte(COM_BINLOG_DUMP)
    if self.__resume_stream:
        prelude += struct.pack('<I', self.log_pos)
    else:
        prelude += struct.pack('<I', 4)

    # flags: 0 = blocking dump, 1 = BINLOG_DUMP_NON_BLOCK
    if self.__blocking:
        prelude += struct.pack('<h', 0)
    else:
        prelude += struct.pack('<h', 1)

    prelude += struct.pack('<I', self.__server_id)
    prelude += self.log_file.encode()

    # pymysql < 0.6 exposed the raw write file object; newer versions
    # go through _write_bytes().
    if pymysql.__version__ < "0.6":
        self._stream_connection.wfile.write(prelude)
        self._stream_connection.wfile.flush()
    else:
        self._stream_connection._write_bytes(prelude)
    self.__connected_stream = True
def __connect_to_stream(self):
    """Open the binlog stream: connect, enable checksum support on the
    server when required, then send COM_BINLOG_DUMP."""
    # log_pos (4) -- position in the binlog-file to start the stream with
    # flags (2) BINLOG_DUMP_NON_BLOCK (0 or 1)
    # server_id (4) -- server id of this slave
    # log_file (string.EOF) -- filename of the binlog on the master
    self._stream_connection = pymysql.connect(**self.__connection_settings)

    self.__use_checksum = self.__checksum_enabled()

    # If checksum is enabled we need to inform the server that we
    # support it
    if self.__use_checksum:
        cur = self._stream_connection.cursor()
        cur.execute("set @master_binlog_checksum= @@global.binlog_checksum")
        cur.close()

    # only when log_file and log_pos both provided, the position info is
    # valid, if not, get the current position from master
    if self.log_file is None or self.log_pos is None:
        cur = self._stream_connection.cursor()
        cur.execute("SHOW MASTER STATUS")
        self.log_file, self.log_pos = cur.fetchone()[:2]
        cur.close()

    # 11 = 1 (command) + 4 (position) + 2 (flags) + 4 (server_id)
    prelude = struct.pack('<i', len(self.log_file) + 11) \
        + int2byte(COM_BINLOG_DUMP)
    if self.__resume_stream:
        prelude += struct.pack('<I', self.log_pos)
    else:
        prelude += struct.pack('<I', 4)

    # flags: 0 = blocking dump, 1 = BINLOG_DUMP_NON_BLOCK
    if self.__blocking:
        prelude += struct.pack('<h', 0)
    else:
        prelude += struct.pack('<h', 1)

    prelude += struct.pack('<I', self.__server_id)
    prelude += self.log_file.encode()

    # pymysql < 0.6 exposed the raw write file object; newer versions
    # go through _write_bytes().
    if pymysql.__version__ < "0.6":
        self._stream_connection.wfile.write(prelude)
        self._stream_connection.wfile.flush()
    else:
        self._stream_connection._write_bytes(prelude)
    self.__connected_stream = True
def test_datatypes(self):
    """ test every data type """
    # NOTE(review): this body contained bare ``c.execute`` attribute
    # accesses (no call, no arguments) -- no-op statements that made
    # the test vacuous. Restored the calls with the SQL used by the
    # sibling implementations of this test in this file.
    conn = self.connections[0]
    c = conn.cursor()
    c.execute("create table test_datatypes (b bit, i int, l bigint, f real, s varchar(32), u varchar(32), bb blob, d date, dt datetime, ts timestamp, td time, t time, st datetime)")
    try:
        # insert values
        v = (True, -3, 123456789012, 5.7, "hello'\" world",
             u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset),
             datetime.date(1988, 2, 2), datetime.datetime.now(),
             datetime.timedelta(5, 6), datetime.time(16, 32),
             time.localtime())
        c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v)
        c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = c.fetchone()
        self.assertEqual(util.int2byte(1), r[0])
        self.assertEqual(v[1:8], r[1:8])
        # mysql throws away microseconds so we need to check datetimes
        # specially. additionally times are turned into timedeltas.
        self.assertEqual(datetime.datetime(*v[8].timetuple()[:6]), r[8])
        self.assertEqual(v[9], r[9])  # just timedeltas
        self.assertEqual(
            datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)),
            r[10])
        self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])
        c.execute("delete from test_datatypes")

        # check nulls
        c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", [None] * 12)
        c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = c.fetchone()
        self.assertEqual(tuple([None] * 12), r)
        c.execute("delete from test_datatypes")

        # check sequence type
        c.execute("insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)")
        c.execute("select l from test_datatypes where i in %s order by i", ((2, 6),))
        r = c.fetchall()
        self.assertEqual(((4, ), (8, )), r)
    finally:
        c.execute("drop table test_datatypes")
def _get_server_information(self):
    """Coroutine: parse the server's initial handshake (greeting) packet.

    Fills in protocol/server version, thread id, auth salt, capability
    flags, charset and status flags from the greeting payload.
    """
    i = 0
    packet = yield from self._read_packet()
    data = packet.get_all_data()
    # logger.debug(dump_packet(data))

    self.protocol_version = byte2int(data[i:i + 1])
    i += 1

    # server version is a NUL-terminated latin1 string
    server_end = data.find(int2byte(0), i)
    self.server_version = data[i:server_end].decode('latin1')
    i = server_end + 1

    # NOTE(review): struct.unpack returns a 1-tuple, so
    # server_thread_id is stored as a tuple here -- confirm callers
    # expect that before changing it.
    self.server_thread_id = struct.unpack('<I', data[i:i + 4])
    i += 4

    # first 8 bytes of the auth scramble
    self.salt = data[i:i + 8]
    i += 9  # 8 + 1(filler)

    # lower 16 bits of the capability flags
    self.server_capabilities = struct.unpack('<H', data[i:i + 2])[0]
    i += 2

    if len(data) >= i + 6:
        lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i + 6])
        i += 6
        self.server_language = lang
        self.server_charset = charset_by_id(lang).name
        self.server_status = stat
        # logger.debug("server_status: %s" % _convert_to_str(stat))

        # upper 16 bits of the capability flags
        self.server_capabilities |= cap_h << 16
        # logger.debug("salt_len: %s" % _convert_to_str(salt_len))
        salt_len = max(12, salt_len - 9)

    # reserved
    i += 10

    # NOTE(review): if the packet was too short for the block above,
    # salt_len is unbound here and this would raise -- presumably
    # servers always send the extended header; confirm.
    if len(data) >= i + salt_len:
        # salt_len includes auth_plugin_data_part_1 and filler
        self.salt += data[i:i + salt_len]
def _request_authentication(self):
    """Coroutine: send the handshake-response (auth) packet via
    write_packet(), falling back to the legacy 3.23 scramble when the
    server replies with an EOF packet (old_passwords mode).

    :raises ValueError: if no username was configured
    """
    self.client_flag |= CLIENT.CAPABILITIES
    if int(self.server_version.split('.', 1)[0]) >= 5:
        self.client_flag |= CLIENT.MULTI_RESULTS

    if self._user is None:
        raise ValueError("Did not specify a username")

    charset_id = charset_by_name(self._charset).id
    user = self._user
    if isinstance(self._user, str):
        user = self._user.encode(self._encoding)

    # client flags (4) + max packet size (4) + charset (1) + filler (23)
    data_init = struct.pack('<iIB23s', self.client_flag, 1,
                            charset_id, b'')

    data = data_init + user + b'\0' + _scramble(
        self._password.encode('latin1'), self.salt)
    # TODO: sync auth code with changes in PyMySQL

    if self._db:
        db = self._db
        if isinstance(self._db, str):
            db = self._db.encode(self._encoding)
        data += db + int2byte(0)

    self.write_packet(data)
    auth_packet = yield from self._read_packet()

    # if old_passwords is enabled the packet will be 1 byte long and
    # have the octet 254
    if auth_packet.is_eof_packet():
        # send legacy handshake
        data = _scramble_323(self._password.encode('latin1'),
                             self.salt) + b'\0'
        self.write_packet(data)
        # BUGFIX: _read_packet is a coroutine; without ``yield from``
        # this assigned a generator object instead of the reply packet.
        auth_packet = yield from self._read_packet()
def test_datatypes(self):
    """ test every data type """
    # NOTE(review): this body contained bare ``c.execute`` attribute
    # accesses (no call, no arguments) -- no-op statements that made
    # the test vacuous. Restored the calls with the SQL used by the
    # sibling implementations of this test in this file.
    conn = self.connections[0]
    c = conn.cursor()
    c.execute("create table test_datatypes (b bit, i int, l bigint, f real, s varchar(32), u varchar(32), bb blob, d date, dt datetime, ts timestamp, td time, t time, st datetime)")
    try:
        # insert values
        v = (True, -3, 123456789012, 5.7, "hello'\" world", u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset), datetime.date(1988,2,2), datetime.datetime.now(), datetime.timedelta(5,6), datetime.time(16,32), time.localtime())
        c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v)
        c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = c.fetchone()
        self.assertEqual(util.int2byte(1), r[0])
        self.assertEqual(v[1:8], r[1:8])
        # mysql throws away microseconds so we need to check datetimes
        # specially. additionally times are turned into timedeltas.
        self.assertEqual(datetime.datetime(*v[8].timetuple()[:6]), r[8])
        self.assertEqual(v[9], r[9])  # just timedeltas
        self.assertEqual(datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)), r[10])
        self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])
        c.execute("delete from test_datatypes")

        # check nulls
        c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", [None] * 12)
        c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = c.fetchone()
        self.assertEqual(tuple([None] * 12), r)
        c.execute("delete from test_datatypes")

        # check sequence type
        c.execute("insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)")
        c.execute("select l from test_datatypes where i in %s order by i", ((2,6),))
        r = c.fetchall()
        self.assertEqual(((4,),(8,)), r)
    finally:
        c.execute("drop table test_datatypes")
def _request_authentication(self):
    """Send the handshake-response packet, optionally upgrading the
    socket to TLS first (via a greenlet so the tornado future can be
    awaited from synchronous-looking code).

    :raises ValueError: if no username was configured
    """
    self.client_flag |= CLIENT.CAPABILITIES
    if int(self.server_version.split(".", 1)[0]) >= 5:
        self.client_flag |= CLIENT.MULTI_RESULTS

    if self.user is None:
        raise ValueError("Did not specify a username")

    charset_id = charset_by_name(self.charset).id
    if isinstance(self.user, text_type):
        self.user = self.user.encode(self.encoding)

    # client flags (4) + max packet size (4) + charset (1) + filler (23)
    data_init = struct.pack("<iIB23s", self.client_flag, 1, charset_id, b"")

    next_packet = 1

    if self.ssl:
        # Announce SSL support by sending the bare init packet, then
        # wrap the socket before continuing the handshake.
        data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
        next_packet += 1
        self._write_bytes(data)

        child_gr = greenlet.getcurrent()
        main = child_gr.parent
        assert main is not None, "Execut must be running in child greenlet"

        def finish(future):
            # Resume the child greenlet with the TLS stream (or raise
            # the failure inside it) once start_tls completes.
            try:
                stream = future.result()
                child_gr.switch(stream)
            except Exception as e:
                child_gr.throw(e)

        # Verify the server certificate only when a CA bundle was given.
        cert_reqs = ssl.CERT_NONE if self.ca is None else ssl.CERT_REQUIRED
        # BUGFIX: cert_reqs/ca_certs were swapped -- the verification
        # mode constant was passed as "ca_certs" and self.ca was never
        # handed to the TLS layer, so certificates were never checked
        # against the configured CA.
        future = self.socket.start_tls(
            None,
            {
                "keyfile": self.key,
                "certfile": self.cert,
                "ssl_version": ssl.PROTOCOL_TLSv1,
                "cert_reqs": cert_reqs,
                "ca_certs": self.ca,
            },
        )
        IOLoop.current().add_future(future, finish)
        self.socket = main.switch()
        self._rfile = self.socket

    data = data_init + self.user + b"\0" + _scramble(
        self.password.encode("latin1"), self.salt)

    if self.db:
        if isinstance(self.db, text_type):
            self.db = self.db.encode(self.encoding)
        data += self.db + int2byte(0)

    data = pack_int24(len(data)) + int2byte(next_packet) + data
    next_packet += 2

    if DEBUG:
        dump_packet(data)
    self._write_bytes(data)

    auth_packet = self._read_packet()

    # if old_passwords is enabled the packet will be 1 byte long and
    # have the octet 254
    if auth_packet.is_eof_packet():
        # send legacy handshake
        data = _scramble_323(self.password.encode("latin1"),
                             self.salt) + b"\0"
        data = pack_int24(len(data)) + int2byte(next_packet) + data
        self._write_bytes(data)
        auth_packet = self._read_packet()
def wait_closed(self):
    """Send quit command and then close socket connection"""
    header = struct.pack('<i', 1)
    self._writer.write(header + int2byte(COM_QUIT))
    yield from self._writer.drain()
    self.close()
def __connect_to_stream(self):
    """Connect and start streaming, using either a file/position based
    COM_BINLOG_DUMP or a GTID based COM_BINLOG_DUMP_GTID request."""
    # log_pos (4) -- position in the binlog-file to start the stream with
    # flags (2) BINLOG_DUMP_NON_BLOCK (0 or 1)
    # server_id (4) -- server id of this slave
    # log_file (string.EOF) -- filename of the binlog on the master
    self._stream_connection = pymysql.connect(**self.__connection_settings)

    self.__use_checksum = self.__checksum_enabled()

    # If checksum is enabled we need to inform the server that we
    # support it
    if self.__use_checksum:
        cur = self._stream_connection.cursor()
        cur.execute("set @master_binlog_checksum= @@global.binlog_checksum")
        cur.close()

    if not self.auto_position:
        # only when log_file and log_pos both provided, the position
        # info is valid, if not, get the current position from master
        if self.log_file is None or self.log_pos is None:
            cur = self._stream_connection.cursor()
            cur.execute("SHOW MASTER STATUS")
            self.log_file, self.log_pos = cur.fetchone()[:2]
            cur.close()

        # 11 = 1 (command) + 4 (position) + 2 (flags) + 4 (server_id)
        prelude = struct.pack('<i', len(self.log_file) + 11) \
            + int2byte(COM_BINLOG_DUMP)
        if self.__resume_stream:
            prelude += struct.pack('<I', self.log_pos)
        else:
            prelude += struct.pack('<I', 4)

        # flags: 0 = blocking dump, 1 = BINLOG_DUMP_NON_BLOCK
        if self.__blocking:
            prelude += struct.pack('<h', 0)
        else:
            prelude += struct.pack('<h', 1)

        prelude += struct.pack('<I', self.__server_id)
        prelude += self.log_file.encode()
    else:
        # Format for mysql packet master_auto_position
        #
        # All fields are little endian
        # All fields are unsigned
        # Packet length   uint   4bytes
        # Packet type     byte   1byte   == 0x1e
        # Binlog flags    ushort 2bytes  == 0 (for retrocompatibilty)
        # Server id       uint   4bytes
        # binlognamesize  uint   4bytes
        # binlogname      str    Nbytes  N = binlognamesize
        #                                Zeroified
        # binlog position uint   4bytes  == 4
        # payload_size    uint   4bytes
        #
        # What come next, is the payload, where the slave gtid_executed
        # is sent to the master
        # n_sid           ulong  8bytes  == which size is the gtid_set
        # | sid           uuid   16bytes UUID as a binary
        # | n_intervals   ulong  8bytes  == how many intervals are sent
        #                                   for this gtid
        # | | start       ulong  8bytes  Start position of this interval
        # | | stop        ulong  8bytes  Stop position of this interval
        #
        # A gtid set looks like:
        #   19d69c1e-ae97-4b8c-a1ef-9e12ba966457:1-3:8-10,
        #   1c2aad49-ae92-409a-b4df-d05a03e4702e:42-47:80-100:130-140
        #
        # In this particular gtid set,
        # 19d69c1e-ae97-4b8c-a1ef-9e12ba966457:1-3:8-10 is the first
        # member of the set, it is called a gtid.
        # In this gtid, 19d69c1e-ae97-4b8c-a1ef-9e12ba966457 is the sid
        # and have two intervals, 1-3 and 8-10, 1 is the start position
        # of the first interval, 3 is the stop position of the first
        # interval.
        gtid_set = GtidSet(self.auto_position)
        encoded_data_size = gtid_set.encoded_length

        header_size = (2 +  # binlog_flags
                       4 +  # server_id
                       4 +  # binlog_name_info_size
                       4 +  # empty binlog name
                       8 +  # binlog_pos_info_size
                       4)  # encoded_data_size

        prelude = b'' + struct.pack('<i', header_size + encoded_data_size) \
            + int2byte(COM_BINLOG_DUMP_GTID)

        # binlog_flags = 0 (2 bytes)
        prelude += struct.pack('<H', 0)
        # server_id (4 bytes)
        prelude += struct.pack('<I', self.__server_id)
        # binlog_name_info_size (4 bytes)
        prelude += struct.pack('<I', 3)
        # empty_binlog_name (4 bytes)
        prelude += b'\0\0\0'
        # binlog_pos_info (8 bytes)
        prelude += struct.pack('<Q', 4)
        # encoded_data_size (4 bytes)
        prelude += struct.pack('<I', gtid_set.encoded_length)
        # encoded_data
        prelude += gtid_set.encoded()

    # pymysql < 0.6 exposed the raw write file object; newer versions
    # go through _write_bytes().
    if pymysql.__version__ < "0.6":
        self._stream_connection.wfile.write(prelude)
        self._stream_connection.wfile.flush()
    else:
        self._stream_connection._write_bytes(prelude)
    self.__connected_stream = True
def wait_closed(self):
    """Issue COM_QUIT, flush the writer, then close the connection."""
    packet = b''.join([struct.pack('<i', 1), int2byte(COM_QUIT)])
    self._writer.write(packet)
    yield from self._writer.drain()
    self.close()
def wait_closed(self):
    """Send quit command and then close socket connection"""
    goodbye = struct.pack("<i", 1) + int2byte(COM_QUIT)
    self._writer.write(goodbye)
    yield from self._writer.drain()
    self.close()
def _read_table_id(self):
    """Read the 6-byte table id from the packet and return it as an int,
    zero-padding it to 8 bytes so it decodes as a little-endian '<Q'."""
    raw = self.packet.read(6)
    padded = raw + int2byte(0) + int2byte(0)
    return struct.unpack('<Q', padded)[0]
def __connect_to_stream(self):
    """Open a dedicated pymysql connection to the master and request the
    binlog stream.

    Sends COM_BINLOG_DUMP when streaming by file/position, or
    COM_BINLOG_DUMP_GTID when ``self.auto_position`` holds a GTID set.
    """
    # COM_BINLOG_DUMP payload layout:
    #   log_pos (4)   -- position in the binlog-file to start the stream with
    #   flags (2)     -- BINLOG_DUMP_NON_BLOCK (0 or 1)
    #   server_id (4) -- server id of this slave
    #   log_file (string.EOF) -- filename of the binlog on the master
    self._stream_connection = self.pymysql_wrapper(
        **self.__connection_settings)

    self.__use_checksum = self.__checksum_enabled()

    # If checksum is enabled we need to inform the server about the that
    # we support it
    if self.__use_checksum:
        cur = self._stream_connection.cursor()
        cur.execute(
            "set @master_binlog_checksum= @@global.binlog_checksum")
        cur.close()

    if self.slave_uuid:
        cur = self._stream_connection.cursor()
        cur.execute("set @slave_uuid= '%s'" % self.slave_uuid)
        cur.close()

    self._register_slave()

    if not self.auto_position:
        # only when log_file and log_pos both provided, the position info is
        # valid, if not, get the current position from master
        if self.log_file is None or self.log_pos is None:
            cur = self._stream_connection.cursor()
            cur.execute("SHOW MASTER STATUS")
            self.log_file, self.log_pos = cur.fetchone()[:2]
            cur.close()

        # packet length = 4 (log_pos) + 2 (flags) + 4 (server_id)
        #               + 1 (command byte) + len(log_file)
        prelude = struct.pack('<i', len(self.log_file) + 11) \
            + int2byte(COM_BINLOG_DUMP)

        if self.__resume_stream:
            prelude += struct.pack('<I', self.log_pos)
        else:
            # position 4 skips the 4-byte binlog magic at the file start
            prelude += struct.pack('<I', 4)

        if self.__blocking:
            prelude += struct.pack('<h', 0)
        else:
            prelude += struct.pack('<h', 1)

        prelude += struct.pack('<I', self.__server_id)
        prelude += self.log_file.encode()
    else:
        # Format for mysql packet master_auto_position
        #
        # All fields are little endian
        # All fields are unsigned
        # Packet length   uint   4bytes
        # Packet type     byte   1byte   == 0x1e
        # Binlog flags    ushort 2bytes  == 0 (for retrocompatibilty)
        # Server id       uint   4bytes
        # binlognamesize  uint   4bytes
        # binlogname      str    Nbytes  N = binlognamesize, zeroified
        # binlog position uint   4bytes  == 4
        # payload_size    uint   4bytes
        #
        # What come next, is the payload, where the slave gtid_executed
        # is sent to the master
        # n_sid           ulong  8bytes  == which size is the gtid_set
        # | sid           uuid   16bytes UUID as a binary
        # | n_intervals   ulong  8bytes  == how many intervals are sent
        # |                              for this gtid
        # | | start       ulong  8bytes  Start position of this interval
        # | | stop        ulong  8bytes  Stop position of this interval
        #
        # A gtid set looks like:
        #   19d69c1e-ae97-4b8c-a1ef-9e12ba966457:1-3:8-10,
        #   1c2aad49-ae92-409a-b4df-d05a03e4702e:42-47:80-100:130-140
        #
        # In this particular gtid set,
        # 19d69c1e-ae97-4b8c-a1ef-9e12ba966457:1-3:8-10
        # is the first member of the set, it is called a gtid.
        # In this gtid, 19d69c1e-ae97-4b8c-a1ef-9e12ba966457 is the sid
        # and have two intervals, 1-3 and 8-10, 1 is the start position of
        # the first interval, 3 is the stop position of the first interval.
        gtid_set = GtidSet(self.auto_position)
        encoded_data_size = gtid_set.encoded_length

        header_size = (
            2 +  # binlog_flags
            4 +  # server_id
            4 +  # binlog_name_info_size
            4 +  # empty binlog name
            8 +  # binlog_pos_info_size
            4)  # encoded_data_size

        prelude = b'' + struct.pack('<i', header_size + encoded_data_size) \
            + int2byte(COM_BINLOG_DUMP_GTID)

        # binlog_flags = 0 (2 bytes)
        prelude += struct.pack('<H', 0)
        # server_id (4 bytes)
        prelude += struct.pack('<I', self.__server_id)
        # binlog_name_info_size (4 bytes)
        prelude += struct.pack('<I', 3)
        # empty_binlog_name (4 bytes)
        prelude += b'\0\0\0'
        # binlog_pos_info (8 bytes)
        prelude += struct.pack('<Q', 4)
        # encoded_data_size (4 bytes)
        prelude += struct.pack('<I', gtid_set.encoded_length)
        # encoded_data
        prelude += gtid_set.encoded()

    # pymysql < 0.6 exposed the raw write file object; newer versions
    # only offer the private _write_bytes/_next_seq_id API.
    if pymysql.__version__ < "0.6":
        self._stream_connection.wfile.write(prelude)
        self._stream_connection.wfile.flush()
    else:
        self._stream_connection._write_bytes(prelude)
        self._stream_connection._next_seq_id = 1
    self.__connected_stream = True
def _request_authentication(self):
    """Build and send the client handshake-response (authentication)
    packet, optionally upgrading the socket to SSL/TLS first.

    Reads connection state (user, db, charset, ssl settings) and writes
    packets via ``self._write_bytes``; server-side auth failures surface
    through ``MysqlPacket.check_error``.
    """
    self.client_flag |= CAPABILITIES
    if self.server_version.startswith('5'):
        self.client_flag |= MULTI_RESULTS

    if self.user is None:
        raise ValueError("Did not specify a username")

    charset_id = charset_by_name(self.charset).id
    if isinstance(self.user, text_type):
        self.user = self.user.encode(self.encoding)

    # fixed handshake-response prefix: client_flag (4) + a 4-byte field
    # (presumably max_packet_size -- TODO confirm) + charset (1) + 23 zeros
    data_init = struct.pack('<i', self.client_flag) + struct.pack("<I", 1) + \
        int2byte(charset_id) + int2byte(0)*23

    next_packet = 1

    if self.ssl:
        # send the abbreviated handshake, then wrap the same socket in TLS
        data = pack_int24(
            len(data_init)) + int2byte(next_packet) + data_init
        next_packet += 1

        if DEBUG: dump_packet(data)

        self._write_bytes(data)
        self.socket = ssl.wrap_socket(self.socket, keyfile=self.key,
                                      certfile=self.cert,
                                      ssl_version=ssl.PROTOCOL_TLSv1,
                                      cert_reqs=ssl.CERT_REQUIRED,
                                      ca_certs=self.ca)

    data = data_init + self.user + b'\0' + self.forwarded_auth_response

    if self.db:
        if isinstance(self.db, text_type):
            self.db = self.db.encode(self.encoding)
        data += self.db + int2byte(0)

    data = pack_int24(len(data)) + int2byte(next_packet) + data
    # NOTE(review): += 2 appears to skip the sequence id consumed by the
    # server's reply packet -- presumed intentional, confirm against protocol
    next_packet += 2

    if DEBUG: dump_packet(data)

    self._write_bytes(data)

    auth_packet = MysqlPacket(self)
    auth_packet.check_error()
    if DEBUG: auth_packet.dump()

    # if old_passwords is enabled the packet will be 1 byte long and
    # have the octet 254
    if auth_packet.is_eof_packet():
        # send legacy handshake
        #raise NotImplementedError, "old_passwords are not supported. Check to see if mysqld was started with --old-passwords, if old-passwords=1 in a my.cnf file, or if there are some short hashes in your mysql.user table."
        # TODO: is this the correct charset?
        data = _scramble_323(self.password.encode(self.encoding),
                             self.salt.encode(self.encoding)) + b'\0'
        data = pack_int24(len(data)) + int2byte(next_packet) + data
        self._write_bytes(data)
        auth_packet = MysqlPacket(self)
        auth_packet.check_error()
        if DEBUG: auth_packet.dump()
def _request_authentication(self):
    """Send the MySQL handshake-response packet, negotiating TLS first when
    SSL is configured, and fall back to the legacy old-password scramble if
    the server answers with a 1-byte EOF (0xfe) packet."""
    self.client_flag |= CAPABILITIES
    if self.server_version.startswith('5'):
        self.client_flag |= MULTI_RESULTS

    if self.user is None:
        raise ValueError("Did not specify a username")

    charset_id = charset_by_name(self.charset).id
    if isinstance(self.user, text_type):
        self.user = self.user.encode(self.encoding)

    # Fixed prefix of the handshake response: capability flags, a 4-byte
    # field, the charset id, and 23 reserved zero bytes.
    fixed_header = (struct.pack('<i', self.client_flag)
                    + struct.pack("<I", 1)
                    + int2byte(charset_id)
                    + int2byte(0) * 23)

    seq_no = 1

    if self.ssl:
        # Abbreviated handshake first, then upgrade the socket to TLS.
        ssl_request = pack_int24(len(fixed_header)) + int2byte(seq_no) \
            + fixed_header
        seq_no += 1
        if DEBUG:
            dump_packet(ssl_request)
        self._write_bytes(ssl_request)
        self.socket = ssl.wrap_socket(self.socket,
                                      keyfile=self.key,
                                      certfile=self.cert,
                                      ssl_version=ssl.PROTOCOL_TLSv1,
                                      cert_reqs=ssl.CERT_REQUIRED,
                                      ca_certs=self.ca)

    payload = fixed_header + self.user + b'\0' + self.forwarded_auth_response

    if self.db:
        if isinstance(self.db, text_type):
            self.db = self.db.encode(self.encoding)
        payload += self.db + int2byte(0)

    packet = pack_int24(len(payload)) + int2byte(seq_no) + payload
    seq_no += 2
    if DEBUG:
        dump_packet(packet)
    self._write_bytes(packet)

    auth_packet = MysqlPacket(self)
    auth_packet.check_error()
    if DEBUG:
        auth_packet.dump()

    # if old_passwords is enabled the packet will be 1 byte long and
    # have the octet 254
    if auth_packet.is_eof_packet():
        # send legacy handshake
        # TODO: is this the correct charset?
        scramble = _scramble_323(self.password.encode(self.encoding),
                                 self.salt.encode(self.encoding)) + b'\0'
        legacy_packet = pack_int24(len(scramble)) + int2byte(seq_no) \
            + scramble
        self._write_bytes(legacy_packet)
        auth_packet = MysqlPacket(self)
        auth_packet.check_error()
        if DEBUG:
            auth_packet.dump()
def __connect_to_stream(self):
    """Open a dedicated pymysql connection to the master, configure session
    options (checksum, slave uuid, heartbeat), register as a slave, and
    request the binlog stream via COM_BINLOG_DUMP or, when
    ``self.auto_position`` is set, COM_BINLOG_DUMP_GTID.

    Raises BinLogNotEnabled if the master reports no binlog position.
    """
    # COM_BINLOG_DUMP payload layout:
    #   log_pos (4)   -- position in the binlog-file to start the stream with
    #   flags (2)     -- BINLOG_DUMP_NON_BLOCK (0 or 1)
    #   server_id (4) -- server id of this slave
    #   log_file (string.EOF) -- filename of the binlog on the master
    self._stream_connection = self.pymysql_wrapper(**self.__connection_settings)

    self.__use_checksum = self.__checksum_enabled()

    # If checksum is enabled we need to inform the server about the that
    # we support it
    if self.__use_checksum:
        cur = self._stream_connection.cursor()
        cur.execute("set @master_binlog_checksum= @@global.binlog_checksum")
        cur.close()

    if self.slave_uuid:
        cur = self._stream_connection.cursor()
        cur.execute("set @slave_uuid= '%s'" % self.slave_uuid)
        cur.close()

    if self.slave_heartbeat:
        # 4294967 is documented as the max value for heartbeats
        net_timeout = float(self.__connection_settings.get('read_timeout',
                                                           4294967))
        # If heartbeat is too low, the connection will disconnect before,
        # this is also the behavior in mysql
        heartbeat = float(min(net_timeout/2., self.slave_heartbeat))
        if heartbeat > 4294967:
            heartbeat = 4294967
        # master_heartbeat_period is nanoseconds
        heartbeat = int(heartbeat * 1000000000)
        cur = self._stream_connection.cursor()
        cur.execute("set @master_heartbeat_period= %d" % heartbeat)
        cur.close()

    self._register_slave()

    if not self.auto_position:
        # only when log_file and log_pos both provided, the position info is
        # valid, if not, get the current position from master
        if self.log_file is None or self.log_pos is None:
            cur = self._stream_connection.cursor()
            cur.execute("SHOW MASTER STATUS")
            master_status = cur.fetchone()
            if master_status is None:
                raise BinLogNotEnabled()
            self.log_file, self.log_pos = master_status[:2]
            cur.close()

        # packet length = 4 (log_pos) + 2 (flags) + 4 (server_id)
        #               + 1 (command byte) + len(log_file)
        prelude = struct.pack('<i', len(self.log_file) + 11) \
            + int2byte(COM_BINLOG_DUMP)

        if self.__resume_stream:
            prelude += struct.pack('<I', self.log_pos)
        else:
            # position 4 skips the 4-byte binlog magic at the file start
            prelude += struct.pack('<I', 4)

        flags = 0
        if not self.__blocking:
            flags |= 0x01  # BINLOG_DUMP_NON_BLOCK
        prelude += struct.pack('<H', flags)

        prelude += struct.pack('<I', self.__server_id)
        prelude += self.log_file.encode()
    else:
        # Format for mysql packet master_auto_position
        #
        # All fields are little endian
        # All fields are unsigned
        # Packet length   uint   4bytes
        # Packet type     byte   1byte   == 0x1e
        # Binlog flags    ushort 2bytes  == 0 (for retrocompatibilty)
        # Server id       uint   4bytes
        # binlognamesize  uint   4bytes
        # binlogname      str    Nbytes  N = binlognamesize, zeroified
        # binlog position uint   4bytes  == 4
        # payload_size    uint   4bytes
        #
        # What come next, is the payload, where the slave gtid_executed
        # is sent to the master
        # n_sid           ulong  8bytes  == which size is the gtid_set
        # | sid           uuid   16bytes UUID as a binary
        # | n_intervals   ulong  8bytes  == how many intervals are sent
        # |                              for this gtid
        # | | start       ulong  8bytes  Start position of this interval
        # | | stop        ulong  8bytes  Stop position of this interval
        #
        # A gtid set looks like:
        #   19d69c1e-ae97-4b8c-a1ef-9e12ba966457:1-3:8-10,
        #   1c2aad49-ae92-409a-b4df-d05a03e4702e:42-47:80-100:130-140
        #
        # In this particular gtid set,
        # 19d69c1e-ae97-4b8c-a1ef-9e12ba966457:1-3:8-10
        # is the first member of the set, it is called a gtid.
        # In this gtid, 19d69c1e-ae97-4b8c-a1ef-9e12ba966457 is the sid
        # and have two intervals, 1-3 and 8-10, 1 is the start position of
        # the first interval, 3 is the stop position of the first interval.
        gtid_set = GtidSet(self.auto_position)
        encoded_data_size = gtid_set.encoded_length

        header_size = (2 +  # binlog_flags
                       4 +  # server_id
                       4 +  # binlog_name_info_size
                       4 +  # empty binlog name
                       8 +  # binlog_pos_info_size
                       4)  # encoded_data_size

        prelude = b'' + struct.pack('<i', header_size + encoded_data_size)\
            + int2byte(COM_BINLOG_DUMP_GTID)

        flags = 0
        if not self.__blocking:
            flags |= 0x01  # BINLOG_DUMP_NON_BLOCK
        flags |= 0x04  # BINLOG_THROUGH_GTID

        # binlog_flags (2 bytes)
        # see:
        # https://dev.mysql.com/doc/internals/en/com-binlog-dump-gtid.html
        prelude += struct.pack('<H', flags)
        # server_id (4 bytes)
        prelude += struct.pack('<I', self.__server_id)
        # binlog_name_info_size (4 bytes)
        prelude += struct.pack('<I', 3)
        # empty_binlog_name (4 bytes)
        prelude += b'\0\0\0'
        # binlog_pos_info (8 bytes)
        prelude += struct.pack('<Q', 4)
        # encoded_data_size (4 bytes)
        prelude += struct.pack('<I', gtid_set.encoded_length)
        # encoded_data
        prelude += gtid_set.encoded()

    # pymysql < 0.6 exposed the raw write file object; newer versions
    # only offer the private _write_bytes/_next_seq_id API.
    if pymysql.__version__ < "0.6":
        self._stream_connection.wfile.write(prelude)
        self._stream_connection.wfile.flush()
    else:
        self._stream_connection._write_bytes(prelude)
        self._stream_connection._next_seq_id = 1
    self.__connected_stream = True
def __connect_to_stream(self):
    """Open a dedicated pymysql connection to the master, configure session
    options (checksum, slave uuid, heartbeat), register as a slave, and
    request the binlog stream via COM_BINLOG_DUMP or, when
    ``self.auto_position`` is set, COM_BINLOG_DUMP_GTID.

    Raises BinLogNotEnabled if the master reports no binlog position.
    """
    # COM_BINLOG_DUMP payload layout:
    #   log_pos (4)   -- position in the binlog-file to start the stream with
    #   flags (2)     -- BINLOG_DUMP_NON_BLOCK (0 or 1)
    #   server_id (4) -- server id of this slave
    #   log_file (string.EOF) -- filename of the binlog on the master
    self._stream_connection = self.pymysql_wrapper(
        **self.__connection_settings)

    self.__use_checksum = self.__checksum_enabled()

    # If checksum is enabled we need to inform the server about the that
    # we support it
    if self.__use_checksum:
        cur = self._stream_connection.cursor()
        cur.execute(
            "set @master_binlog_checksum= @@global.binlog_checksum")
        cur.close()

    if self.slave_uuid:
        cur = self._stream_connection.cursor()
        cur.execute("set @slave_uuid= '%s'" % self.slave_uuid)
        cur.close()

    if self.slave_heartbeat:
        # 4294967 is documented as the max value for heartbeats
        net_timeout = float(
            self.__connection_settings.get('read_timeout', 4294967))
        # If heartbeat is too low, the connection will disconnect before,
        # this is also the behavior in mysql
        heartbeat = float(min(net_timeout / 2., self.slave_heartbeat))
        if heartbeat > 4294967:
            heartbeat = 4294967
        # master_heartbeat_period is nanoseconds
        heartbeat = int(heartbeat * 1000000000)
        cur = self._stream_connection.cursor()
        cur.execute("set @master_heartbeat_period= %d" % heartbeat)
        cur.close()

    self._register_slave()

    if not self.auto_position:
        # only when log_file and log_pos both provided, the position info is
        # valid, if not, get the current position from master
        if self.log_file is None or self.log_pos is None:
            cur = self._stream_connection.cursor()
            cur.execute("SHOW MASTER STATUS")
            master_status = cur.fetchone()
            if master_status is None:
                raise BinLogNotEnabled()
            self.log_file, self.log_pos = master_status[:2]
            cur.close()

        # packet length = 4 (log_pos) + 2 (flags) + 4 (server_id)
        #               + 1 (command byte) + len(log_file)
        prelude = struct.pack('<i', len(self.log_file) + 11) \
            + int2byte(COM_BINLOG_DUMP)

        if self.__resume_stream:
            prelude += struct.pack('<I', self.log_pos)
        else:
            # position 4 skips the 4-byte binlog magic at the file start
            prelude += struct.pack('<I', 4)

        flags = 0
        if not self.__blocking:
            flags |= 0x01  # BINLOG_DUMP_NON_BLOCK
        prelude += struct.pack('<H', flags)

        prelude += struct.pack('<I', self.__server_id)
        prelude += self.log_file.encode()
    else:
        # Format for mysql packet master_auto_position
        #
        # All fields are little endian
        # All fields are unsigned
        # Packet length   uint   4bytes
        # Packet type     byte   1byte   == 0x1e
        # Binlog flags    ushort 2bytes  == 0 (for retrocompatibilty)
        # Server id       uint   4bytes
        # binlognamesize  uint   4bytes
        # binlogname      str    Nbytes  N = binlognamesize, zeroified
        # binlog position uint   4bytes  == 4
        # payload_size    uint   4bytes
        #
        # What come next, is the payload, where the slave gtid_executed
        # is sent to the master
        # n_sid           ulong  8bytes  == which size is the gtid_set
        # | sid           uuid   16bytes UUID as a binary
        # | n_intervals   ulong  8bytes  == how many intervals are sent
        # |                              for this gtid
        # | | start       ulong  8bytes  Start position of this interval
        # | | stop        ulong  8bytes  Stop position of this interval
        #
        # A gtid set looks like:
        #   19d69c1e-ae97-4b8c-a1ef-9e12ba966457:1-3:8-10,
        #   1c2aad49-ae92-409a-b4df-d05a03e4702e:42-47:80-100:130-140
        #
        # In this particular gtid set,
        # 19d69c1e-ae97-4b8c-a1ef-9e12ba966457:1-3:8-10
        # is the first member of the set, it is called a gtid.
        # In this gtid, 19d69c1e-ae97-4b8c-a1ef-9e12ba966457 is the sid
        # and have two intervals, 1-3 and 8-10, 1 is the start position of
        # the first interval, 3 is the stop position of the first interval.
        gtid_set = GtidSet(self.auto_position)
        encoded_data_size = gtid_set.encoded_length

        header_size = (
            2 +  # binlog_flags
            4 +  # server_id
            4 +  # binlog_name_info_size
            4 +  # empty binlog name
            8 +  # binlog_pos_info_size
            4)  # encoded_data_size

        prelude = b'' + struct.pack('<i', header_size + encoded_data_size)\
            + int2byte(COM_BINLOG_DUMP_GTID)

        flags = 0
        if not self.__blocking:
            flags |= 0x01  # BINLOG_DUMP_NON_BLOCK
        flags |= 0x04  # BINLOG_THROUGH_GTID

        # binlog_flags (2 bytes)
        # see:
        # https://dev.mysql.com/doc/internals/en/com-binlog-dump-gtid.html
        prelude += struct.pack('<H', flags)
        # server_id (4 bytes)
        prelude += struct.pack('<I', self.__server_id)
        # binlog_name_info_size (4 bytes)
        prelude += struct.pack('<I', 3)
        # empty_binlog_name (4 bytes)
        prelude += b'\0\0\0'
        # binlog_pos_info (8 bytes)
        prelude += struct.pack('<Q', 4)
        # encoded_data_size (4 bytes)
        prelude += struct.pack('<I', gtid_set.encoded_length)
        # encoded_data
        prelude += gtid_set.encoded()

    # pymysql < 0.6 exposed the raw write file object; newer versions
    # only offer the private _write_bytes/_next_seq_id API.
    if parse_version(pymysql.__version__) < parse_version("0.6"):
        self._stream_connection.wfile.write(prelude)
        self._stream_connection.wfile.flush()
    else:
        self._stream_connection._write_bytes(prelude)
        self._stream_connection._next_seq_id = 1
    self.__connected_stream = True
def test_datatypes(self):
    """Round-trip every supported data type through MySQL and check the
    values that come back, including NULLs and sequence parameters."""
    conn = self.connections[0]
    c = yield from conn.cursor()
    yield from c.execute(
        "create table test_datatypes (b bit, i int, l bigint, f real, s "
        "varchar(32), u varchar(32), bb blob, d date, dt datetime, "
        "ts timestamp, td time, t time, st datetime)")
    try:
        # insert values
        v = (
            True, -3, 123456789012, 5.7, "hello'\" world",
            u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset),
            datetime.date(1988, 2, 2),
            datetime.datetime.now().replace(microsecond=0),
            datetime.timedelta(5, 6), datetime.time(16, 32),
            time.localtime())
        yield from c.execute(
            "insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) "
            "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v)
        yield from c.execute(
            "select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = yield from c.fetchone()
        # BIT comes back as a single byte b'\x01'
        self.assertEqual(util.int2byte(1), r[0])
        # self.assertEqual(v[1:8], r[1:8])
        self.assertEqual(v[1:9], r[1:9])
        # mysql throws away microseconds so we need to check datetimes
        # specially. additionally times are turned into timedeltas.
        # self.assertEqual(datetime.datetime(*v[8].timetuple()[:6]), r[8])
        self.assertEqual(v[9], r[9])  # just timedeltas
        # TIME columns are returned as timedeltas (seconds since midnight)
        self.assertEqual(
            datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)),
            r[10])
        self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1])

        yield from c.execute("delete from test_datatypes")

        # check nulls
        yield from c.execute(
            "insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) "
            "values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
            [None] * 12)
        yield from c.execute(
            "select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
        r = yield from c.fetchone()
        self.assertEqual(tuple([None] * 12), r)
        yield from c.execute("delete from test_datatypes")

        # check sequence type
        yield from c.execute(
            "insert into test_datatypes (i, l) values (2,4), (6,8), "
            "(10,12)")
        yield from c.execute(
            "select l from test_datatypes where i in %s order by i",
            ((2, 6),))
        r = yield from c.fetchall()
        self.assertEqual(((4,), (8,)), r)
    finally:
        # always drop the table, even if an assertion failed above
        yield from c.execute("drop table test_datatypes")