def _request(self, action='GET', url='/', data=None, query_params=None):
    if data is None:
        data = {}
    if query_params is None:
        query_params = {}
    default_headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'x-dnsme-apiKey': self.options['auth_username']
    }
    default_auth = None

    # Date string in HTTP format e.g. Sat, 12 Feb 2011 20:59:04 GMT
    request_date = formatdate(usegmt=True)

    hashed = hmac.new(bytes(self.options['auth_token'], 'ascii'),
                      bytes(request_date, 'ascii'), sha1)

    default_headers['x-dnsme-requestDate'] = request_date
    default_headers['x-dnsme-hmac'] = hashed.hexdigest()

    r = requests.request(action, self.api_endpoint + url,
                         params=query_params,
                         data=json.dumps(data),
                         headers=default_headers,
                         auth=default_auth)
    # if the request fails for any reason, throw an error.
    r.raise_for_status()

    # PUT and DELETE actions don't return valid json.
    if action == 'DELETE' or action == 'PUT':
        return r.text
    return r.json()
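# The request signing above boils down to HMAC-SHA1 over the HTTP date
# string. A standalone sketch of just the header computation (the token
# value here is made up for illustration):
import hmac
from hashlib import sha1
from email.utils import formatdate

token = 'secret-api-token'  # hypothetical API secret
request_date = formatdate(usegmt=True)  # e.g. Sat, 12 Feb 2011 20:59:04 GMT
signature = hmac.new(token.encode('ascii'),
                     request_date.encode('ascii'), sha1).hexdigest()
headers = {'x-dnsme-requestDate': request_date, 'x-dnsme-hmac': signature}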
def test_unpacking_many(self):
    """
    Test if multiple messages are correctly unpacked from a binary
    string to an array of objects
    """
    msg = self.get_foo_msg()
    packed = (
        struct.pack("!B", msg.__class__.id) +
        struct.pack("!B", 2) +
        struct.pack("!I", 1) +
        struct.pack("!I", 3)
    )
    msg = self.get_bar_msg()
    string_length = len(bytes(msg["name"], "utf-8"))
    packed += (
        struct.pack("!B", msg.__class__.id) +
        struct.pack("!I", string_length) +
        struct.pack("!{}s".format(string_length), bytes(msg["name"], "utf-8")) +
        struct.pack("!H", 42)
    )
    msg = self.get_vector_msg()
    packed += (
        struct.pack("!B", msg.__class__.id) +
        struct.pack("!f", 1) +
        struct.pack("!f", 7.77)
    )
    unpacked = unpack_messages(packed, self.factory)
    self.assertEqual(unpacked[0].__class__.__name__, "FooMessage")
    self.assertEqual(unpacked[1].__class__.__name__, "BarMessage")
    self.assertEqual(unpacked[2].__class__.__name__, "VectorMessage")
    self.assertEqual(unpacked[0]["direction"], "south")
    self.assertEqual(unpacked[0]["x"], 1)
    self.assertEqual(unpacked[0]["y"], 3)
    self.assertEqual(unpacked[1]["name"], u"Yoda")
    self.assertEqual(unpacked[1]["score"], 42)
    self.assertEqual(unpacked[2]["x"], 1)
    self.assertAlmostEqual(unpacked[2]["y"], 7.77, places=2)
def __init__(self, *, bytes=None, file=None, hexfile=None):
    import builtins
    if [bytes, file, hexfile].count(None) < 2:
        raise RuntimeError('only one of bytes, file or hexfile'
                           ' must be specified')
    if bytes is not None:
        if not isinstance(bytes, (builtins.bytes, bytearray)):
            raise TypeError('invalid bytes parameter')
        elif len(bytes) != 64:
            raise ValueError('byte array must be 64 bytes long')
        self._bytes = builtins.bytes(bytes)
    elif file is not None:
        # file can be any of a file object, a filename or a file descriptor
        self._bytes = self._load(file, 'rb')
    elif hexfile is not None:
        # hexfile can be any of a file object, a filename or a file
        # descriptor
        content = self._load(hexfile, 'r', 1024)
        content = (content.replace(b'\r', b'')
                          .replace(b'\n', b'')
                          .replace(b' ', b'')
                          .replace(b'\t', b''))
        self._bytes = builtins.bytes(bytearray.fromhex(content.decode()))
    else:
        self._bytes = builtins.bytes(64)
    assert type(self._bytes) is builtins.bytes
    assert len(self._bytes) == 64
    self._page_view_proxy = None
def bytes(text):
    """
    Convert Unicode text to UTF-8 encoded bytes.

    Since Python 2.6+ and Python 3+ have similar but incompatible
    signatures, this function unifies the two to keep code sane.

    :param text: Unicode text to convert to bytes
    :rtype: bytes (Python3), str (Python2.6+)
    """
    if text is None:
        return b''
    if sys.version_info < (3, 0):
        import __builtin__
        return __builtin__.bytes(text)
    else:
        import builtins
        if isinstance(text, builtins.bytes):
            # We already have bytes, so do nothing
            return text
        if isinstance(text, list):
            # Convert a list of integers to bytes
            return builtins.bytes(text)
        else:
            # Convert UTF-8 text to bytes
            return builtins.bytes(text, encoding='utf-8')
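# A quick check of the shim's Python 3 branches (a sketch; assumes the
# bytes() function above shadows the builtin in this module):
assert bytes(None) == b''                            # None -> empty bytes
assert bytes(b'raw') == b'raw'                       # bytes pass through
assert bytes([104, 105]) == b'hi'                    # list of ints -> bytes
assert bytes(u'héllo') == u'héllo'.encode('utf-8')   # text -> UTF-8 bytes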
def decrypt(self, data):
    # FIXME: implement proper handling of KEYS
    keys = {
        '\x00\x00\x03\x11':
            b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F'
    }
    devid = ''.join(chr(b) for b in self.id_nr_field[::-1])
    key = keys.get(devid, None)
    if key is None:
        return False
    if self.encryption_mode == 5:
        orig_len = len(data)
        spec = AES.new(key, AES.MODE_CBC, bytes(self.crypto_iv))
        data = data + ((16 - orig_len % 16) * [self.PADDING_BYTE])
        data = [int(x) for x in bytearray(spec.decrypt(bytes(data)))]
        if data[0:2] != [0x2F, 0x2F]:
            return False  # raise Exception("Decryption failed")
        return data[:orig_len]
    return None
def p_encaps_list_string(p):
    'encaps_list : encaps_list ENCAPSED_AND_WHITESPACE'
    if p[1] == '':
        p[0] = bytes(p[2], 'utf-8').decode('unicode_escape')
    else:
        p[0] = ast.BinaryOp('.', p[1],
                            bytes(p[2], 'utf-8').decode('unicode_escape'),
                            lineno=p.lineno(2))
def read_raw(self, size=-1):
    """
    Gets desired response command from stdin. If ``stdin`` is `None`,
    then the user will be prompted to enter a mock response in the
    Python interpreter.

    :param int size: Number of characters to read. Default value of -1
        will read until termination character is found.
    :rtype: `bytes`
    """
    if self._stdin is not None:
        if size == -1 or size is None:
            result = bytes()
            if self._terminator:
                while not result.endswith(self._terminator.encode("utf-8")):
                    c = self._stdin.read(1)
                    if c == b'':
                        break
                    result += c
                return result[:-len(self._terminator)]
            return self._stdin.read(-1)
        elif size >= 0:
            input_var = self._stdin.read(size)
            return bytes(input_var)
        else:
            raise ValueError("Must read a positive value of characters.")
    else:
        input_var = input("Desired Response: ")
        return input_var
def bytes(stack, stash):
    errors, encoding, string = [pythonify(stack.pop()) for _ in range(3)]
    if errors is None:
        stack.append(concatify(builtins.bytes(string, encoding)))
    else:
        stack.append(concatify(builtins.bytes(string, encoding, errors)))
def test_parse_detector_list(self):
    print('=== Testing parse_detector_list() ===')
    shutil.copyfile(recon_folder + b'/data/det_sim.dat',
                    recon_folder + b'/data/det_sim_test.dat')
    list_fname = 'test_det_list.txt'
    det = detector.detector()
    with open(list_fname, 'w') as f:
        f.writelines([(recon_folder + b'/data/det_sim.dat\n').decode('utf-8'),
                      (recon_folder + b'/data/det_sim.dat\n').decode('utf-8')])
    self.assertAlmostEqual(
        det.parse_detector_list(bytes(list_fname, 'utf-8')),
        70.32817314646061)
    self.assertEqual(det.num_dfiles, 2)
    self.det_sim_tests(det)
    self.assertIs(det.nth_det(1), None)

    det = detector.detector()
    with open(list_fname, 'w') as f:
        f.writelines([(recon_folder + b'/data/det_sim.dat\n').decode('utf-8'),
                      (recon_folder + b'/data/det_sim_test.dat\n').decode('utf-8')])
    det.parse_detector_list(bytes(list_fname, 'utf-8'))
    self.assertEqual(det.num_dfiles, 2)
    self.assertEqual(det.num_det, 2)
    npt.assert_array_equal(det.mapping, [0, 1] + 1022 * [0])
    self.det_sim_tests(det, single=False)
    self.det_sim_tests(det.nth_det(1), single=False)
    self.assertIs(det.nth_det(2), None)
    os.remove(list_fname)
    os.remove(recon_folder + b'/data/det_sim_test.dat')
def run(self):
    while not self.terminate.is_set():
        (rd, _, _) = select([self._socket], [], [], 1)
        if not rd:
            continue
        next_byte = self._socket.recv(1)
        if next_byte[0] == 0x55:
            # End message marker
            joined_buffer = reduce(lambda r, x: r + x,
                                   self.input_buffer, bytes())
            log.debug("Received raw message %s",
                      "".join(["%02x" % x for x in joined_buffer]))
            self.parse_message(joined_buffer)
            self.input_buffer = []
        else:
            if next_byte[0] == 0xAA:
                # There has to be a following byte for the escape byte
                # TODO: do something to prevent program from hanging here
                #       if input is not protocol conformant
                next_next_byte = self._socket.recv(1)
                if next_next_byte[0] == 0x01:
                    next_byte = bytes([0x55])
                elif next_next_byte[0] == 0x02:
                    next_byte = bytes([0xAA])
                else:
                    # TODO: Error, escape sequence unknown
                    pass
            self.input_buffer.append(next_byte)
def test_packing_many(self):
    """
    Test if an array of messages is correctly packed into a binary string
    """
    messages = []
    msg1 = self.get_foo_msg()
    messages.append(msg1)
    our_packed = (
        struct.pack("!B", msg1.__class__.id) +
        struct.pack("!B", 2) +
        struct.pack("!I", 1) +
        struct.pack("!I", 3)
    )
    msg2 = self.get_bar_msg()
    messages.append(msg2)
    string_length = len(bytes(msg2["name"], "utf-8"))
    our_packed += (
        struct.pack("!B", msg2.__class__.id) +
        struct.pack("!I", string_length) +
        struct.pack("!{}s".format(string_length), bytes(msg2["name"], "utf-8")) +
        struct.pack("!H", 42)
    )
    msg3 = self.get_vector_msg()
    messages.append(msg3)
    our_packed += (
        struct.pack("!B", msg3.__class__.id) +
        struct.pack("!f", 1) +
        struct.pack("!f", 7.77)
    )
    their_packed = pack_messages(messages)
    self.assertEqual(our_packed, their_packed)
def readAddr(self, addr):
    self._file.seek(addr + self._ADDR_DELTA)
    if 4 == self._POINTER_SIZE:
        return unpack(self._ENDIANITY + 'L', bytes(self._file.read(4)))[0]
    elif 8 == self._POINTER_SIZE:
        return unpack(self._ENDIANITY + 'Q', bytes(self._file.read(8)))[0]
    else:
        raise Exception("Unknown pointer size")
def to_bytes(input, encoding='utf-8'):
    if isinstance(input, bytes):
        return bytes(input)
    if isinstance(input, str):
        return bytes(input, encoding)
    raise ValueError("Invalid input, expected string or bytes")
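# Usage sketch for to_bytes(): strings are encoded (UTF-8 by default),
# bytes pass through, anything else raises ValueError.
assert to_bytes('héllo') == 'héllo'.encode('utf-8')
assert to_bytes(b'raw') == b'raw'
assert to_bytes('héllo', encoding='latin-1') == 'héllo'.encode('latin-1')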
def relocate_boot_catalog(self, isofile):
    """
    Move ISO boot catalog to the standardized place

    Check location of the boot catalog and move it to the place where
    all BIOS and firmware implementations expect it

    :param str isofile: path to the ISO file
    """
    iso_metadata = Iso._read_iso_metadata(isofile)
    Iso._validate_iso_metadata(iso_metadata)
    with open(isofile, 'rb+') as iso:
        new_boot_catalog_sector = iso_metadata.path_table_sector - 1
        new_volume_descriptor = Iso._read_iso_sector(
            new_boot_catalog_sector - 1, iso
        )
        new_volume_id = Iso._sub_string(
            data=new_volume_descriptor, length=7
        )
        if bytes(b'CD001') not in new_volume_id:
            new_boot_catalog_sector = None
            ref_sector = iso_metadata.boot_catalog_sector
            for sector in range(0x12, 0x40):
                new_volume_descriptor = Iso._read_iso_sector(sector, iso)
                new_volume_id = Iso._sub_string(
                    data=new_volume_descriptor, length=7
                )
                if (bytes(b'TEA01') in new_volume_id or
                        sector + 1 == ref_sector):
                    new_boot_catalog_sector = sector + 1
                    break
        if (
            new_boot_catalog_sector and
            iso_metadata.boot_catalog_sector != new_boot_catalog_sector
        ):
            new_boot_catalog = Iso._read_iso_sector(
                new_boot_catalog_sector, iso
            )
            empty_catalog = bytes(b'\x00') * 0x800
            if new_boot_catalog == empty_catalog:
                eltorito_descriptor = Iso._embed_string_in_segment(
                    data=iso_metadata.eltorito_descriptor,
                    string=struct.pack('<I', new_boot_catalog_sector),
                    length=4,
                    start=0x47
                )
                Iso._write_iso_sector(
                    new_boot_catalog_sector, iso_metadata.boot_catalog, iso
                )
                Iso._write_iso_sector(
                    0x11, eltorito_descriptor, iso
                )
                log.debug(
                    'Relocated boot catalog from sector 0x%x to 0x%x',
                    iso_metadata.boot_catalog_sector,
                    new_boot_catalog_sector
                )
def fix_boot_catalog(self, isofile):
    """
    Fixup inconsistencies in boot catalog

    Make sure all catalog entries are in correct order and provide
    complete metadata information, e.g. catalog name

    :param string isofile: path to the ISO file
    """
    iso_metadata = Iso._read_iso_metadata(isofile)
    Iso._validate_iso_metadata(iso_metadata)
    boot_catalog = iso_metadata.boot_catalog
    first_catalog_entry = Iso._sub_string(
        data=boot_catalog, length=32, start=32
    )
    first_catalog_entry = Iso._embed_string_in_segment(
        data=first_catalog_entry,
        string=struct.pack('B19s', 1, bytes(b'Legacy (isolinux)')),
        length=20,
        start=12
    )
    boot_catalog = Iso._embed_string_in_segment(
        data=boot_catalog,
        string=first_catalog_entry,
        length=32,
        start=32
    )
    second_catalog_entry = Iso._sub_string(
        data=boot_catalog, length=32, start=64
    )
    second_catalog_entry = Iso._embed_string_in_segment(
        data=second_catalog_entry,
        string=struct.pack('B19s', 1, bytes(b'UEFI (grub)')),
        length=20,
        start=12
    )
    second_catalog_entry_sector = second_catalog_entry[0]
    if second_catalog_entry_sector == 0x88:
        boot_catalog = Iso._embed_string_in_segment(
            data=boot_catalog,
            string=second_catalog_entry,
            length=32,
            start=96
        )
        second_catalog_entry = struct.pack(
            'BBH28s', 0x91, 0xef, 1, bytes(b'')
        )
        boot_catalog = Iso._embed_string_in_segment(
            data=boot_catalog,
            string=second_catalog_entry,
            length=32,
            start=64
        )
        with open(isofile, 'rb+') as iso:
            Iso._write_iso_sector(
                iso_metadata.boot_catalog_sector, boot_catalog, iso
            )
        log.debug('Fixed iso catalog contents')
def test_encode_request_to_cebuana(self):
    request_data = ('283JDIKSJWL4', 'KS893K2K3K4', '283JDIKS00L4')
    expected_encoded_data = (
        b"""<?xml version="1.0" encoding="utf-8"?><soap12:Envelope xmlns:soap12="http://www.w3.org/2003/05/soap-envelope" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><soap12:Body><requestInquiry xmlns="http://www.bankcom.com.ph/"><transaction>283JDIKSJWL4</transaction></requestInquiry></soap12:Body></soap12:Envelope>""",
        b"""<?xml version="1.0" encoding="utf-8"?><soap12:Envelope xmlns:soap12="http://www.w3.org/2003/05/soap-envelope" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><soap12:Body><requestInquiry xmlns="http://www.bankcom.com.ph/"><transaction>KS893K2K3K4</transaction></requestInquiry></soap12:Body></soap12:Envelope>""",
        b"""<?xml version="1.0" encoding="utf-8"?><soap12:Envelope xmlns:soap12="http://www.w3.org/2003/05/soap-envelope" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><soap12:Body><requestInquiry xmlns="http://www.bankcom.com.ph/"><transaction>283JDIKS00L4</transaction></requestInquiry></soap12:Body></soap12:Envelope>""",
    )
    self.assertEqual(self.CebuanaRemittance.encode_request(request_data[0]),
                     bytes(expected_encoded_data[0]))
    self.assertEqual(self.CebuanaRemittance.encode_request(request_data[1]),
                     bytes(expected_encoded_data[1]))
    self.assertEqual(self.CebuanaRemittance.encode_request(request_data[2]),
                     bytes(expected_encoded_data[2]))
def _fold_md4_or_md5(digest):
    if len(digest) < 16:
        raise ValueError('digest is too short')
    # XOR-fold the first and second 8-byte halves of the digest
    result = b''
    for i in range(0, 8):
        one = ord(bytes(digest[i]))
        two = ord(bytes(digest[i + 8]))
        result = result + bytes([one ^ two])
    return result
def convert_to_bytes(options):
    if sys.version_info >= (3, 0):
        for key in list(options.keys()):
            try:
                if bytes(key, 'utf-8') != key:
                    options[bytes(key, 'utf-8')] = options[key]
                    options.pop(key)
            except TypeError:
                pass
def test_iso_metadata_path_table_sector_invalid(self, mock_open):
    mock_open.return_value = self.context_manager_mock
    read_results = [bytes(b'EL TORITO SPECIFICATION'), bytes(b'CD001')]

    def side_effect(arg):
        return read_results.pop()

    self.file_mock.read.side_effect = side_effect
    Iso.fix_boot_catalog('isofile')
def setup(self):
    self.data_flow = [True, None, None, None, None, None, None]
    self.data_out = [
        bytes(b''), bytes(b'\n'), bytes(b'a'),
        bytes(b't'), bytes(b'a'), bytes(b'd')
    ]
    self.data_err = [
        bytes(b''), bytes(b'r'), bytes(b'o'),
        bytes(b'r'), bytes(b'r'), bytes(b'e')
    ]
    self.flow = self.create_flow_method(self.poll)
    self.flow_out_available = self.create_flow_method(self.outavailable)
    self.flow_err_available = self.create_flow_method(self.erravailable)
    self.flow_out = self.create_flow_method(self.outdata)
    self.flow_err = self.create_flow_method(self.errdata)
def run(self, command, custom_env=None, raise_on_error=True):
    """
    Execute a program and block the caller. The return value
    is a hash containing the stdout, stderr and return code
    information. Unless raise_on_error is set to false an
    exception is thrown if the command exits with an error
    code not equal to zero

    :param list command: command and arguments
    :param list custom_env: custom os.environ
    :param bool raise_on_error: control error behaviour

    :return: (string).output
    :return: (string).error
    :return: (int).returncode

    :rtype: tuple
    """
    from .logger import log
    log.debug('EXEC: [%s]', ' '.join(command))
    environment = os.environ
    if custom_env:
        environment = custom_env
    try:
        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=environment
        )
    except Exception as e:
        raise KiwiCommandError(
            '%s: %s: %s' % (command[0], type(e).__name__, format(e))
        )
    output, error = process.communicate()
    if process.returncode != 0 and not error:
        error = bytes(b'(no output on stderr)')
    if process.returncode != 0 and not output:
        output = bytes(b'(no output on stdout)')
    if process.returncode != 0 and raise_on_error:
        log.debug(
            'EXEC: Failed with stderr: %s, stdout: %s',
            error.decode(), output.decode()
        )
        raise KiwiCommandError(
            '%s: stderr: %s, stdout: %s' % (
                command[0], error.decode(), output.decode()
            )
        )
    command = namedtuple(
        'command', ['output', 'error', 'returncode']
    )
    return command(
        output=output.decode(),
        error=error.decode(),
        returncode=process.returncode
    )
def execute_command(self, command):
    self.tn.write(bytes(command + '\n', 'utf-8'))
    s = self.tn.read_until(bytes(self.prompt, 'utf-8'))
    if s.endswith(b'[JS] $ '):
        s = s[len(command) + 2:-8]
    elif s.endswith(b'[RS.edu] $ '):
        s = s[len(command) + 2:-12]
    else:
        s = s[len(command) + 2:-4]
    return s
def setUp(self):
    f = tempfile.NamedTemporaryFile(delete=False)
    f.write(bytes(self._test_task, 'UTF-8'))
    f.close()
    self.taskfile = f.name
    f = tempfile.NamedTemporaryFile(delete=False)
    f.write(bytes(self.taskfile, 'UTF-8'))
    f.close()
    self.listfile = f.name
def test_iso_metadata_catalog_sector_invalid(self, mock_open):
    mock_open.return_value = self.context_manager_mock
    volume_descriptor = \
        bytes(b'CD001') + bytes(b'_') * (0x08c - 0x5) + bytes(b'0x1d5f23a')
    read_results = [bytes(b'EL TORITO SPECIFICATION'), volume_descriptor]

    def side_effect(arg):
        return read_results.pop()

    self.file_mock.read.side_effect = side_effect
    Iso.fix_boot_catalog('isofile')
def test_get_shard_name(self):
    for shard_num in range(0, self.tr.num_shards()):
        expected_shard_name = shard_name_format.format(shard_num)
        shard_name = self.tr.get_shard_name(shard_num)
        # verify the shard name is what we expect
        self.assertEqual(expected_shard_name, shard_name)
        # verify the shard contains the shard num and name we expect.
        shard = self.tr.get_shard_by_num(shard_num)
        self.assertEqual(bytes(str(shard_num), 'utf8'),
                         shard.get('shard_num'))
        self.assertEqual(bytes(expected_shard_name, 'utf8'),
                         shard.get('shard_name'))
def test_auto_sharding_shard_numbers(self):
    self.tr.set('12345', 'bananas')
    shard = self.tr.get_shard_by_key('12345')
    # verify we get back what we wrote
    self.assertEqual(bytes('bananas', 'utf8'), shard.get('12345'))
    # get the shard number and verify it's what the shard says
    shard_num = self.tr.get_shard_num_by_key('12345')
    self.assertEqual(bytes(str(shard_num), 'utf8'), shard.get('shard_num'))
    # verify the shard name is what we're expecting
    shard_name = shard_name_format.format(shard_num)
    self.assertEqual(bytes(shard_name, 'utf8'), shard.get('shard_name'))
def send_message(self, avatar_message):
    serialized_message = avatar_message.serialize()
    log.info("Sending message %s" % str(avatar_message))
    log.debug("Sending serialized message %s",
              "".join(["%02x" % x for x in serialized_message]))
    escaped_message = serialized_message \
        .replace(bytes([0xAA]), bytes([0xAA, 0x02])) \
        .replace(bytes([0x55]), bytes([0xAA, 0x01]))
    crc = Crc8(serialized_message).get_crc()
    escaped_crc = bytes([crc & 0xFF]) \
        .replace(bytes([0xAA]), bytes([0xAA, 0x02])) \
        .replace(bytes([0x55]), bytes([0xAA, 0x01]))
    raw_message = escaped_message + escaped_crc + bytes([0x55])
    self._socket.send(raw_message)
    log.debug("Sending raw message %s",
              "".join(["%02x" % x for x in raw_message]))
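# The escape scheme above replaces 0xAA before 0x55, so the 0xAA bytes
# introduced while escaping 0x55 are the escape prefix itself and are not
# re-escaped. A small sketch of the encoding side:
payload = bytes([0x01, 0x55, 0xAA])
escaped = payload.replace(bytes([0xAA]), bytes([0xAA, 0x02])) \
                 .replace(bytes([0x55]), bytes([0xAA, 0x01]))
assert escaped == bytes([0x01, 0xAA, 0x01, 0xAA, 0x02])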
def test_write_msg(self):
    self.command_reg.fields.parse_map([0, 0, 0])
    self.assertEqual(self.command_reg.fields.device_index, 0)
    self.assertEqual(self.command_reg.fields.device_cmd, 0)
    msg = self.command_reg.get_write_cmd_msg()
    self.assertIsInstance(msg, list)
    self.assertIsInstance(msg[0], txrx.TxMessage)
    self.assertEqual(len(msg), 3)
    expected_msg = [bytes('\x03\x3A\x00\x00\x00\x00', encoding='latin-1'),
                    bytes('\x03\x3B\x00\x00\x00\x00', encoding='latin-1'),
                    bytes('\x03\x3C\x00\x00\x00\x00', encoding='latin-1')]
    for i in range(3):
        self.assertEqual(msg[i].message, expected_msg[i], msg[i].message)
def TestFunctions(self):
    # Send a valid system command
    self.system.send_command(const.SystemCmd.disable_global_monitoring)
    # Verify the correct TxMessages are sent to the socket
    calls = self.txrx.send_recv_message.mock_calls
    self.assertEqual(calls[0], call(
        TxMessage(bytes("\x03\x3C\x00\x00\x00\x00", encoding="latin-1"),
                  expect_eom=True)))
    self.assertEqual(calls[1], call(
        TxMessage(bytes("\x03\x3C\x00\x02\x00\x00", encoding="latin-1"),
                  expect_eom=True)))
    # Send a command that is not a system command type
    # Check that a TypeError is raised
    with self.assertRaises(TypeError):
        self.system.send_command(5)
def uuid2long(uid):
    """
    UUID is 128 bits (16 bytes). Unpack the 16 bytes into two
    8-byte longs. For CAOM-2.0 compatibility only the least
    significant 8 bytes in the UUID should have a value.

    return the UUID least significant bytes as a long.
    """
    longs = struct.unpack(str('>qq'), bytes(uid.bytes))
    if longs[0] != 0:
        longs = struct.unpack(str('>QQ'), bytes(uid.bytes))
        return (longs[0] << 64) | longs[1]
    else:
        return longs[1]
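# Sanity-check sketch for uuid2long(): a UUID whose high 8 bytes are zero
# comes back as its plain integer value.
import uuid
assert uuid2long(uuid.UUID(int=0x1234)) == 0x1234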
def set_password(self, value):
    if value:
        fernet = get_fernet()
        self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
        self.is_encrypted = fernet.is_encrypted
def high_nibble(self, nibble):
    self.port.write(bytes([0x30 | nibble]))
    self.timeout(0.1)
    return self.response(1, True)
def cfg_spi(self, spi_cfg):
    self.port.write(bytes([0x80 | spi_cfg]))
    self.timeout(0.1)
    return self.response()
def printable(string):
    if PY3 and isinstance(string, bytes):
        return bytes(c if 32 <= c <= 126 else 46 for c in string)
    return "".join(c if ' ' <= c <= '~' else '.' for c in string)
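# Usage sketch for printable() (assumes PY3 is True): non-printable bytes
# and characters are replaced by '.' (byte 46 / 0x2e).
assert printable(b'ab\x00c') == b'ab.c'
assert printable('ab\x07c') == 'ab.c'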
# See if the test case in the "test case.json" file can be processed correctly.
from petlib.bn import Bn
from petlib.ec import POINT_CONVERSION_UNCOMPRESSED
from sphinxmix.SphinxParams import SphinxParams
from sphinxmix.SphinxClient import *
from sphinxmix.SphinxNode import sphinx_process
import json
from builtins import bytes

with open('test case.json', 'r') as f:
    test_case = json.load(f)

privKeys = test_case['keys']
packet = test_case['packet']
packet = bytes(bytearray(packet))

params = SphinxParams()
pki = {}
use_nodes = []
for i, k in enumerate(privKeys):
    nid = "node" + str(i)
    use_nodes.append(nid)
    print(nid)
    x = Bn.from_binary(bytes(bytearray(k)))
    y = params.group.expon_base([x])
    print("Public key: " +
          y.export(POINT_CONVERSION_UNCOMPRESSED).encode("hex") + "\n")
def test_iso_metadata_not_bootable(self, mock_open):
    mock_open.return_value = self.context_manager_mock
    self.file_mock.read.return_value = bytes(b'CD001')
    Iso.fix_boot_catalog('isofile')
def test_iso_metadata_iso9660_invalid(self, mock_open):
    mock_open.return_value = self.context_manager_mock
    self.file_mock.read.return_value = bytes(b'bogus')
    Iso.fix_boot_catalog('isofile')
from os.path import abspath, dirname, join as join_path

from CryptoAttacks.PublicKey.rsa import RSAKey
from CryptoAttacks.Utils import b2h, b2i, h2b, i2b

current_path = dirname(abspath(__file__))

key_64 = RSAKey.import_key(join_path(current_path, 'private_key_64.pem'))
key_256 = RSAKey.import_key(join_path(current_path, 'private_key_256.pem'))
key_1024 = RSAKey.import_key(join_path(current_path, 'private_key_1024.pem'))
key_1024_small_e = RSAKey.import_key(
    join_path(current_path, 'private_key_1024_small_e.pem'))
key_2048 = RSAKey.import_key(join_path(current_path, 'private_key_2048.pem'))

hash_asn1 = {
    'md5': bytes(b'\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
    'sha1': bytes(b'\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
    'sha256': bytes(b'\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'),
    'sha384': bytes(b'\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30'),
    'sha512': bytes(b'\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40')
}


def encrypt(plaintext, key):
    plaintext = b2i(plaintext)
    ciphertext = pow(plaintext, key.e, key.n)
    return i2b(ciphertext)


def decrypt(ciphertext, key):
    ciphertext = b2i(ciphertext)
def main(argv):
    del argv  # Unused.

    if FLAGS.output_path is None:
        raise ValueError('No output_path has been set')

    api_def_map = c_api_util.ApiDefMap()

    op_codes = []
    op_codes_forwarding = []
    enum_store = EnumStore()
    op_names = api_def_map.op_names()
    if FLAGS.api_def_path is not None:
        for op_name in op_names:
            path = os.path.join(FLAGS.api_def_path,
                                'api_def_%s.pbtxt' % op_name)
            if not tf.gfile.Exists(path):
                continue
            with tf.gfile.Open(path, 'r') as fobj:
                data = fobj.read()
            try:
                api_def_map.put_api_def(data)
            except Exception as e:
                print('Cannot load api def for %s: %s' % (op_name, str(e)))

    num_generated = 0
    for op_name in sorted(op_names):
        try:
            if op_name[0] == '_':
                continue
            op_def = api_def_map.get_op_def(op_name)
            if any(a.is_ref for a in op_def.input_arg):
                raise UnableToGenerateCodeError('has ref-valued input')
            if any(a.is_ref for a in op_def.output_arg):
                raise UnableToGenerateCodeError('has ref-valued output')
            api_def = api_def_map.get_api_def(bytes(op_name, 'utf8'))

            # It would be nicer to handle `StringTensor` in a more
            # general way by having `String` conform to `TensorFlowScalar`.
            default_op = Op(op_def, api_def, enum_store, string_valued=False)
            string_valued_op = Op(op_def, api_def, enum_store,
                                  string_valued=True)
            default_code = default_op.swift_function()
            string_valued_code = string_valued_op.swift_function()
            op_codes.append(default_code)
            string_valued_op_different = False
            if string_valued_code != default_code:
                string_valued_op_different = True
                op_codes.append(string_valued_code)

            default_code = default_op.swift_dispatch_function(
                x10_supported=op_name in X10_OPS)
            string_valued_code = string_valued_op.swift_dispatch_function()
            op_codes_forwarding.append(default_code)
            if string_valued_op_different:
                op_codes_forwarding.append(string_valued_code)

            num_generated += 1
        except UnableToGenerateCodeError as e:
            print('Cannot generate code for %s: %s' % (op_name, e.details))
    print('Generated code for %d/%d ops.' % (num_generated, len(op_names)))

    version_codes = [
        '  static let generatedTensorFlowVersion = "%s"' % tf.__version__,
        '  static let generatedTensorFlowGitVersion = "%s"' % tf.__git_version__
    ]

    swift_code = (
        _WARNING +
        _HEADER +
        'import CTensorFlow\n\n' +
        '@inlinable @inline(__always)\n' +
        'func makeOp(_ name: String, _ nOutputs: Int) -> TFTensorOperation {\n' +
        '  _ExecutionContext.makeOp(name, nOutputs)\n' +
        '}\n' +
        '\n\npublic enum _RawTFEager {\n\n' +
        '\n'.join(version_codes) +
        '\n\n' +
        '\n\n'.join(enum_store.enum_codes()) +
        '\n\n' +
        '\n'.join(op_codes) +
        '\n\n}\n')
    with tf.gfile.Open(FLAGS.output_path, 'w') as f:
        f.write(swift_code)

    swift_code = (
        _WARNING +
        _HEADER +
        _DISPATCHER_TEMPLATE.format(raw_dispatching_enum=(
            'public enum _Raw {\n\n' +
            '\n'.join(version_codes) +
            '\n\n' +
            '\n\n'.join(enum_store.enum_codes_forwarding()) +
            '\n\n' +
            '\n'.join(op_codes_forwarding) +
            '\n\n}')))
    if FLAGS.dispatching_output_path:
        with tf.gfile.Open(FLAGS.dispatching_output_path, 'w') as f:
            f.write(swift_code)
def test_ensure_text(self):
    bytes_val = bytes(bytearray([0xe5, 0xbf, 0xab]))
    self.assertEqual(u'快', ensure_text(bytes_val))
    with self.assertRaises(TypeError):
        ensure_text(45)
def do_download(radio):
    do_ident(radio)
    data = bytes()

    def status():
        status = chirp_common.Status()
        status.cur = len(data)
        status.max = radio._memsize
        status.msg = "Cloning from radio"
        radio.status_fn(status)
        LOG.debug('Radio address 0x%04x' % len(data))

    # Addresses 0x0000-0xBF00 pulled by block number (divide by 0x100)
    for block in range(0, 0xBF + 1):
        send(radio, make_frame('R', block))
        cmd = radio.pipe.read(1)
        chunk = b''
        if cmd == b'Z':
            data += bytes(b'\xff' * 256)
            LOG.debug('Radio reports empty block %02x' % block)
        elif cmd == b'W':
            chunk = bytes(radio.pipe.read(256))
            if len(chunk) != 256:
                LOG.error('Received %i for block %02x' % (len(chunk), block))
                raise errors.RadioError('Radio did not send block')
            data += chunk
        else:
            LOG.error('Radio sent %r (%02x), expected W(0x57)' % (
                cmd, ord(cmd)))
            raise errors.RadioError('Radio sent unexpected response')
        LOG.debug('Read block index %02x' % block)
        status()

        chksum = radio.pipe.read(1)
        if len(chksum) != 1:
            LOG.error('Checksum was %r' % chksum)
            raise errors.RadioError('Radio sent invalid checksum')
        _chksum = checksum_data(chunk)
        if chunk and _chksum != ord(chksum):
            LOG.error(
                'Checksum failed for %i byte block 0x%02x: %02x != %02x' % (
                    len(chunk), block, _chksum, ord(chksum)))
            raise errors.RadioError('Checksum failure while reading block. '
                                    'Check serial cable.')
        radio.pipe.write(b'\x06')
        if radio.pipe.read(1) != b'\x06':
            raise errors.RadioError('Post-block exchange failed')

    # Addresses 0xC000 - 0xD1F0 pulled by address
    for block in range(0x0100, 0x1200, 0x40):
        send(radio, make_frame('S', block, b'\x40'))
        x = radio.pipe.read(1)
        if x != b'X':
            raise errors.RadioError('Radio did not send block')
        chunk = radio.pipe.read(0x40)
        data += chunk
        LOG.debug('Read memory address %04x' % block)
        status()
        radio.pipe.write(b'\x06')
        if radio.pipe.read(1) != b'\x06':
            raise errors.RadioError('Post-block exchange failed')

    radio.pipe.write(b'E')
    if radio.pipe.read(1) != b'\x06':
        raise errors.RadioError('Radio failed to acknowledge completion')

    LOG.debug('Read %i bytes total' % len(data))
    return data
def setUp(self):
    self.user = User.objects.create_user(username='******',
                                         password='******',
                                         email='*****@*****.**')
    self.client.credentials(
        HTTP_AUTHORIZATION='Basic %s' % base64.b64encode(
            bytes('{}:{}'.format('test', 'secret'), 'utf8')).decode())
    redis.flushdb()
    self.app = Application.objects.create(id='app', name='app')
    self.channel = Channel.objects.create(name='stable')
    self.platform = Platform.objects.create(name='win')
    Platform.objects.create(name='mac')
    self.version1 = Version.objects.create(
        app=self.app, platform=self.platform, channel=self.channel,
        version='1.0.0.0',
        file=SimpleUploadedFile('./chrome_installer.exe', False))
    self.version2 = Version.objects.create(
        app=self.app, platform=self.platform, channel=self.channel,
        version='2.0.0.0',
        file=SimpleUploadedFile('./chrome_installer.exe', False))
    self.mac_version = SparkleVersion.objects.create(
        app=self.app,
        channel=self.channel,
        version='782.112',
        short_version='13.0.782.112',
        dsa_signature='MCwCFCdoW13VBGJWIfIklKxQVyetgxE7AhQTVuY9uQT0KOV1UEk21epBsGZMPg==',
        file=SimpleUploadedFile('./chrome.dmg', b'_' * 1024),
        file_size=1024)
    app_kwargs = dict(appid=self.app.id, version=str(self.version1.version))
    install_app = create_app_xml(events=[fixtures.event_install_success],
                                 **app_kwargs)
    uninstall_app = create_app_xml(events=[fixtures.event_uninstall_success],
                                   **app_kwargs)
    self.install_app_list = [install_app]
    self.uninstall_app_list = [uninstall_app]
    self.mac_app = dict(appid=self.app.id,
                        version=str(self.mac_version.short_version))
    self._generate_fake_statistics()
    now = datetime.now()
    updates = [(datetime(now.year - 1, x, 1).strftime("%Y-%m"), x - 1)
               for x in range(2, 13)]
    updates.append((datetime(now.year, 1, 1).strftime("%Y-%m"), 0))
    installs = [(datetime(now.year - 1, x, 1).strftime("%Y-%m"), 1)
                for x in range(2, 13)]
    installs.append((datetime(now.year, 1, 1).strftime("%Y-%m"), 1))
    uninstalls = [(datetime(now.year - 1, x, 1).strftime("%Y-%m"), 1)
                  for x in range(2, 13)]
    uninstalls.append((datetime(now.year, 1, 1).strftime("%Y-%m"), 1))
    win_platform_statistics = dict(new=installs, updates=updates,
                                   uninstalls=uninstalls)
    mac_platform_statistics = dict(new=installs, updates=updates)
    self.users_statistics = dict(win=win_platform_statistics,
                                 mac=mac_platform_statistics)
    self.data = dict(data=dict(self.users_statistics))
def bchr(s):
    """Take an integer and make a 1-character byte string."""
    return bytes([s])
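# bchr() is the Python 3 spelling of what chr() did for byte strings in
# Python 2:
assert bchr(65) == b'A'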
def setUp(self):
    self.user = User.objects.create_user(username='******',
                                         password='******',
                                         email='*****@*****.**')
    self.client.credentials(
        HTTP_AUTHORIZATION='Basic %s' % base64.b64encode(
            bytes('{}:{}'.format('test', 'secret'), 'utf8')).decode())
    redis.flushdb()
    self.app = Application.objects.create(id='app', name='app')
    self.channel = Channel.objects.create(name='stable')
    self.channel2 = Channel.objects.create(name='alpha')
    self.platform = Platform.objects.create(name='win')
    Platform.objects.create(name='mac')
    self.version1 = Version.objects.create(
        app=self.app, platform=self.platform, channel=self.channel,
        version='1.0.0.0',
        file=SimpleUploadedFile('./chrome_installer.exe', False))
    self.version2 = Version.objects.create(
        app=self.app, platform=self.platform, channel=self.channel2,
        version='2.0.0.0',
        file=SimpleUploadedFile('./chrome_installer.exe', False))
    self.sparkle_version1 = SparkleVersion.objects.create(
        app=self.app, channel=self.channel,
        version='0.0', short_version='3.0.0.0',
        file=SimpleUploadedFile('./chrome_installer.dmg', False))
    self.sparkle_version2 = SparkleVersion.objects.create(
        app=self.app, channel=self.channel2,
        version='0.1', short_version='4.0.0.1',
        file=SimpleUploadedFile('./chrome_installer.dmg', False))
    self.n_hours = 36
    self._generate_fake_statistics()
    hours = [
        datetime(2016, 2, 13, 0, tzinfo=pytz.UTC) + timedelta(hours=hour)
        for hour in range(self.n_hours)
    ]
    self.win_statistics_ch1 = [
        ('1.0.0.0', [[hour.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
                      self.n_hours - i]
                     for (i, hour) in enumerate(hours)])
    ]
    self.win_statistics_ch2 = [
        ('2.0.0.0', [[hour.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), i]
                     for (i, hour) in enumerate(hours)])
    ]
    self.win_statistics = self.win_statistics_ch1 + self.win_statistics_ch2
    self.mac_statistics_ch1 = [
        ('3.0.0.0', [[hour.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
                      self.n_hours - i]
                     for (i, hour) in enumerate(hours)])
    ]
    self.mac_statistics_ch2 = [
        ('4.0.0.1', [[hour.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), i]
                     for (i, hour) in enumerate(hours)])
    ]
    self.mac_statistics = self.mac_statistics_ch1 + self.mac_statistics_ch2
    self.win_daily_stat_ch1 = [('1.0.0.0',
                                [['2016-02-13T00:00:00.000000Z', 36],
                                 ['2016-02-14T00:00:00.000000Z', 12]])]
    self.win_daily_stat_ch2 = [('2.0.0.0',
                                [['2016-02-13T00:00:00.000000Z', 23],
                                 ['2016-02-14T00:00:00.000000Z', 35]])]
    self.win_daily_statistics = self.win_daily_stat_ch1 + self.win_daily_stat_ch2
    self.mac_daily_stat_ch1 = [('3.0.0.0',
                                [['2016-02-13T00:00:00.000000Z', 36],
                                 ['2016-02-14T00:00:00.000000Z', 12]])]
    self.mac_daily_stat_ch2 = [('4.0.0.1',
                                [['2016-02-13T00:00:00.000000Z', 23],
                                 ['2016-02-14T00:00:00.000000Z', 35]])]
    self.mac_daily_statistics = self.mac_daily_stat_ch1 + self.mac_daily_stat_ch2
    self.data = {'hourly': {}, 'daily': {}}
    self.data['hourly']['channel1'] = dict(
        data=dict(win=dict(self.win_statistics_ch1),
                  mac=dict(self.mac_statistics_ch1)))
    self.data['hourly']['channel2'] = dict(
        data=dict(win=dict(self.win_statistics_ch2),
                  mac=dict(self.mac_statistics_ch2)))
    self.data['hourly']['all'] = dict(
        data=dict(win=dict(self.win_statistics),
                  mac=dict(self.mac_statistics)))
    self.data['daily']['channel1'] = dict(
        data=dict(win=dict(self.win_daily_stat_ch1),
                  mac=dict(self.mac_daily_stat_ch1)))
    self.data['daily']['channel2'] = dict(
        data=dict(win=dict(self.win_daily_stat_ch2),
                  mac=dict(self.mac_daily_stat_ch2)))
    self.data['daily']['all'] = dict(
        data=dict(win=dict(self.win_daily_statistics),
                  mac=dict(self.mac_daily_statistics)))
def __generate_vhd(self, temporary_file, disk_size_in_gb):
    """
    Kudos to Steven Edouard: https://gist.github.com/sedouard
    who provided the following:

    Generate an empty vhd fixed disk of the specified size. The file
    must conform to the VHD Footer Format Specification at
    https://technet.microsoft.com/en-us/virtualization/bb676673.aspx#E3B
    which specifies the data structure as follows:

    * Field          Size (bytes)
    * Cookie         8
    * Features       4
    * Version        4
    * Data Offset    4
    * TimeStamp      4
    * Creator App    4
    * Creator Ver    4
    * CreatorHostOS  4
    * Original Size  8
    * Current Size   8
    * Disk Geo       4
    * Disk Type      4
    * Checksum       4
    * Unique ID      16
    * Saved State    1
    * Reserved       427
    """
    # disk size in bytes
    byte_size = int(disk_size_in_gb) * 1073741824
    # the ascii string 'conectix'
    cookie = bytearray([0x63, 0x6f, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x78])
    # no features enabled
    features = bytearray([0x00, 0x00, 0x00, 0x02])
    # current file version
    version = bytearray([0x00, 0x01, 0x00, 0x00])
    # in the case of a fixed disk, this is set to -1
    data_offset = bytearray(
        [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff])
    # hex representation of seconds since january 1st 2000
    timestamp = bytearray.fromhex(
        hex(long(datetime.now().strftime('%s')) - 946684800).replace(
            'L', '').replace('0x', '').zfill(8))
    # ascii code for 'wa' = windowsazure
    creator_app = bytearray([0x77, 0x61, 0x00, 0x00])
    # ascii code for version of creator application
    creator_version = bytearray([0x00, 0x07, 0x00, 0x00])
    # creator host os. windows or mac, ascii for 'wi2k'
    creator_os = bytearray([0x57, 0x69, 0x32, 0x6b])
    original_size = bytearray.fromhex(
        hex(byte_size).replace('0x', '').zfill(16))
    current_size = bytearray.fromhex(
        hex(byte_size).replace('0x', '').zfill(16))
    # 0x820=2080 cylinders, 0x10=16 heads, 0x3f=63 sectors/track
    disk_geometry = bytearray([0x08, 0x20, 0x10, 0x3f])
    # 0x2 = fixed hard disk
    disk_type = bytearray([0x00, 0x00, 0x00, 0x02])
    # a uuid
    unique_id = bytearray.fromhex(uuid4().hex)
    # saved state and reserved
    saved_reserved = bytearray(428)

    # Compute Checksum as ones complement of the sum of all fields
    # excluding the checksum field
    to_checksum_array = \
        cookie + features + version + data_offset + \
        timestamp + creator_app + creator_version + \
        creator_os + original_size + current_size + \
        disk_geometry + disk_type + unique_id + saved_reserved
    total = 0
    for b in to_checksum_array:
        total += b
    total = ~total

    # handle two's complement
    def tohex(val, nbits):
        return hex((val + (1 << nbits)) % (1 << nbits))

    checksum = bytearray.fromhex(tohex(total, 32).replace('0x', ''))

    # vhd disk blob
    blob_data = \
        cookie + features + version + data_offset + \
        timestamp + creator_app + creator_version + \
        creator_os + original_size + current_size + \
        disk_geometry + disk_type + checksum + unique_id + saved_reserved

    with open(temporary_file.name, 'wb') as vhd:
        vhd.write(bytes(blob_data))
def main():
    opt = parser.parse_args()
    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    translator = onmt.Translator(opt)
    outF = codecs.open(opt.output, 'w', 'utf-8')
    predScoreTotal, predWordsTotal, goldScoreTotal, goldWordsTotal = 0, 0, 0, 0
    srcBatch, tgtBatch = [], []
    count = 0
    tgtF = codecs.open(opt.tgt, 'r', 'utf-8') if opt.tgt else None

    if opt.dump_beam != "":
        import json
        translator.initBeamAccum()

    for line in addone(codecs.open(opt.src, 'r', 'utf-8')):
        if line is not None:
            srcTokens = line.split()
            srcBatch += [srcTokens]
            if tgtF:
                tgtTokens = tgtF.readline().split() if tgtF else None
                tgtBatch += [tgtTokens]
            if len(srcBatch) < opt.batch_size:
                continue
        else:
            # at the end of file, check last batch
            if len(srcBatch) == 0:
                break

        predBatch, predScore, goldScore, attn, src \
            = translator.translate(srcBatch, tgtBatch)

        predScoreTotal += sum(score[0] for score in predScore)
        predWordsTotal += sum(len(x[0]) for x in predBatch)
        if tgtF is not None:
            goldScoreTotal += sum(goldScore)
            goldWordsTotal += sum(len(x) for x in tgtBatch)

        for b in range(len(predBatch)):
            count += 1
            outF.write(" ".join([i for i in predBatch[b][0]]) + '\n')
            outF.flush()

            if opt.verbose:
                srcSent = ' '.join(srcBatch[b])
                if translator.tgt_dict.lower:
                    srcSent = srcSent.lower()
                os.write(1, bytes('SENT %d: %s\n' % (count, srcSent),
                                  'UTF-8'))
                os.write(1, bytes('PRED %d: %s\n' % (
                    count, " ".join(predBatch[b][0])), 'UTF-8'))
                print("PRED SCORE: %.4f" % predScore[b][0])

                if tgtF is not None:
                    tgtSent = ' '.join(tgtBatch[b])
                    if translator.tgt_dict.lower:
                        tgtSent = tgtSent.lower()
                    os.write(1, bytes('GOLD %d: %s\n' % (count, tgtSent),
                                      'UTF-8'))
                    print("GOLD SCORE: %.4f" % goldScore[b])

                if opt.n_best > 1:
                    print('\nBEST HYP:')
                    for n in range(opt.n_best):
                        os.write(1, bytes("[%.4f] %s\n" % (
                            predScore[b][n],
                            " ".join(predBatch[b][n])), 'UTF-8'))

                if opt.attn_debug:
                    print('')
                    for i, w in enumerate(predBatch[b][0]):
                        print(w)
                        _, ids = attn[b][0][i].sort(0, descending=True)
                        for j in ids[:5].tolist():
                            print("\t%s\t%d\t%3f" % (
                                srcTokens[j], j, attn[b][0][i][j]))

        srcBatch, tgtBatch = [], []

    reportScore('PRED', predScoreTotal, predWordsTotal)
    if tgtF:
        reportScore('GOLD', goldScoreTotal, goldWordsTotal)

    if tgtF:
        tgtF.close()

    if opt.dump_beam:
        json.dump(translator.beam_accum,
                  codecs.open(opt.dump_beam, 'w', 'utf-8'))
def visualize_rule_graph_to_file(self, filename):
    self._native.lib.rule_graph_visualize(
        self._scheduler,
        self._root_type_ids(),
        bytes(filename, 'utf-8'))
def test_decodable_dict(self):
    test_dict = {1: bytes('a', 'utf8'), 2: 'b',
                 3: {1: bytes('b', 'utf8'),
                     2: (1, bytes('a', 'utf8'))}}
    cleaned_dict = safe_exec.json_safe(test_dict)
    self.assertDictEqual(cleaned_dict,
                         {'1': 'a', '2': 'b',
                          '3': {'1': 'b', '2': [1, 'a']}})
def visualize_graph_to_file(self, session, filename):
    res = self._native.lib.graph_visualize(
        self._scheduler, session, bytes(filename, 'utf-8'))
    self._raise_or_return(res)
def calc_crc(self, data):
    data = bytes(data)  # for python2/3 compatibility
    crc = 0
    for a_byte in data:
        crc ^= a_byte
    return crc
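# calc_crc() is a plain XOR checksum, so appending the checksum byte drives
# the running XOR to zero. A sketch, assuming `dev` is an instance of the
# class above:
crc = dev.calc_crc(b'\x01\x02\x04')  # 0x01 ^ 0x02 ^ 0x04 == 0x07
assert dev.calc_crc(b'\x01\x02\x04' + bytes([crc])) == 0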
def main():
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]

    opt.cuda = opt.gpu > -1
    if opt.cuda:
        torch.cuda.set_device(opt.gpu)

    translator = onmt.Translator(opt, dummy_opt.__dict__)
    out_file = codecs.open(opt.output, 'w', 'utf-8')
    # CUSTOM CHANGE
    out_file_att_debug = codecs.open(opt.output + '.att_debug', 'w', 'utf-8')
    pred_score_total, pred_words_total = 0, 0
    gold_score_total, gold_words_total = 0, 0

    if opt.dump_beam != "":
        import json
        translator.initBeamAccum()

    data = onmt.IO.ONMTDataset(opt.src, opt.tgt, translator.fields, None)
    test_data = onmt.IO.OrderedIterator(dataset=data, device=opt.gpu,
                                        batch_size=opt.batch_size,
                                        train=False, sort=False,
                                        shuffle=False)

    counter = count(1)
    for batch in test_data:
        pred_batch, gold_batch, pred_scores, gold_scores, attn, src \
            = translator.translate(batch, data)
        pred_score_total += sum(score[0] for score in pred_scores)
        pred_words_total += sum(len(x[0]) for x in pred_batch)
        if opt.tgt:
            gold_score_total += sum(gold_scores)
            gold_words_total += sum(len(x) for x in batch.tgt[1:])

        # z_batch: an iterator over the predictions, their scores,
        # the gold sentence, its score, and the source sentence for each
        # sentence in the batch. It has to be zip_longest instead of
        # plain-old zip because the gold_batch has length 0 if the target
        # is not included.
        z_batch = zip_longest(
            pred_batch, gold_batch,
            pred_scores, gold_scores,
            (sent.squeeze(1) for sent in src.split(1, dim=1)))

        for pred_sents, gold_sent, pred_score, gold_score, src_sent in z_batch:
            n_best_preds = [" ".join(pred) for pred in pred_sents[:opt.n_best]]
            tgt_words = '\n'.join(n_best_preds)

            ##################### CUSTOM CHANGE #########################
            # WRITES ATTENTIONS IN NEMATUS FORMAT
            #############################################################
            if opt.attn_debug:
                src_words = get_src_words(
                    src_sent, translator.fields["src"].vocab.itos)
                out_file_att_debug.write("0 ||| " + tgt_words + " ||| 0 ||| " +
                                         src_words + " ||| 0 0")
                out_file_att_debug.write('\n')
                att_matrix = attn[0][0]  # WORKS FOR ONE BATCH EXAMPLES ONLY
                for row in att_matrix:
                    for weight in row:
                        out_file_att_debug.write("{:.15f}".format(weight) + " ")
                    # corresponds to the source EOS symbol
                    out_file_att_debug.write("{:.15f}".format(0))
                    out_file_att_debug.write('\n')
                out_file_att_debug.write('\n')
                out_file_att_debug.flush()
            #############################################################

            out_file.write(tgt_words)
            out_file.write('\n')
            out_file.flush()

            if opt.verbose:
                sent_number = next(counter)
                words = get_src_words(
                    src_sent, translator.fields["src"].vocab.itos)
                os.write(1, bytes('\nSENT %d: %s\n' % (sent_number, words),
                                  'UTF-8'))
                best_pred = n_best_preds[0]
                best_score = pred_score[0]
                os.write(1, bytes('PRED %d: %s\n' % (sent_number, best_pred),
                                  'UTF-8'))
                print("PRED SCORE: %.4f" % best_score)

                if opt.tgt:
                    tgt_sent = ' '.join(gold_sent)
                    os.write(1, bytes('GOLD %d: %s\n' % (
                        sent_number, tgt_sent), 'UTF-8'))
                    print("GOLD SCORE: %.4f" % gold_score)

                if len(n_best_preds) > 1:
                    print('\nBEST HYP:')
                    for score, sent in zip(pred_score, n_best_preds):
                        os.write(1, bytes("[%.4f] %s\n" % (score, sent),
                                          'UTF-8'))

    report_score('PRED', pred_score_total, pred_words_total)
    if opt.tgt:
        report_score('GOLD', gold_score_total, gold_words_total)

    if opt.dump_beam:
        json.dump(translator.beam_accum,
                  codecs.open(opt.dump_beam, 'w', 'utf-8'))
def db_value(self, value):  # pragma: no cover
    if isinstance(value, bytearray):
        return bytes(value)
    return value
def execute(self, context):
    """
    Execute the bash command in a temporary directory
    which will be cleaned afterwards
    """
    self.log.info("Tmp dir root location: \n %s", gettempdir())

    # Prepare env for child process.
    env = self.env
    if env is None:
        env = os.environ.copy()
    airflow_context_vars = context_to_airflow_vars(context,
                                                   in_env_var_format=True)
    self.log.debug(
        "Exporting the following env vars:\n%s",
        "\n".join(["{}={}".format(k, v)
                   for k, v in airflow_context_vars.items()]),
    )
    env.update(airflow_context_vars)

    self.lineage_data = self.bash_command

    with TemporaryDirectory(prefix="airflowtmp") as tmp_dir:
        with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:
            f.write(bytes(self.bash_command, "utf_8"))
            f.flush()
            fname = f.name
            script_location = os.path.abspath(fname)
            self.log.info("Temporary script location: %s", script_location)

            def pre_exec():
                # Restore default signal disposition and invoke setsid
                for sig in ("SIGPIPE", "SIGXFZ", "SIGXFSZ"):
                    if hasattr(signal, sig):
                        signal.signal(getattr(signal, sig), signal.SIG_DFL)
                os.setsid()

            self.log.info("Running command: %s", self.bash_command)
            self.sub_process = Popen(["bash", fname],
                                     stdout=PIPE,
                                     stderr=STDOUT,
                                     cwd=tmp_dir,
                                     env=env,
                                     preexec_fn=pre_exec)

            self.log.info("Output:")
            line = ""
            for line in iter(self.sub_process.stdout.readline, b""):
                line = line.decode(self.output_encoding).rstrip()
                self.log.info(line)
            self.sub_process.wait()
            self.log.info("Command exited with return code %s",
                          self.sub_process.returncode)

            if self.sub_process.returncode:
                raise AirflowException("Bash command failed")

    if self.xcom_push_flag:
        return line
def test_relocate_boot_catalog(self, mock_open):
    mock_open.return_value = self.context_manager_mock
    volume_descriptor = \
        bytes(b'CD001') + bytes(b'_') * (0x08c - 0x5) + bytes(b'0x1d5f23a')
    eltorito_descriptor = \
        bytes(b'EL TORITO SPECIFICATION') + \
        bytes(b'_') * (0x47 - 0x17) + bytes(b'0x1d5f23a')
    new_volume_descriptor = bytes(b'bogus')
    next_new_volume_descriptor = bytes(b'TEA01')
    new_boot_catalog = bytes(b'\x00') * 0x800
    read_results = [
        new_boot_catalog,
        next_new_volume_descriptor,
        new_volume_descriptor,
        bytes(b'catalog'),
        eltorito_descriptor,
        volume_descriptor
    ]

    def side_effect(arg):
        return read_results.pop()

    self.file_mock.read.side_effect = side_effect
    Iso.relocate_boot_catalog('isofile')
    assert self.file_mock.write.call_args_list == [
        call(bytes(b'catalog')),
        call(
            bytes(b'EL TORITO SPECIFICATION') +
            bytes(b'_') * (0x47 - 0x17) +
            bytes(b'\x13\x00\x00\x005f23a')
        )
    ]
GSM_BASIC_CHARSET = (
    u'@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞ\x1bÆæßÉ !"#¤%&\'()*+,-./0123456789:;<=>?¡'
    u'ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑܧ¿abcdefghijklmnopqrstuvwxyzäöñüà')
GSM_EXT_CHARSET = u'\f^{}\\[~]|€'
GSM_CHARSET = GSM_BASIC_CHARSET + GSM_EXT_CHARSET

basic_pairs = dict(zip(
    [i for i in range(len(GSM_BASIC_CHARSET))],
    [ord(c) for c in GSM_BASIC_CHARSET],
))
# Extension table characters are reached via the 0x1b escape byte; the
# second bytes follow the GSM 03.38 extension table (0x3e maps to ']').
ext_pairs = dict(zip(
    [bytes([ord('\x1b'), ord(c)])
     for c in '\x0a\x14\x28\x29\x2f\x3c\x3d\x3e\x40\x65'],
    [ord(c) for c in GSM_EXT_CHARSET]
))
decoding_map = basic_pairs
decoding_map.update(ext_pairs)
encoding_map = codecs.make_encoding_map(decoding_map)


def decode_gsm0338(text, decoding_map):
    ESCAPE = ord('\x1b')
    SPACE = ord(' ')
    decoded = u''
    skip = None
def test_fix_boot_catalog(self, mock_open):
    mock_open.return_value = self.context_manager_mock
    volume_descriptor = \
        bytes(b'CD001') + bytes(b'_') * (0x08c - 0x5) + bytes(b'0x1d5f23a')
    eltorito_descriptor = \
        bytes(b'EL TORITO SPECIFICATION') + \
        bytes(b'_') * (0x47 - 0x17) + bytes(b'0x1d5f23a')
    boot_catalog = bytes(b'_') * 64 + struct.pack('B', 0x88) + \
        bytes(b'_') * 32
    read_results = [boot_catalog, eltorito_descriptor, volume_descriptor]

    def side_effect(arg):
        return read_results.pop()

    self.file_mock.read.side_effect = side_effect
    Iso.fix_boot_catalog('isofile')
    if sys.byteorder == 'big':
        assert self.file_mock.write.call_args_list == [
            call(
                bytes(b'_') * 44 +
                bytes(b'\x01Legacy (isolinux)\x00\x00\x91\xef\x00\x01') +
                bytes(b'\x00') * 28 +
                bytes(b'\x88___________\x01UEFI (grub)') +
                bytes(b'\x00') * 8
            )
        ]
    else:
        assert self.file_mock.write.call_args_list == [
            call(
                bytes(b'_') * 44 +
                bytes(b'\x01Legacy (isolinux)\x00\x00\x91\xef\x01') +
                bytes(b'\x00') * 29 +
                bytes(b'\x88___________\x01UEFI (grub)') +
                bytes(b'\x00') * 8
            )
        ]
publisher = ""
date = ""
source = ""
language = ""
rights = ""
url = ""
creator = ""
coverage = ""

query = "SELECT * FROM record"
sqlite.execute(query)
rows = sqlite.fetchall()
for row in rows:
    # base64url-style identifier: encode, then strip '=' padding
    identifier = row[1]
    identifier = bytes(identifier, "utf-8")
    identifier = base64.b64encode(identifier)
    identifier = identifier.decode("utf-8").rstrip("=")
    title = row[2]
    description = row[3]
    subject = row[4]
    type = row[5]
    format = row[6]
    format = clear_format(format)
    relation = row[7]
    publisher = row[8]
    date = row[9]
    source = row[10]
    language = row[11]
    rights = row[12]
    url = row[13]
def pad_string(string, chunk_size=AES.block_size):
    assert chunk_size <= 256, 'We are using one byte to represent padding'
    to_pad = (chunk_size - (len(string) + 1)) % chunk_size
    return bytes([to_pad]) + string + bytes([0] * to_pad)
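# A matching unpad sketch for pad_string() above (a hypothetical helper,
# not part of the original): the first byte stores the pad length, so
# stripping is just slicing.
def unpad_string(padded):
    to_pad = padded[0]
    return padded[1:len(padded) - to_pad]

assert unpad_string(pad_string(b'secret')) == b'secret'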
def naming_hash(string, length=8):
    return hashlib.sha256(bytes(string, 'utf-8')).hexdigest().lower()[:length]
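# naming_hash() truncates the hex SHA-256 digest; sha256('hello') starts
# with 2cf24dba..., so:
assert naming_hash('hello') == '2cf24dba'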