Example #1
def canonicalize_env(env):
    """Windows requires that environment be dicts with bytes as keys and values
    This function converts any unicode entries for Windows only, returning the
    dictionary untouched in other environments.

    Parameters
    ----------
    env : dict
        environment dictionary with unicode or bytes keys and values

    Returns
    -------
    env : dict
        Windows: environment dictionary with bytes keys and values
        Other: untouched input ``env``
    """
    if os.name != 'nt':
        return env

    # convert unicode to string for python 2
    if not PY3:
        from future.utils import bytes_to_native_str
    out_env = {}
    for key, val in env.items():
        if not isinstance(key, bytes):
            key = key.encode('utf-8')
        if not isinstance(val, bytes):
            val = val.encode('utf-8')
        if not PY3:
            key = bytes_to_native_str(key)
            val = bytes_to_native_str(val)
        out_env[key] = val
    return out_env
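A minimal usage sketch for the helper above (the dict literal is illustrative, not from the source project):

env = canonicalize_env({u'PYTHONPATH': u'C:\\libs', b'TEMP': b'C:\\tmp'})
# On Windows every key and value comes back as bytes (and as a native str on
# Python 2); on other platforms the input dictionary is returned untouched.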
Example #2
 def remote_advise_corrupt_share(self, share_type, storage_index, shnum,
                                 reason):
     # This is a remote API, I believe, so this has to be bytes for legacy
     # protocol backwards compatibility reasons.
     assert isinstance(share_type, bytes)
     assert isinstance(reason, bytes), "%r is not bytes" % (reason, )
     fileutil.make_dirs(self.corruption_advisory_dir)
     now = time_format.iso_utc(sep="T")
     si_s = si_b2a(storage_index)
     # windows can't handle colons in the filename
     fn = os.path.join(self.corruption_advisory_dir,
                       "%s--%s-%d" % (now, si_s, shnum)).replace(":", "")
     with open(fn, "w") as f:
         f.write("report: Share Corruption\n")
         f.write("type: %s\n" % bytes_to_native_str(share_type))
         f.write("storage_index: %s\n" % bytes_to_native_str(si_s))
         f.write("share_number: %d\n" % shnum)
         f.write("\n")
         f.write(bytes_to_native_str(reason))
         f.write("\n")
     log.msg(format=("client claims corruption in (%(share_type)s) " +
                     "%(si)s-%(shnum)d: %(reason)s"),
             share_type=share_type,
             si=si_s,
             shnum=shnum,
             reason=reason,
             level=log.SCARY,
             umid="SGx2fA")
     return None
Example #3
 def cols(self, size, current):
     self.__initcheck()
     ic = current.adapter.getCommunicator()
     types = self.__types
     names = self.__mea.colnames
     descs = self.__descriptions
     cols = []
     for i in range(len(types)):
         t = types[i]
         if isbytes(t):
             t = bytes_to_native_str(t)
         n = names[i]
         d = descs[i]
         if isbytes(d):
             d = bytes_to_native_str(d)
         try:
             col = ic.findObjectFactory(t).create(t)
             col.name = n
             col.description = d
             col.setsize(size)
             col.settable(self.__mea)
             cols.append(col)
         except:
             msg = traceback.format_exc()
             raise omero.ValidationException(
                 None, msg, "BAD COLUMN TYPE: %s for %s" % (t, n))
     return cols
Example #4
def _remove_uuid(proto):
    if isinstance(proto, caffe2_pb2.NetDef):
        for op in proto.op:
            op.ClearField(bytes_to_native_str(b'uuid'))
    elif isinstance(proto, caffe2_pb2.OperatorDef):
        proto.ClearField(bytes_to_native_str(b'uuid'))
    return proto
Example #5
def filter_smudge():
    # Operate on stdin/stdout in binary mode
    firstline = next(stdin)
    if firstline == b"bigstore\n":
        hash_function_name = next(stdin)
        hexdigest = next(stdin)
        source_filename = object_filename(
            bytes_to_native_str(hash_function_name)[:-1],
            bytes_to_native_str(hexdigest)[:-1])

        try:
            with open(source_filename):
                pass
        except IOError:
            stdout.write(firstline)
            stdout.write(hash_function_name)
            stdout.write(hexdigest)
        else:
            with open(source_filename, 'rb') as file:
                for line in file:
                    stdout.write(line)
    else:
        stdout.write(firstline)
        for line in stdin:
            stdout.write(line)
Example #6
 def _write_handle(self, handle, data, response=True):
     if response:
         return self.requester.write_by_handle(handle,
                                               bytes_to_native_str(data))
     else:
         return self.requester.write_without_response_by_handle(
             handle, bytes_to_native_str(data))
Example #7
 def test_str_encode_decode_with_py2_str_arg(self):
     # Try passing a standard Py2 string (as if unicode_literals weren't imported)
     b = str(TEST_UNICODE_STR).encode(utils.bytes_to_native_str(b'utf-8'))
     self.assertTrue(isinstance(b, bytes))
     self.assertFalse(isinstance(b, str))
     s = b.decode(utils.bytes_to_native_str(b'utf-8'))
     self.assertTrue(isinstance(s, str))
     self.assertEqual(s, TEST_UNICODE_STR)
Example #8
 def test_str_encode_decode_with_py2_str_arg(self):
     # Try passing a standard Py2 string (as if unicode_literals weren't imported)
     b = str(TEST_UNICODE_STR).encode(utils.bytes_to_native_str(b'utf-8'))
     self.assertTrue(isinstance(b, bytes))
     self.assertFalse(isinstance(b, str))
     s = b.decode(utils.bytes_to_native_str(b'utf-8'))
     self.assertTrue(isinstance(s, str))
     self.assertEqual(s, TEST_UNICODE_STR)
Example #9
 def convert_from_bytes(cls, elem):
     if isinstance(elem, bytes):
         elem = bytes_to_native_str(elem)
     elif isinstance(elem, list):
         elem = [cls.convert_from_bytes(e) for e in elem]
     elif isinstance(elem, dict):
         elem = OrderedDict(
             (bytes_to_native_str(n), cls.convert_from_bytes(e))
             for n, e in elem.items())
     return elem
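A hedged illustration of what the classmethod above produces (the owning class is not shown in the snippet, so the call is left as a comment):

# cls.convert_from_bytes({b'name': [b'alpha', {b'id': b'42'}]})
# -> OrderedDict([('name', ['alpha', OrderedDict([('id', '42')])])])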
Example #10
 def convert_from_bytes(cls, elem):
     if isinstance(elem, bytes):
         elem = bytes_to_native_str(elem)
     elif isinstance(elem, list):
         elem = [cls.convert_from_bytes(e) for e in elem]
     elif isinstance(elem, dict):
         elem = OrderedDict(
             (bytes_to_native_str(n), cls.convert_from_bytes(e))
             for n, e in elem.items())
     return elem
Example #11
    def config_gen_csv(self, keys, as_type=None, output=None):
        """
        Generate csv file or print it out to STDOUT
        """

        if keys is None:
            print(
                'No --keys specified, please make sure you have at least one key to generate'
            )
            return

        # split --keys=a,b,c,d to list [a,b,c,d]
        keys = [c.strip() for c in keys.split(',')]

        image_list = None
        if as_type == "image" and not self.runtime.rpms:
            image_list = self.runtime.image_metas()
        if as_type == "rpm" and not self.runtime.images:
            image_list = self.runtime.rpm_metas()

        if image_list is None:
            print(
                'Not correct --type specified (--type image or --type rpm). '
                'Or not consistent with global options: --images/-i and --rpms/-r'
            )
            return

        def _write_rows(w):
            # write header
            w.writerow(keys)
            # write values
            for value in image_list:
                value_list = []
                for k in keys:
                    if k == "key":
                        value_list.append(value.config_filename)
                    else:
                        value_list.append(value.config.get(k, None))
                w.writerow(value_list)

        if output is None:
            writer = csv.writer(sys.stdout,
                                delimiter=bytes_to_native_str(b','),
                                quotechar=bytes_to_native_str(b'"'),
                                quoting=csv.QUOTE_MINIMAL)
            _write_rows(writer)
            return

        with io.open(output, mode='w', encoding="utf-8") as csv_file:
            writer = csv.writer(csv_file,
                                delimiter=bytes_to_native_str(b','),
                                quotechar=bytes_to_native_str(b'"'),
                                quoting=csv.QUOTE_MINIMAL)
            _write_rows(writer)
Example #12
    def encrypt(self, plaintext):
        # type: (AnyStr) -> _OlmMessage
        """Encrypts a message using the session. Returns the ciphertext as an
        base64 encoded strin on success. Raises OlmSessionError on failure. If
        there weren't enough random bytes to encrypt the message the error
        message for the exception will be NOT_ENOUGH_RANDOM.

        Args:
            plaintext(str): The plaintext message that will be encrypted.
        """
        byte_plaintext = to_bytes(plaintext)

        r_length = lib.olm_encrypt_random_length(self._session)
        random = URANDOM(r_length)
        random_buffer = ffi.new("char[]", random)

        message_type = lib.olm_encrypt_message_type(self._session)

        self._check_error(message_type)

        ciphertext_length = lib.olm_encrypt_message_length(
            self._session, len(plaintext)
        )
        ciphertext_buffer = ffi.new("char[]", ciphertext_length)

        plaintext_buffer = ffi.new("char[]", byte_plaintext)

        self._check_error(lib.olm_encrypt(
            self._session,
            plaintext_buffer, len(byte_plaintext),
            random_buffer, r_length,
            ciphertext_buffer, ciphertext_length,
        ))

        if message_type == lib.OLM_MESSAGE_TYPE_PRE_KEY:
            return OlmPreKeyMessage(
                bytes_to_native_str(ffi.unpack(
                    ciphertext_buffer,
                    ciphertext_length
                )))
        elif message_type == lib.OLM_MESSAGE_TYPE_MESSAGE:
            return OlmMessage(
                bytes_to_native_str(ffi.unpack(
                    ciphertext_buffer,
                    ciphertext_length
                )))
        else:  # pragma: no cover
            raise ValueError("Unknown message type")
Example #13
    def encrypt(self, plaintext):
        # type: (AnyStr) -> _OlmMessage
        """Encrypts a message using the session. Returns the ciphertext as a
        base64 encoded string on success. Raises OlmSessionError on failure.

        Args:
            plaintext(str): The plaintext message that will be encrypted.
        """
        byte_plaintext = to_bytearray(plaintext)

        r_length = lib.olm_encrypt_random_length(self._session)
        random = URANDOM(r_length)

        try:
            message_type = lib.olm_encrypt_message_type(self._session)

            self._check_error(message_type)

            ciphertext_length = lib.olm_encrypt_message_length(
                self._session, len(byte_plaintext))
            ciphertext_buffer = ffi.new("char[]", ciphertext_length)

            self._check_error(
                lib.olm_encrypt(
                    self._session,
                    ffi.from_buffer(byte_plaintext),
                    len(byte_plaintext),
                    ffi.from_buffer(random),
                    r_length,
                    ciphertext_buffer,
                    ciphertext_length,
                ))
        finally:
            # clear out copies of plaintext
            if byte_plaintext is not plaintext:
                for i in range(0, len(byte_plaintext)):
                    byte_plaintext[i] = 0

        if message_type == lib.OLM_MESSAGE_TYPE_PRE_KEY:
            return OlmPreKeyMessage(
                bytes_to_native_str(
                    ffi.unpack(ciphertext_buffer, ciphertext_length)))
        elif message_type == lib.OLM_MESSAGE_TYPE_MESSAGE:
            return OlmMessage(
                bytes_to_native_str(
                    ffi.unpack(ciphertext_buffer, ciphertext_length)))
        else:  # pragma: no cover
            raise ValueError("Unknown message type")
Example #14
    def asDataFrame(self, rowIdAndVersionInIndex=True):
        test_import_pandas()
        import pandas as pd

        try:
            #Handle bug in pandas 0.19 requiring quotechar to be str not unicode or newstr
            quoteChar = bytes_to_native_str(bytes(self.quoteCharacter)) if six.PY2 else self.quoteCharacter
            ## assign a line terminator only for single-character
            ## line terminators (e.g. not '\r\n'), because pandas doesn't
            ## support longer line terminators. See:
            ##    https://github.com/pydata/pandas/issues/3501
            ## "ValueError: Only length-1 line terminators supported"
            df = pd.read_csv(self.filepath,
                    sep=self.separator,
                    lineterminator=self.lineEnd if len(self.lineEnd) == 1 else None,
                    quotechar=quoteChar,
                    escapechar=self.escapeCharacter,
                    header = 0 if self.header else None,
                    skiprows=self.linesToSkip)
        except pd.parser.CParserError as ex1:
            df = pd.DataFrame()

        if rowIdAndVersionInIndex and "ROW_ID" in df.columns and "ROW_VERSION" in df.columns:
            ## combine row-ids (in index) and row-versions (in column 0) to
            ## make new row labels consisting of the row id and version
            ## separated by a dash.
            df.index = row_labels_from_id_and_version(zip(df["ROW_ID"], df["ROW_VERSION"]))
            del df["ROW_ID"]
            del df["ROW_VERSION"]

        return df
Example #15
    def callback(self, monitorId, fileList):
        """
            Callback required by FSEvents.FSEventStream.

            :Parameters:
                monitorId :
                    The id of the monitor that reported the events.
                fileList :
                    A list of (fileId, eventType) pairs for the events.

            :return: No explicit return value.

        """

        eventList = []
        for fileEvent in fileList:
            fileId = fileEvent[0]
            if isbytes(fileId):
                fileId = bytes_to_native_str(fileId)
            info = monitors.EventInfo(fileId, fileEvent[1])
            eventList.append(info)

        proxy = self.proxies[monitorId]

        try:
            self.log.info('Event notification on monitor id= %s', monitorId)
            self.log.debug(' ...notifications are: %s', str(eventList))
            proxy.fsEventHappened(native_str(monitorId), eventList)
        except Exception as e:
            self.log.info('Callback to monitor id=' + monitorId +
                          ' failed. Reason: ' + str(e))
Example #16
    def decrypt(self, message):
        # type: (_OlmMessage) -> str
        """Decrypts a message using the session. Returns the plaintext string
        on success. Raises OlmSessionError on failure. If the base64 couldn't
        be decoded then the error message will be "INVALID_BASE64". If the
        message is for an unsupported version of the protocol the error message
        will be "BAD_MESSAGE_VERSION". If the message couldn't be decoded then
        the error message will be "BAD_MESSAGE_FORMAT". If the MAC on the
        message was invalid then the error message will be "BAD_MESSAGE_MAC".

        Args:
            message(OlmMessage): The Olm message that will be decrypted. It can
            be either an OlmPreKeyMessage or an OlmMessage.
        """
        if not message.ciphertext:
            raise ValueError("Ciphertext can't be empty")

        byte_ciphertext = to_bytes(message.ciphertext)
        ciphertext_buffer = ffi.new("char[]", byte_ciphertext)

        max_plaintext_length = lib.olm_decrypt_max_plaintext_length(
            self._session, message.message_type, ciphertext_buffer,
            len(byte_ciphertext))
        plaintext_buffer = ffi.new("char[]", max_plaintext_length)
        ciphertext_buffer = ffi.new("char[]", byte_ciphertext)
        plaintext_length = lib.olm_decrypt(self._session, message.message_type,
                                           ciphertext_buffer,
                                           len(byte_ciphertext),
                                           plaintext_buffer,
                                           max_plaintext_length)
        self._check_error(plaintext_length)
        return bytes_to_native_str(
            ffi.unpack(plaintext_buffer, plaintext_length))
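A side note that applies to the olm snippets in this listing: ffi.unpack() hands back bytes, and bytes_to_native_str() turns that into the platform's native str type. A cffi-free sketch of just that last step:

from future.utils import bytes_to_native_str

unpacked = b"SGVsbG8"            # stand-in for what ffi.unpack() returns
assert isinstance(bytes_to_native_str(unpacked), str)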
Example #17
 def addWatch(self, path, mask):
     if not self.isPathWatched(path):
         try:
             if isbytes(path):
                 path_obj = pathModule.path(bytes_to_native_str(path))
             else:
                 path_obj = pathModule.path(path)
             res = pyinotify.WatchManager.add_watch(self,
                                                    path,
                                                    mask,
                                                    rec=False,
                                                    auto_add=False,
                                                    quiet=False)
             self.watchPaths.update(res)
             self.watchParams[path] = copy.copy(
                 self.watchParams[path_obj.parent])
             if self.watchParams[path].getRec():
                 for d in path_obj.dirs():
                     self.addWatch(d, mask)
             if self.isPathWatched(path):
                 self.log.info('Watch added on: %s' % path)
             else:
                 self.log.info('Unable to add watch on: %s' % path)
         except Exception as e:
             self.log.error('Unable to add watch on: %s : %s' % (path, e))
Example #18
    def __init__(self, seed):
        # type: (bytes) -> None
        """Create a new signing object.

        Args:
            seed(bytes): the seed to use as the private key for signing.  The
                seed must have the same length as the seeds generated by
                PkSigning.generate_seed().
        """
        if not seed:
            raise ValueError("seed can't be empty")

        self._buf = ffi.new("char[]", lib.olm_pk_signing_size())
        self._pk_signing = lib.olm_pk_signing(self._buf)
        track_for_finalization(self, self._pk_signing, _clear_pk_signing)

        seed_buffer = ffi.new("char[]", seed)

        pubkey_length = lib.olm_pk_signing_public_key_length()
        pubkey_buffer = ffi.new("char[]", pubkey_length)

        ret = lib.olm_pk_signing_key_from_seed(self._pk_signing, pubkey_buffer,
                                               pubkey_length, seed_buffer,
                                               len(seed))

        # zero out copies of the seed
        lib.memset(seed_buffer, 0, len(seed))

        self._check_error(ret)

        self.public_key = bytes_to_native_str(
            ffi.unpack(pubkey_buffer, pubkey_length))
Example #19
    def sign(self, message):
        # type: (AnyStr) -> str
        """Signs a message with this account.

        Signs a message with the private ed25519 identity key of this account.
        Returns the signature.
        Raises OlmAccountError on failure.

        Args:
            message(str): The message to sign.
        """
        bytes_message = to_bytearray(message)
        out_length = lib.olm_account_signature_length(self._account)
        out_buffer = ffi.new("char[]", out_length)

        try:
            self._check_error(
                lib.olm_account_sign(self._account,
                                     ffi.from_buffer(bytes_message),
                                     len(bytes_message), out_buffer,
                                     out_length))
        finally:
            # clear out copies of the message, which may be plaintext
            if bytes_message is not message:
                for i in range(0, len(bytes_message)):
                    bytes_message[i] = 0

        return bytes_to_native_str(ffi.unpack(out_buffer, out_length))
Example #20
 def assertEqual(self, op_list1, op_list2):
     if isinstance(op_list1, list) and isinstance(op_list2, list):
         for op in op_list1 + op_list2:
             if isinstance(op, caffe2_pb2.OperatorDef):
                 op.ClearField(bytes_to_native_str(b'uuid'))
     return super(TestGradientCalculation, self).assertEqual(
         op_list1, op_list2)
Example #21
    def encrypt(self, plaintext):
        # type: (AnyStr) -> str
        """Encrypt a message.

        Returns the encrypted ciphertext.

        Args:
            plaintext(str): A string that will be encrypted using the group
                session.
        """
        byte_plaintext = to_bytes(plaintext)
        message_length = lib.olm_group_encrypt_message_length(
            self._session, len(byte_plaintext))

        message_buffer = ffi.new("char[]", message_length)

        plaintext_buffer = ffi.new("char[]", byte_plaintext)

        ret = lib.olm_group_encrypt(
            self._session,
            plaintext_buffer,
            len(byte_plaintext),
            message_buffer,
            message_length,
        )
        self._check_error(ret)
        return bytes_to_native_str(ffi.unpack(message_buffer, message_length))
Example #22
    def __init__(self, uuid):
        self._uuid = copy(BLEUUID.BASE_UUID_BYTES)

        if isinstance(uuid, UUID):
            # Assume that the UUID is correct
            self._uuid = bytearray(uuid.bytes)
        elif isinstance(uuid, bytes):
            self._uuid[2:4] = bytearray(bytes_to_native_str(uuid))
        elif isinstance(uuid, str):
            if len(uuid) == 4:
                # 16-bit UUID
                part = int(uuid, 16).to_bytes(2, 'little')
                self._uuid[2:4] = bytearray(part)
            elif len(uuid) == 8:
                # 32-bit UUID
                part = int(uuid, 16).to_bytes(4, 'little')
                self._uuid[0:4] = bytearray(part)
            elif len(uuid) == 36:
                # 128-bit UUID
                self._uuid = bytearray(UUID(uuid).bytes)
            else:
                raise ValueError("Invalid UUID")
        elif isinstance(uuid, int):
            if uuid < 65536:
                # 16-bit UUID
                part = int(uuid).to_bytes(2, 'little')
                self._uuid[2:4] = bytearray(part)
            elif uuid < 2**32:
                # 32-bit UUID
                part = int(uuid).to_bytes(4, 'little')
                self._uuid[0:4] = bytearray(part)
            else:
                raise ValueError("Invalid UUID")
        else:
            raise ValueError("Invalid UUID (type error)")
Example #23
    def export_session(self, message_index):
        # type: (int) -> str
        """Export an inbound group session

        Export the base64-encoded ratchet key for this session, at the given
        index, in a format which can be used by import_session().

        Raises OlmGroupSessionError on failure. The error message for the
        exception will be:

        * OLM_UNKNOWN_MESSAGE_INDEX if we do not have a session key
            corresponding to the given index (ie, it was sent before the
            session key was shared with us)

        Args:
            message_index(int): The message index at which the session should
                be exported.
        """

        export_length = lib.olm_export_inbound_group_session_length(
            self._session)

        export_buffer = ffi.new("char[]", export_length)
        ret = lib.olm_export_inbound_group_session(self._session,
                                                   export_buffer,
                                                   export_length,
                                                   message_index)
        self._check_error(ret)
        return bytes_to_native_str(ffi.unpack(export_buffer, export_length))
Example #24
def cleanup_x509_text(txt):
    kts = txt.split(b'\n')
    kt = [
        b'  ' + x for x in kts
        if len(x) and not (x.startswith(b'----') and x.endswith(b'----'))
    ]
    return bytes_to_native_str(b'  ' + b'\n  '.join(kt) + b'\n')
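A hedged illustration with made-up PEM text: the header/footer lines are dropped and the body lines come back indented as a native str:

pem = b"-----BEGIN CERTIFICATE-----\nMIIB\nAQAB\n-----END CERTIFICATE-----\n"
indented = cleanup_x509_text(pem)   # '    MIIB\n    AQAB\n' as a native str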
Example #25
    def encrypt(self, plaintext):
        # type: (AnyStr) -> str
        """Encrypt a message.

        Returns the encrypted ciphertext.

        Args:
            plaintext(str): A string that will be encrypted using the group
                session.
        """
        byte_plaintext = to_bytearray(plaintext)
        message_length = lib.olm_group_encrypt_message_length(
            self._session, len(byte_plaintext))

        message_buffer = ffi.new("char[]", message_length)

        try:
            ret = lib.olm_group_encrypt(
                self._session,
                ffi.from_buffer(byte_plaintext),
                len(byte_plaintext),
                message_buffer,
                message_length,
            )
            self._check_error(ret)
        finally:
            # clear out copies of plaintext
            if byte_plaintext is not plaintext:
                for i in range(0, len(byte_plaintext)):
                    byte_plaintext[i] = 0

        return bytes_to_native_str(ffi.unpack(message_buffer, message_length))
Example #26
    def login_for_jwt(self):
        try:
            session = requests.Session()
            session.headers.update({
                'content-type': 'application/json',
                'accept': 'application/json'
            })

            password_prompt = bytes_to_native_str(b"Password :")
            print("Udacity Login required.")
            email = input('Email :')
            password = getpass.getpass(password_prompt)
            udacity_login(session, self.root_url, email, password)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 403:
                raise ProjectAssistantAuthenticationError(
                    "Authentication failed")
            else:
                raise e

        try:
            r = session.post(self.root_url + '/auth_tokens')
            r.raise_for_status()
        except:
            raise ProjectAssistantAuthenticationError("Authentication failed")

        jwt = r.json()['auth_token']

        return jwt
Example #27
    def decrypt(self, message):
        # type (PkMessage) -> str
        ephermal_key = to_bytes(message.ephermal_key)
        ephermal_key_size = len(ephermal_key)

        mac = to_bytes(message.mac)
        mac_length = len(mac)

        ciphertext = to_bytes(message.ciphertext)
        ciphertext_length = len(ciphertext)

        max_plaintext_length = lib.olm_pk_max_plaintext_length(
            self._pk_decryption,
            ciphertext_length
        )
        plaintext = ffi.new("char[]", max_plaintext_length)

        ret = lib.olm_pk_decrypt(
            self._pk_decryption,
            ephermal_key, ephermal_key_size,
            mac, mac_length,
            ciphertext, ciphertext_length,
            plaintext, max_plaintext_length)
        self._check_error(ret)

        unpacked_plaintext = (ffi.unpack(
            plaintext,
            ret
        ))

        return bytes_to_native_str(unpacked_plaintext)
Example #28
File: sas.py Project: bitcard/olm2
    def calculate_mac(self, message, extra_info):
        # type: (str, str) -> str
        """Generate a message authentication code based on the shared secret.

        Args:
            message (str): The message to produce the authentication code for.
            extra_info (str): Extra information to mix in when generating the
                MAC

        Raises OlmSasError on failure.

        """
        byte_message = to_bytes(message)
        byte_info = to_bytes(extra_info)

        mac_length = lib.olm_sas_mac_length(self._sas)
        mac_buffer = ffi.new("char[]", mac_length)

        self._check_error(
            lib.olm_sas_calculate_mac(
                self._sas,
                ffi.from_buffer(byte_message),
                len(byte_message),
                ffi.from_buffer(byte_info),
                len(byte_info),
                mac_buffer,
                mac_length
            )
        )
        return bytes_to_native_str(ffi.unpack(mac_buffer, mac_length))
Example #29
File: sas.py Project: bitcard/olm2
    def calculate_mac_long_kdf(self, message, extra_info):
        # type: (str, str) -> str
        """Generate a message authentication code based on the shared secret.

        This function should not be used unless compatibility with an older
        non-tagged Olm version is required.

        Args:
            message (str): The message to produce the authentication code for.
            extra_info (str): Extra information to mix in when generating the
                MAC

        Raises OlmSasError on failure.

        """
        byte_message = to_bytes(message)
        byte_info = to_bytes(extra_info)

        mac_length = lib.olm_sas_mac_length(self._sas)
        mac_buffer = ffi.new("char[]", mac_length)

        self._check_error(
            lib.olm_sas_calculate_mac_long_kdf(
                self._sas,
                ffi.from_buffer(byte_message),
                len(byte_message),
                ffi.from_buffer(byte_info),
                len(byte_info),
                mac_buffer,
                mac_length
            )
        )
        return bytes_to_native_str(ffi.unpack(mac_buffer, mac_length))
Example #30
    def parse_lines(self, lines):
        """Parse the properties from the given configuration file lines"""
        for line in lines:
            if isbytes(line):
                line = bytes_to_native_str(line)
            if line.endswith("\n"):
                line = line[:-1]

            if line.startswith(STOP):
                self.cleanup()
                break
            if self.is_excluded(line):
                self.cleanup()
                continue
            elif not line.strip():
                self.cleanup()
                continue
            elif line.startswith("#"):
                self.append(line)
            elif "=" in line and self.curr_a != ESCAPED:
                self.detect(line)
            elif line.endswith("\\"):
                self.cont(line[:-1])
            else:
                self.cont(line)
        self.cleanup()  # Handle no newline at end of file
        return self.properties
Example #31
    def __init__(self, uuid):
        self._uuid = copy(BLEUUID.BASE_UUID_BYTES)

        if isinstance(uuid, UUID):
            # Assume that the UUID is correct
            self._uuid = bytearray(uuid.bytes)
        elif isinstance(uuid, bytes):
            self._uuid[2:4] = bytearray(bytes_to_native_str(uuid))
        elif isinstance(uuid, str):
            if len(uuid) == 4:
                # 16-bit UUID
                part = int(uuid, 16).to_bytes(2, 'little')
                self._uuid[2:4] = bytearray(part)
            elif len(uuid) == 8:
                # 32-bit UUID
                part = int(uuid, 16).to_bytes(4, 'little')
                self._uuid[0:4] = bytearray(part)
            elif len(uuid) == 36:
                # 128-bit UUID
                self._uuid = bytearray(UUID(uuid).bytes)
            else:
                raise ValueError("Invalid UUID")
        elif isinstance(uuid, int):
            if uuid < 65536:
                # 16-bit UUID
                part = int(uuid).to_bytes(2, 'little')
                self._uuid[2:4] = bytearray(part)
            elif uuid < 2**32:
                # 32-bit UUID
                part = int(uuid).to_bytes(4, 'little')
                self._uuid[0:4] = bytearray(part)
            else:
                raise ValueError("Invalid UUID")
        else:
            raise ValueError("Invalid UUID (type error)")
Example #32
    def from_pickle(cls, pickle, passphrase=""):
        # types: (bytes, str) -> PkDecryption
        if not pickle:
            raise ValueError("Pickle can't be empty")

        byte_key = to_bytes(passphrase)
        key_buffer = ffi.new("char[]", byte_key)
        pickle_buffer = ffi.new("char[]", pickle)

        pubkey_length = lib.olm_pk_key_length()
        pubkey_buffer = ffi.new("char[]", pubkey_length)

        obj = cls.__new__(cls)

        ret = lib.olm_unpickle_pk_decryption(
            obj._pk_decryption,
            key_buffer, len(byte_key),
            pickle_buffer, len(pickle),
            pubkey_buffer, pubkey_length)

        obj._check_error(ret)

        obj.public_key = bytes_to_native_str(ffi.unpack(
            pubkey_buffer,
            pubkey_length
        ))

        return obj
Example #33
  def login_for_jwt(self):
    try:
      session = requests.Session()
      session.headers.update({'content-type':'application/json', 'accept': 'application/json'})

      password_prompt = bytes_to_native_str(b"Password :")

      if self.id_provider == 'udacity':
        print("Udacity Login required.")
        email = input('Email :')
        password = getpass.getpass(password_prompt)
        udacity_login(session, self.root_url, email, password)
      elif self.id_provider == 'gt':
        print("GT Login required.")
        username = input('Username :'******'developer':
        print("Developer Login required.")
        username = input('Username :'******'/auth_tokens')
    r.raise_for_status()

    jwt = r.json()['auth_token']

    return jwt
Example #34
    def encrypt(self, plaintext):
        # type: (AnyStr) -> PkMessage
        """Encrypt a message.

        Returns the encrypted PkMessage.

        Args:
            plaintext(str): A string that will be encrypted using the
            PkEncryption object.
        """
        byte_plaintext = to_bytearray(plaintext)

        r_length = lib.olm_pk_encrypt_random_length(self._pk_encryption)
        random = URANDOM(r_length)
        random_buffer = ffi.new("char[]", random)

        ciphertext_length = lib.olm_pk_ciphertext_length(
            self._pk_encryption, len(byte_plaintext))
        ciphertext = ffi.new("char[]", ciphertext_length)

        mac_length = lib.olm_pk_mac_length(self._pk_encryption)
        mac = ffi.new("char[]", mac_length)

        ephemeral_key_size = lib.olm_pk_key_length()
        ephemeral_key = ffi.new("char[]", ephemeral_key_size)

        ret = lib.olm_pk_encrypt(self._pk_encryption,
                                 ffi.from_buffer(byte_plaintext),
                                 len(byte_plaintext), ciphertext,
                                 ciphertext_length, mac, mac_length,
                                 ephemeral_key, ephemeral_key_size,
                                 random_buffer, r_length)

        try:
            self._check_error(ret)
        finally:  # pragma: no cover
            # clear out copies of plaintext
            if byte_plaintext is not plaintext:
                for i in range(0, len(byte_plaintext)):
                    byte_plaintext[i] = 0

        message = PkMessage(
            bytes_to_native_str(ffi.unpack(ephemeral_key, ephemeral_key_size)),
            bytes_to_native_str(ffi.unpack(mac, mac_length)),
            bytes_to_native_str(ffi.unpack(ciphertext, ciphertext_length)))
        return message
Example #35
 def test_bytes_to_native_str(self):
     """
     Test for issue #47
     """
     b = bytes(b'abc')
     s = bytes_to_native_str(b)
     if PY2:
         self.assertEqual(s, b)
     else:
         self.assertEqual(s, 'abc')
     self.assertTrue(isinstance(s, native_str))
     self.assertEqual(type(s), native_str)
Example #36
def filter_smudge():
    # Operate on stdin/stdout in binary mode
    firstline = next(stdin)
    if firstline == b"bigstore\n":
        hash_function_name = next(stdin)
        hexdigest = next(stdin)
        source_filename = object_filename(bytes_to_native_str(hash_function_name)[:-1],
                                          bytes_to_native_str(hexdigest)[:-1])

        try:
            with open(source_filename):
                pass
        except IOError:
            stdout.write(firstline)
            stdout.write(hash_function_name)
            stdout.write(hexdigest)
        else:
            with open(source_filename, 'rb') as file:
                for line in file:
                    stdout.write(line)
    else:
        stdout.write(firstline)
        for line in stdin:
            stdout.write(line)
Example #37
    def _get_package_zip(self, path):
        # Build the zip file
        zip_bytes = io.BytesIO()
        zipf = zipfile.ZipFile(zip_bytes, "w", zipfile.ZIP_DEFLATED)

        with cd(path):
            for file_to_package in self._get_files_to_package():
                zipf.write(file_to_package)

        zipf.close()

        zipf_processed = self._process_zip_file(zipfile.ZipFile(zip_bytes))
        fp = zipf_processed.fp
        zipf_processed.close()
        return bytes_to_native_str(base64.b64encode(fp.getvalue()))
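The base64/native-str round trip that the final line relies on, shown standalone (the payload is illustrative):

import base64
from future.utils import bytes_to_native_str

payload = b"PK\x03\x04 pretend zip bytes"
encoded = bytes_to_native_str(base64.b64encode(payload))   # native str on Python 2 and 3
assert base64.b64decode(encoded) == payload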
Example #38
File: prog.py Project: gngj/omerbot
def application(environ, start_response):
    print "app"
    # the environment variable CONTENT_LENGTH may be empty or missing
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except (ValueError):
        request_body_size = 0

    # When the method is POST the variable will be sent
    # in the HTTP request body which is passed by the WSGI server
    # in the file like wsgi.input environment variable.
    buf = environ['wsgi.input'].read(request_body_size)
    global job_queue

    updater = Updater("207443777:AAGuMP5nIJMqbFKILRmVuuAz8in7PfiWdjA")
    job_queue = updater.job_queue

    # Get the dispatcher to register handlers
    dp = updater.dispatcher

    # on different commands - answer in Telegram
    dp.addHandler(CommandHandler("start", start))
    dp.addHandler(CommandHandler("help", start))
    dp.addHandler(CommandHandler("set", set))
    dp.addHandler(InlineQueryHandler(inlinequery))

    # log all errors
    dp.addErrorHandler(error)
  
    updater.bot.setWebhook("https://afternoon-shelf-83103.herokuapp.com")
    # Start the Bot
    #updater.start_polling()
    
    json_string = bytes_to_native_str(buf)

    update = Update.de_json(json.loads(json_string))
    dp.processUpdate(update)

    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['']
    # Block until the user presses Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    import time
Example #39
    def do_POST(self):
        self.logger.debug("Webhook triggered")
        try:
            self._validate_post()
            clen = self._get_content_len()
        except _InvalidPost as e:
            self.send_error(e.http_code)
            self.end_headers()
        else:
            buf = self.rfile.read(clen)
            json_string = bytes_to_native_str(buf)

            self.send_response(200)
            self.end_headers()

            self.logger.debug("Webhook received data: " + json_string)

            update = Update.de_json(json.loads(json_string))
            self.logger.info("Received Update with ID %d on Webhook" % update.update_id)
            self.server.update_queue.put(update)
Example #40

# Try and install Skyfield data
try:
    from caput import time as ctime

    # Force download of data
    sf = ctime.SkyfieldWrapper(
        ephemeris='http://e-mode.phas.ubc.ca/~jrs65/de421.bsp'
    )
    sf.reload()

    # Set package data to be installed alongside skyfield
    skyfield_data = {
        # TODO: Py3 remove this hack needed to work around a setuptools bug
        bytes_to_native_str(b'caput'): [
            'data/Leap_Second.dat',
            'data/de421.bsp',
            'data/deltat.data',
            'data/deltat.preds'
        ]
    }

except:
    import warnings
    warnings.warn("Could not install additional Skyfield data.")
    skyfield_data = {}

setup(
    name='caput',
    version=__version__,
Example #41
 def write(self, data):
     self.ws.send(b64encode(bytes_to_native_str(bytes(data))))
Example #42
  ('mf2.json', ['mf2-from-json.html', 'mf2.html'], microformats2.json_to_html,
   # we do not format h-media photos properly in html
   ('note_with_composite_photo',)),
  # not ready yet
  # ('mf2.html', ['as-from-mf2.json', 'as.json'], html_to_activity, ()),
  ('as.json', ['feed-from-as.json', 'feed.json'], activity_to_jsonfeed, ()),
  ('feed.json', ['as-from-feed.json', 'as.json'], jsonfeed_to_activity, ()),
  ('as.json', ['as2-from-as.json', 'as2.json'], as2.from_as1, ()),
  ('as2.json', ['as-from-as2.json', 'as.json'], as2.to_as1, ()),
  ('as.json', ['rss.xml'], rss_from_activities, ()),
)

test_funcs = {}
for src_ext, dst_exts, fn, excludes in mappings:
  for src, dst in filepairs(src_ext, dst_exts):
    if any(dst.startswith(exclude) for exclude in excludes):
      continue

    expected = read(dst)
    original = read(src)
    test_name = (
      'test_%s_%s' % (fn.__name__, src[:-len(src_ext)])
    ).replace('.', '_').replace('-', '_').strip('_')
    test_funcs[test_name] = create_test_function(fn, original, expected)

os.chdir(prevdir)


TestDataTest = type(bytes_to_native_str(b'TestDataTest'), (testutil.TestCase,),
                    test_funcs)
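The conversion is needed because on Python 2 the first argument to type() must be a native str, not unicode (an issue once unicode_literals is in effect). A standalone sketch:

from future.utils import bytes_to_native_str

DynamicCase = type(bytes_to_native_str(b'DynamicCase'), (object,), {})
assert DynamicCase.__name__ == 'DynamicCase'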
Example #43
# -*- coding: utf-8 -*-
"""Wrapper for :mod:`array` to simplify and make future compatible.

Not a complete wrapper. New routines added as required.

:copyright: Copyright (c) 2015 Bivio Software, Inc.  All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from future.utils import bytes_to_native_str

import array

#: Future-proof typecode for double
DOUBLE_TYPECODE = bytes_to_native_str(b'd')

#: Future-proof typecode for float
FLOAT_TYPECODE = bytes_to_native_str(b'f')

def new_double(*args, **kwargs):
    """Creates a new double ("d") array

    Args are the same as :func:`array.array` except for typecode,
    which is passed by this module.

    Returns:
        array.array: New, initialized array
    """
    return array.array(DOUBLE_TYPECODE, *args, **kwargs)
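A quick usage sketch for new_double (the values are illustrative):

samples = new_double([0.25, 0.5, 0.75])
assert samples.typecode == 'd'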

Example #44
    def read_fastfood(filename, return_dtype=None, need_header=None):
        if return_dtype is None:
            msg = "Return data-type must be set and a valid numpy data-type"
            raise ValueError(msg)

        import struct
        with open(filename, "rb") as f:
            skip1 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
            idat = struct.unpack(bytes_to_native_str(b'@iiiii'),
                                 f.read(20))[0:5]
            skip2 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
            assert skip1 == 20 and skip2 == 20,\
                "fast-food file seems to be incorrect (reading idat)"
            ngal = idat[1]

            if need_header is not None:
                # now read fdat
                skip1 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
                fdat = struct.unpack(bytes_to_native_str(b'@fffffffff'),
                                     f.read(36))[0:9]
                skip2 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
                assert skip1 == 36 and skip2 == 36,\
                    "fast-food file seems to be incorrect (reading fdat )"

                skip1 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
                znow = struct.unpack(bytes_to_native_str(b'@f'), f.read(4))[0]
                skip2 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
                assert skip1 == 4 and skip2 == 4,\
                    "fast-food file seems to be incorrect (reading redshift)"
            else:
                fdat_bytes = 4 + 36 + 4
                znow_bytes = 4 + 4 + 4
                # seek over the fdat + znow fields + padding bytes
                # from current position
                f.seek(fdat_bytes + znow_bytes, 1)

            # read the padding bytes for the x-positions
            skip1 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
            assert skip1 == ngal * 4 or skip1 == ngal * 8, \
                "fast-food file seems to be corrupt (padding bytes)"

            # seek back 4 bytes from current position
            f.seek(-4, 1)
            pos = {}
            for field in 'xyz':
                skip1 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
                assert skip1 == ngal * 4 or skip1 == ngal * 8, \
                    "fast-food file seems to be corrupt (padding bytes a)"
                # the next division must be the integer division
                input_dtype = np.float32 if skip1 // ngal == 4 else np.float64
                array = np.fromfile(f, input_dtype, ngal)
                skip2 = struct.unpack(bytes_to_native_str(b'@i'), f.read(4))[0]
                pos[field] = array if return_dtype is None else return_dtype(array)

        x = pos['x']
        y = pos['y']
        z = pos['z']

        if need_header is not None:
            return idat, fdat, znow, x, y, z
        else:
            return x, y, z
Example #45
 def _read_lengths(self):
     """Reads the lengths from persistent storage, if it does not exist, returns an empty array."""
     lengths = array(bytes_to_native_str(b'l'))
     if op.isfile(self._lengths_file):
         _read_full_file(lengths, self._lengths_file)
     return lengths
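The native-str typecode matters because, on Python 2 with unicode_literals in effect, array.array() rejects a unicode typecode; the bytes literal is therefore converted first. A standalone sketch:

from array import array
from future.utils import bytes_to_native_str

lengths = array(bytes_to_native_str(b'l'))
lengths.append(42)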
Example #46
"""Python 2 / 3 compatibility layer.

The functions are copied from http://python-future.org/
to avoid the extra dependency.
"""

# TODO: for some reason defining the `bytes_to_native_str` function
# ourselves doesn't work ... maybe they do some monkey business on import?

from future.utils import bytes_to_native_str
#def bytes_to_native_str(b, encoding='utf-8'):
#        return b.decode(encoding)

# See http://python-future.org/stdlib_incompatibilities.html#array-array
ARRAY_DOUBLE_TYPECODE = bytes_to_native_str(b'd')
Example #47
def json_rpc_loads(s):
    return json.loads(bytes_to_native_str(s), object_hook=_parse_rpc_message)
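A standalone illustration of the bytes-to-JSON path (the _parse_rpc_message hook is omitted and the payload is made up):

import json
from future.utils import bytes_to_native_str

raw = b'{"jsonrpc": "2.0", "id": 1, "method": "ping"}'
message = json.loads(bytes_to_native_str(raw))
assert message["method"] == "ping"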
Example #48
 def e(cls):
     warnings.warn("telegram.Emoji is being deprecated, please see https://git.io/v6DeB")
     return bytes_to_native_str(b)
Example #49
 def to_str(self, serial_elem, pretty_print=False,  # @ReservedAssignment @IgnorePep8
            xml_declaration=False, encoding='UTF-8', **kwargs):  # @UnusedVariable  @IgnorePep8
     return bytes_to_native_str(
         etree.tostring(serial_elem, encoding=encoding,
                        pretty_print=pretty_print,
                        xml_declaration=xml_declaration))
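The same conversion shown standalone with lxml (assumes lxml is installed; the element is illustrative):

from lxml import etree
from future.utils import bytes_to_native_str

elem = etree.Element("root")
xml = bytes_to_native_str(etree.tostring(elem, encoding='UTF-8',
                                         pretty_print=True,
                                         xml_declaration=True))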
Example #50
 def _write_handle(self, handle, data, response=True):
     if response:
         return self.requester.write_by_handle(handle, bytes_to_native_str(data))
     else:
         return self.requester.write_without_response_by_handle(handle, bytes_to_native_str(data))
Example #51
    def test_get_package_zip(self):
        with temporary_dir() as path:

            # add package.xml
            with open(os.path.join(path, "package.xml"), "w") as f:
                f.write(
                    """<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
    <version>45.0</version>
</Package>"""
                )

            # add lwc
            lwc_path = os.path.join(path, "lwc")
            os.mkdir(lwc_path)

            # add lwc linting files (not included in zip)
            lwc_ignored_files = [".eslintrc.json", "jsconfig.json"]
            for lwc_ignored_file in lwc_ignored_files:
                touch(os.path.join(lwc_path, lwc_ignored_file))

            # add lwc component
            lwc_component_path = os.path.join(lwc_path, "myComponent")
            os.mkdir(lwc_component_path)

            # add lwc component files included in zip (in alphabetical order)
            lwc_component_files = [
                {"name": "myComponent.html"},
                {"name": "myComponent.js"},
                {
                    "name": "myComponent.js-meta.xml",
                    "body:": """<?xml version="1.0" encoding="UTF-8"?>
<LightningComponentBundle xmlns="http://soap.sforce.com/2006/04/metadata" fqn="myComponent">
    <apiVersion>45.0</apiVersion>
    <isExposed>false</isExposed>
</LightningComponentBundle>""",
                },
                {"name": "myComponent.svg"},
                {"name": "myComponent.css"},
            ]
            for lwc_component_file in lwc_component_files:
                with open(
                    os.path.join(lwc_component_path, lwc_component_file.get("name")),
                    "w",
                ) as f:
                    if lwc_component_file.get("body") is not None:
                        f.write(lwc_component_file.get("body"))

            # add lwc component files not included in zip
            for lwc_ignored_file in lwc_ignored_files:
                touch(os.path.join(lwc_component_path, lwc_ignored_file))

            # add lwc component sub-directory and files not included in zip
            lwc_component_test_path = os.path.join(lwc_component_path, "__tests__")
            os.mkdir(lwc_component_test_path)
            touch(os.path.join(lwc_component_test_path, "test.js"))

            # add classes
            classes_path = os.path.join(path, "classes")
            os.mkdir(classes_path)
            class_files = [
                {
                    "name": "MyClass.cls-meta.xml",
                    "body": """<?xml version="1.0" encoding="UTF-8"?>
<ApexClass xmlns="http://soap.sforce.com/2006/04/metadata">
    <apiVersion>45.0</apiVersion>
    <status>Active</status>
</ApexClass>
""",
                },
                {"name": "MyClass.cls"},
            ]
            for class_file in class_files:
                with open(os.path.join(classes_path, class_file.get("name")), "w") as f:
                    if class_file.get("body") is not None:
                        f.write(class_file.get("body"))

            # add objects
            objects_path = os.path.join(path, "objects")
            os.mkdir(objects_path)
            object_file_names = ["Account.object", "Contact.object", "CustomObject__c"]
            object_file_names.sort()
            for object_file_name in object_file_names:
                touch(os.path.join(objects_path, object_file_name))

            # add sub-directory of objects (that doesn't really exist)
            objects_sub_path = os.path.join(objects_path, "does-not-exist-in-schema")
            os.mkdir(objects_sub_path)
            touch(os.path.join(objects_sub_path, "some.file"))

            # test
            task = create_task(
                Deploy,
                {
                    "path": path,
                    "namespace_tokenize": "ns",
                    "namespace_inject": "ns",
                    "namespace_strip": "ns",
                },
            )

            zip_bytes = io.BytesIO()
            zipf = zipfile.ZipFile(zip_bytes, "w", zipfile.ZIP_DEFLATED)

            with cd(path):
                for file_to_package in task._get_files_to_package():
                    zipf.write(file_to_package)
                zipf.close()

            zipf_processed = task._process_zip_file(zipfile.ZipFile(zip_bytes))
            fp = zipf_processed.fp
            zipf_processed.close()
            expected = bytes_to_native_str(base64.b64encode(fp.getvalue()))

            actual = task._get_package_zip(path)

            self.assertEqual(expected, actual)