Example 1
def parseFile(source:IOBase, logger=logging.getLogger()):
    try:
        return Parser(Lexer(source), logger).parseModule(False)
    except CompilerError as e:
        source.seek(0)
        e.format(source.read())
        raise e
Example 2
def peek(stream: IOBase, chunk_size: int) -> str:
    if hasattr(stream, 'peek'):
        return stream.peek(chunk_size)
    else:
        current_pos = stream.tell()
        result = stream.read(chunk_size)
        stream.seek(current_pos)
        return result
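
A quick usage sketch for the helper above (io.BytesIO stands in for a real stream; BytesIO has no peek(), so the tell/read/seek fallback path runs):

import io

stream = io.BytesIO(b"hello world")
print(peek(stream, 5))   # b'hello' (bytes here, despite the -> str annotation)
print(stream.read(5))    # b'hello' -- position was restored, nothing consumed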
Example 3
def generate_code(atcoder_client: AtCoderClient,
                  problem_url: str,
                  config: Config,
                  output_file: IOBase):
    problem = get_problem_from_url(problem_url)
    template_code_path = config.code_style_config.template_file
    lang = config.code_style_config.lang

    def emit_error(text):
        logging.error(with_color(text, Fore.RED))

    def emit_warning(text):
        logging.warning(text)

    def emit_info(text):
        logging.info(text)

    emit_info('{} is used for template'.format(template_code_path))

    # Fetch problem data from the statement
    try:
        content = atcoder_client.download_problem_content(problem)
    except InputFormatDetectionError as e:
        emit_error("Failed to download input format.")
        raise e
    except SampleDetectionError as e:
        emit_error("Failed to download samples.")
        raise e

    try:
        prediction_result = predict_format(content)
        emit_info(
            with_color("Format prediction succeeded", Fore.LIGHTGREEN_EX))
    except (NoPredictionResultError, MultiplePredictionResultsError) as e:
        prediction_result = FormatPredictionResult.empty_result()
        if isinstance(e, NoPredictionResultError):
            msg = "No prediction -- Failed to understand the input format"
        else:
            msg = "Too many prediction -- Failed to understand the input format"
        emit_warning(with_color(msg, Fore.LIGHTRED_EX))

    constants = predict_constants(content.original_html)
    code_generator = config.code_style_config.code_generator
    with open(template_code_path, "r") as f:
        template = f.read()

    output_splitter()

    output_file.write(code_generator(
        CodeGenArgs(
            template,
            prediction_result.format,
            constants,
            config.code_style_config
        )))
Example 4
    def _write_xml_output_to_file(self, file_io: io.IOBase):
        """Write the dump from self._dump() to file_io.

        Args:
          file_io: File to write to.

        """
        output_str = self._dump()
        ExtendedCommonRoadFileWriter.check_validity_of_commonroad_file(
            output_str)
        file_io.write(output_str)
Example 5
 def report(
     self,
     fout: IOBase,
     ct_list: List["CellType"],
     space: int = 4,
 ) -> None:
     """ Write putative cell type reports to fout.
     """
     for ct in ct_list:
         fout.write(" " * space + str(ct) + "\n")
         if ct.subtypes is not None:
             self.report(fout, ct.subtypes, space + 4)
Example 6
    def __new__(cls,
                name,
                mode='r',
                buffer_size=None,
                max_buffers=0,
                max_workers=None,
                **kwargs):
        # If called from a subclass, instantiate that subclass directly
        if cls is not AzureBlobBufferedIO:
            return IOBase.__new__(cls)

        # Get subclass
        return IOBase.__new__(AZURE_BUFFERED[_new_blob(cls, name, kwargs)])
Example 7
def calc_size_and_sha265(content: io.IOBase, chunk_size: int):
    """Calculates the size and the sha2566 value of the content."""
    size = 0
    sha256 = hashlib.sha256()
    content.seek(0, io.SEEK_SET)
    while True:
        buf = content.read(chunk_size)
        length = len(buf)
        size += length
        sha256.update(buf)
        if length != chunk_size:
            break
    return size, sha256.hexdigest()
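
A quick check of the helper above (a sketch; io.BytesIO stands in for real content). The loop stops on the first short read, which reliably signals EOF for regular files and in-memory buffers:

import hashlib
import io

content = io.BytesIO(b"x" * 10000)
size, digest = calc_size_and_sha265(content, chunk_size=4096)
assert size == 10000   # three reads: 4096 + 4096 + 1808 bytes
assert digest == hashlib.sha256(b"x" * 10000).hexdigest()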
Example 8
def verify(module:Module, builtin:Module, logger = logging.getLogger(), source:IOBase = None):
    # Set up the initial state before verifying
    State.init(builtin, logger.getChild("lekvar"))

    State.logger.info(module.context)

    try:
        module.verify()
    except CompilerError as e:
        if source is not None:
            source.seek(0)
            e.format(source.read())
        raise e
Example 10
def download(url: str, stream: io.IOBase, timeout: int, verify: bool):
    """
    Downloads the content of URL into stream.
    This method only supports HTTP and HTTPS URLs.
    The implementation is safe to use for large contents.
    :param url: URL to download.
    :param stream: stream to write content (e.g. file or I/O buffer).
    :param timeout: timeout until server sends data (not the overall download time).
    :param verify: verify server's SSL certificate.
    """
    with requests.get(url, timeout=timeout, verify=verify, stream=True) as r:
        r.raise_for_status()
        for chunk in r.iter_content(chunk_size=1024 * 1024):
            stream.write(chunk)
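
A minimal call sketch (the URL is a placeholder; any writable binary stream works, including an open file):

import io

buf = io.BytesIO()
download("https://example.com/file.bin", buf, timeout=30, verify=True)
print(len(buf.getvalue()), "bytes downloaded")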
Example 11
def generate_code(atcoder_client: AtCoderClient, problem_url: str,
                  config: Config, output_file: IOBase):
    problem = get_problem_from_url(problem_url)
    template_code_path = config.code_style_config.template_file
    lang = config.code_style_config.lang

    def emit_error(text):
        logging.error(with_color(text, Fore.RED))

    def emit_warning(text):
        logging.warning(text)

    def emit_info(text):
        logging.info(text)

    emit_info('{} is used for template'.format(template_code_path))

    # Fetch problem data from the statement
    try:
        content = atcoder_client.download_problem_content(problem)
    except InputFormatDetectionError as e:
        emit_error("Failed to download input format.")
        raise e
    except SampleDetectionError as e:
        emit_error("Failed to download samples.")
        raise e

    try:
        prediction_result = predict_format(content)
        emit_info(with_color("Format prediction succeeded",
                             Fore.LIGHTGREEN_EX))
    except (NoPredictionResultError, MultiplePredictionResultsError) as e:
        prediction_result = FormatPredictionResult.empty_result()
        if isinstance(e, NoPredictionResultError):
            msg = "No prediction -- Failed to understand the input format"
        else:
            msg = "Too many prediction -- Failed to understand the input format"
        emit_warning(with_color(msg, Fore.LIGHTRED_EX))

    constants = predict_constants(content.original_html)
    code_generator = config.code_style_config.code_generator
    with open(template_code_path, "r") as f:
        template = f.read()

    output_splitter()

    output_file.write(
        code_generator(
            CodeGenArgs(template, prediction_result.format, constants,
                        config.code_style_config)))
Example 12
 def _log_fit_params(self, f: IOBase):
     f.write("Fit parameters\n")
     f.write("Epochs: {}\n".format(self.epochs))
     f.write("Batch size: {}\n".format(self.batch_size))
     f.write(
         "_________________________________________________________________\n\n"
     )
Example 13
    def __init__(self, handle : io.IOBase, key : Union[Iterable[int], int, str, None]):
        self._init_key(key)

        self._handle = handle
        self._size = 0
        self._pos = 0
        self._buffer = bytearray(8)

        if handle.readable():
            # Don't assert due to BriceIsSmart
            handle.read(12)
            self._size = self._readu32()
        if handle.writable():
            self._write_header()
Example 14
 def _log_directories(self, f: IOBase):
     f.write("Dataset directory: {}\n".format(self.dataset_dir))
     f.write("dtype: {}\n".format(self.dtype))
     f.write("Checkpoints directory: {}\n".format(self.checkpoints_dir))
     f.write(
         "_________________________________________________________________\n\n"
     )
Example 15
    def generate(self, result: scanner.ScannerResult,
                 output: io.IOBase) -> None:
        for decl in filter(
                lambda decl: decl.type.args is not None and any(
                    map(
                        lambda param:
                        (not isinstance(param, EllipsisParam) and utils.
                         is_function_pointer_type(param.type)),
                        decl.type.args.params,
                    )),
                result.declarations,
        ):
            output.write(self._generateTypeDefForDecl(decl))
        for defin in filter(
                lambda defin: defin.decl.type.args is not None and any(
                    map(
                        lambda param:
                        (not isinstance(param, EllipsisParam) and utils.
                         is_function_pointer_type(param.type)),
                        defin.decl.type.args.params,
                    )),
                result.definitions,
        ):
            output.write(self._generateTypeDefForDecl(defin.decl))

        for decl in result.declarations:
            output.write(self._generateFakeForDecl(decl))

        for definition in result.definitions:
            output.write(self._generateBypassForFuncDef(definition))
            output.write(self._generateFakeForDecl(definition.decl))
Example 16
def p2prpc_analyze_large_file(
    video_handle: io.IOBase, arg2: int
) -> {
        "results_file1": io.IOBase,
        "results_file2": io.IOBase,
        "res_var": int
}:
    p2p_progress_hook(80, 100)
    time.sleep(5)
    video_handle.close()
    return {
        "results_file1": open(video_handle.name, 'rb'),
        "results_file2": open(__file__, 'rb'),
        "res_var": 10
    }
Example 17
def write_int(value: int, size: int, signed: bool, writer: io.IOBase):
    """
    Writes an integer into the writer. It is always encoded as a big endian
    value.

    Parameters:
    - `value`: The value to be written;
    - `size`: The size of the value in bytes;
    - `signed`: A flag that determines if the encoding is signed or not;
    - `writer`: The writer;

    It may raise `OverflowError` if the `value` cannot be represented using
    the specified size.
    """
    writer.write(value.to_bytes(size, byteorder='big', signed=signed))
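
A round-trip sketch: int.to_bytes and int.from_bytes mirror each other, so the big-endian two's-complement encoding can be verified directly.

import io

buf = io.BytesIO()
write_int(-2, size=4, signed=True, writer=buf)
assert buf.getvalue() == b'\xff\xff\xff\xfe'   # big-endian two's complement
assert int.from_bytes(buf.getvalue(), byteorder='big', signed=True) == -2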
Example 18
    def __call__(self, stream: io.IOBase):
        values = []
        for _ in range(self.__min_repeats):
            values.append(self.__parser(stream))

        repeats = self.__min_repeats
        while self.__max_repeats is None or repeats < self.__max_repeats:
            pos = stream.tell()
            try:
                values.append(self.__parser(stream))
                repeats += 1  # count the successful parse, or max_repeats is never reached
            except ParseError:
                stream.seek(pos)
                break

        return values
Example 19
    def _serialize(self, out_stream: io.IOBase, data: list):
        if not data:
            return

        encoding = "utf-8"
        delimeter = '\t'

        out_stream.write("value{d}signer{d}sign\n".format(d=delimeter).encode(encoding))

        for row in data:
            out_stream.write("{value}{d}{signer}{d}{sign}\n".format(
                d=delimeter,
                value=json.dumps(row.data),
                signer=str(base64.b64encode(row.signer), encoding='utf-8') if row.signer else "",
                sign=str(base64.b64encode(row.sign), encoding='utf-8') if row.sign else ""
            ).encode(encoding))
Example 20
def word_list(aff: str, dic: str, base_words_only: bool = False, print_out: bool = True) -> set:
    # Note: a bare IOBase() has no write(), so print(file=...) would fail when
    # print_out is False; a null-device handle (assumes `import os`) is a working sink.
    file = sys.stdout if print_out else open(os.devnull, 'w')
    print('Start parse affix file ...', file=file)
    affix = Affix(aff)
    print('Finished parsing affix file', file=file)
    print('Start parse dictionary file ...', file=file)
    dictionary = parse_dictionary(dic, affix.encoding, affix.flag, affix.iconv, affix.oconv)
    print('Finished parsing dictionary file', file=file)
    if base_words_only:
        out = map(lambda d: d.get_word(), dictionary)
    else:
        print('Start generating word list ...', file=file)
        out = deque()
        queue = dictionary
        while len(queue) > 0:
            print('\rnot processed words: {:<10d}'.format(len(queue)), end='', file=file)
            word = queue.popleft()
            if isinstance(word, Word):
                out.append(word.get_word())
                words = _generate_affix_words(word, affix)
                for w in words:
                    queue.append(w)
            else:
                raise ValueError('Invalid Word: {} is type of {}.'.format(word, type(word)))
        print('\rnot processed words: {:<10d}'.format(len(queue)), file=file)
        print('Finished generating word list', file=file)

    word_set = set(out)
    print('generate Words: {:d}'.format(len(word_set)), file=file)
    print(file=file)
    return word_set
Example 21
def get_md5_from_stream(src: io.IOBase) -> str:
    """calculate md5 of src stream. The stream could been 
    from a file(mode='rb')/network-stream/stringio or any other readable
    object in BINARY stream. This method will NOT close the stream! 
    Return the MD5 hex digest number."""
    if not isinstance(src, io.IOBase) or not src.readable():
        raise Exception("src is not stream or unreadable")
    m: hashlib._hashlib.HASH = hashlib.md5()
    while True:
        b = src.read(4096)
        if not b:
            break
        m.update(b)

    res = m.hexdigest()
    return res
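
A sanity-check sketch against the well-known digest of b"hello":

import io

stream = io.BytesIO(b"hello")
assert get_md5_from_stream(stream) == "5d41402abc4b2a76b9719d911017c592"
assert not stream.closed   # the helper leaves the stream open, as documented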
Example 22
    def _maybe_fd(self, filelike: io.IOBase) -> None:

        try:
            self.fd = filelike.fileno()

        except (AttributeError, OSError):
            self.fd = None
Example 23
def deserialize_join(
    stream: IOBase,
    content_len: int,
    remote_ip: str,
) -> Tuple[str, str, str]:
    """
    Deserialize data for NameNode.add_node().

    Parameters
    ----------
    stream : IOBase
        Stream of request body.
    content_len : int
        Length of request body.
    remote_ip : str
        IP address of client.

    Returns
    -------
    Tuple[str, str, str]:
        Public ip, access url and id of data node.
    """
    tmp = stream.read(content_len).decode('utf-8').split(' ')
    public_url = None
    if len(tmp) > 2:
        public_url, port, id = tmp
    else:
        port, id = tmp
    if ':' in port:
        remote_ip, port = port.split(':')
    url = 'http://' + remote_ip + ':' + port + '/'
    return public_url, url, id
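
A sketch of the short wire format (values here are made up): when the body carries only 'port id', the caller-supplied remote_ip becomes the host.

import io

body = b'8080 node-1'
stream = io.BytesIO(body)
print(deserialize_join(stream, len(body), '10.0.0.5'))
# (None, 'http://10.0.0.5:8080/', 'node-1')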
Example 24
def deserialize_matrix(
    stream: IOBase,
    content_len: int,
    remote_ip: str,
) -> List[List[str]]:
    """
    Deserialize list of lists of strings.

    Parameters
    ----------
    stream : IOBase
        Stream of request body.
    content_len : int
        Length of request body.
    remote_ip : str
        IP address of client.

    Returns
    -------
    List[List[str]]:
        List of lists.
    """
    tmp = stream.read(content_len).decode('utf-8')
    lines = tmp.split('\n')
    return [l.split('\t') for l in lines]
Example 25
def deserialize_stat(
    stream: IOBase,
    content_len: int,
    remote_ip: str,
) -> Tuple[str, int, int]:
    """
    Deserialize tuple returned by stat.

    Parameters
    ----------
    stream : IOBase
        Stream of request body.
    content_len : int
        Length of request body.
    remote_ip : str
        IP address of client.

    Returns
    -------
    Tuple[str, int, int]:
        Full path, size and mode.
    """
    tmp = stream.read(content_len).decode('utf-8')
    tmp = tmp.split()
    return tmp[0], int(tmp[1]), int(tmp[2])
Example 26
def deserialize_tuple(
    stream: IOBase,
    content_len: int,
    remote_ip: str,
) -> Tuple[int, int, int]:
    """
    Deserialize tuple returned by df.

    Parameters
    ----------
    stream : IOBase
        Stream of request body.
    content_len : int
        Length of request body.
    remote_ip : str
        IP address of client.

    Returns
    -------
    Tuple[int, int, int]:
        Total, used and free memory in bytes.
    """
    tmp = stream.read(content_len).decode('utf-8')
    total, used, free = (int(x) for x in tmp.split())
    return total, used, free
Example 27
    def __init__(
        self,
        filelike: io.IOBase,
        mode: str = None,
        encoding: str = None,
        non_blocking: bool = False,
    ) -> None:

        if not isinstance(filelike, io.IOBase):
            raise TypeError("must be a file-like object")

        filelike = self._maybe_text(filelike)
        filelike = self._maybe_bytes(filelike)
        self._maybe_raw(filelike)
        self._maybe_fd(filelike)

        try:
            self.isatty = filelike.isatty()

        except AttributeError:
            self.isatty = os.isatty(self.fd) if self.fd is not None else False

        self.encoding = None

        self._determine_encoding(encoding)

        if non_blocking and self.fd is not None and os.get_blocking(self.fd):
            os.set_blocking(self.fd, False)

        mode, accmode = self._determine_mode(mode)

        mode, accmode = self._maybe_raw_from_fd(mode, accmode)
        mode, accmode = self._maybe_bytes_from_raw(mode, accmode)
        self._maybe_text_from_bytes(mode, accmode)
Example 28
def _read_as_hex(f: io.IOBase, param: str):
    length = int(param)
    read_bytes = f.read(length)
    if len(read_bytes) == 0:
        print(f"end-of-file.")
    else:
        print(read_bytes.hex())
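
For example (a sketch; note that param arrives as a string and is parsed into a byte count):

import io

f = io.BytesIO(b'\x00\x01\x02')
_read_as_hex(f, "3")   # prints: 000102
_read_as_hex(f, "3")   # prints: end-of-file.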
Example 29
def read_data(line: str, f: io.IOBase,
              num_peaks: int) -> Generator[Tuple[float, float], None, None]:
    mz = intensity = ''
    icol = False  # whether we are in intensity column or not
    peaks_read = 0

    while True:
        for char in line:
            if char in '()[]{}':  # Ignore brackets
                continue
            elif char in ' \t,;:\n':  # Delimiter
                if icol and mz and intensity:
                    mz_f = float(mz)
                    intensity_f = float(intensity)
                    if mz_f > 0:
                        yield mz_f, intensity_f
                    peaks_read += 1
                    if peaks_read >= num_peaks:
                        return
                    mz = intensity = ''
                icol = not icol
            elif not icol:
                mz += char
            else:
                intensity += char

        line = f.readline()
        if not line:
            break

    if icol and mz and intensity:
        yield float(mz), float(intensity)
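
A sketch of how the generator is driven (hypothetical peak-list input; f supplies continuation lines and is untouched here because num_peaks is reached first):

import io

f = io.StringIO("")
line = "10.5 100\n20.1 200\n"
print(list(read_data(line, f, num_peaks=2)))
# [(10.5, 100.0), (20.1, 200.0)]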
Example 30
def uncloseable(file: IOBase):
    # Break the close method call - so we can inspect the contents of memory file
    def _close():
        pass

    file.close = _close
    return file
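
Assigning to file.close shadows the class method on the instance, so a later close() becomes a no-op; a sketch with an in-memory file:

import io

buf = uncloseable(io.BytesIO(b"payload"))
buf.close()              # calls the patched no-op instead of BytesIO.close
print(buf.getvalue())    # b'payload' -- contents still inspectable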
Example 31
def make_non_blocking(file_obj: io.IOBase):
    """make file object non-blocking
    Windows doesn't have the fcntl module, but someone on
    stack overflow supplied this code as an answer, and it works
    http://stackoverflow.com/a/34504971/2893090"""

    if USING_WINDOWS:
        LPDWORD = POINTER(DWORD)
        PIPE_NOWAIT = wintypes.DWORD(0x00000001)

        SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState
        SetNamedPipeHandleState.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD]
        SetNamedPipeHandleState.restype = BOOL

        h = msvcrt.get_osfhandle(file_obj.fileno())  # type: ignore

        res = windll.kernel32.SetNamedPipeHandleState(h, byref(PIPE_NOWAIT),
                                                      None, None)
        if res == 0:
            raise ValueError(WinError())

    else:
        # Set the file status flag (F_SETFL) on the pipes to be non-blocking
        # so we can attempt to read from a pipe with no new data without locking
        # the program up
        fcntl.fcntl(file_obj, fcntl.F_SETFL, os.O_NONBLOCK)
Example 32
 def write(self, stream: io.IOBase) -> None:
     """
     Write the record data to a file
     :param stream: An open file object to write to
     :return: None
     """
     stream.writelines([
         b'@',
         self.name.encode('ascii'),
         b' ' + self.desc1.encode('ascii') if self.desc1 != '' else b'',
         b'\n',
         self.seq.encode('ascii'), b'\n',
         b'+ ' + self.desc2.encode('ascii') if self.desc2 != '' else b'+',
         b'\n',
         self.qual.encode('ascii'), b'\n'
     ])
Example 33
    def create_from_buffer(cls, buffer: IOBase, file_header=None):
        if type(buffer) in [bytes, bytearray]:
            buffer = BytesIO(buffer)

            # Read bytes up until the variable-sized data
        base_bytes = buffer.read(cls.TX.offset)
        n_bytes = ctypes.c_uint32.from_buffer_copy(
            base_bytes, cls.NumberOfBytes.offset).value
        n_tx = ctypes.c_uint16.from_buffer_copy(base_bytes,
                                                cls.Ntx.offset).value
        n_rx = ctypes.c_uint16.from_buffer_copy(base_bytes,
                                                cls.Nrx.offset).value

        # Read remaining bytes
        remaining_bytes = buffer.read(n_bytes - cls.TX.offset +
                                      cls.NumberOfBytes.size)

        # Create new class dynamically with string array at the correct size
        new_name = cls.__name__ + '_ntx{}_nrx{}'.format(n_tx, n_rx)
        new_fields = cls._fields_.copy()
        tx_idx = [
            i for i, (name, fieldtype) in enumerate(cls._fields_)
            if name == 'TX'
        ][0]
        rx_idx = [
            i for i, (name, fieldtype) in enumerate(cls._fields_)
            if name == 'RX'
        ][0]
        new_fields[tx_idx] = ('TX', KMRawRangeAngle78_TX * n_tx)
        new_fields[rx_idx] = ('RX', KMRawRangeAngle78_RX * n_rx)
        new_cls = type(new_name, (ctypes.LittleEndianStructure, ), {
            '__str__': cls.__str__,
            '_pack_': cls._pack_,
            '_fields_': new_fields
        })

        all_bytes = base_bytes + remaining_bytes
        obj = new_cls.from_buffer_copy(all_bytes)

        # Checksum (not crc16, but a straight sum of bytes with overflow)
        chk = (sum(all_bytes[new_cls.DatagramType.offset:new_cls.EndID.offset])
               & 0xFFFF)
        if chk != obj.Checksum:
            warning_str = '{}: Checksum failed'.format(cls.__name__)
            warnings.warn(warning_str)

        return obj
Example 34
def _readSl2(stream: io.IOBase, blocksize: int, formver: List[int], strict: bool) -> Frame:
    format = formver[0]
    f = FRAME_FORMATS[format]
    s = struct.calcsize(f)
    here = stream.tell()
    bad = 0
    while True:
        buf = stream.read(s)
        if buf == b'':
            # EOF
            return None
        if len(buf) < s:
            print(f'This is bad. Only got {len(buf)}/{s} bytes=', buf)
            raise NotEnoughDataError('got less bytes than expected during read')
        data = struct.unpack(f, buf)
        if data[0] == here:  # offset is always first
            if bad > 1:
                logger.warning('got back at offset: %s', here)
            break
        elif here > 0:
            bad += 1
            if bad == 1:
                logger.warning('unexpected offset at offset: %s. will try to find next frame', here)
            if strict:
                raise OffsetError('offset mismatch')
            # jump forward and try to catch next
            here += 1
            stream.seek(here)
            continue
        else:
            raise OffsetError('location does not match expected offset')

    kv = {'headersize': s}
    for i, d in enumerate(FRAME_DEFINITIONS[format]):
        name = d['name']
        if not name == "-":
            kv[name] = data[i]
            if name == 'flags' and FLAG_FORMATS[format]:
                if FLAG_AS_BINARY:
                    kv[name] = f'({kv[name]}) {kv[name]:016b}'
                flagform = FLAG_FORMATS[format]
                flags = data[i]
                for k, v in flagform.items():
                    kv[k] = flags & v == v
    b = Frame(**kv)
    b.packet = stream.read(b.packetsize)
    return b
Example 35
File: bsv.py Project: slham/bsv
def dump(f: io.IOBase, xss: List[List[Union[bytes, int, float]]]) -> None:
    buffer = io.BytesIO()
    for xs in xss:
        # write max index
        assert _sizeof[_uint16] == buffer.write(
            struct.pack(_uint16,
                        len(xs) - 1))
        # write types
        for x in xs:
            if isinstance(x, bytes):
                x = _BSV_CHAR
            elif isinstance(x, int):
                x = _BSV_INT
            elif isinstance(x, float):
                x = _BSV_FLOAT
            else:
                assert False
            assert _sizeof[_uint8] == buffer.write(struct.pack(_uint8, x))
        # write sizes
        for x in xs:
            if isinstance(x, bytes):
                x = len(x)
            elif isinstance(x, int):
                x = _sizeof[_bsv_int]
            elif isinstance(x, float):
                x = _sizeof[_bsv_float]
            else:
                assert False
            assert _sizeof[_uint16] == buffer.write(struct.pack(_uint16, x))
        # write vals
        for x in xs:
            if isinstance(x, bytes):
                pass
            elif isinstance(x, int):
                x = struct.pack(_bsv_int, x)
            elif isinstance(x, float):
                x = struct.pack(_bsv_float, x)
            else:
                assert False
            assert len(x) == buffer.write(x)
            assert 1 == buffer.write(b'\0')
    assert _sizeof[_int32] == f.write(
        struct.pack(_int32, len(buffer.getvalue())))
    assert len(
        buffer.getvalue()
    ) < _buffer_size, f"you can't dump more than {_buffer_size} bytes at a time"
    assert len(buffer.getvalue()) == f.write(buffer.getvalue())
Esempio n. 36
0
def text_to_node(self, iio: io.IOBase, parts_list):
    '''
    Load a CNL file
    '''

    def find_part(name, path):
        if name != None:
            for y in parts_list:
                if y.name == name:
                    return y
        elif path != None:
            for y in parts_list:
                if y.path == path:
                    return y

    index = 0
    while iio.readable():
        line=iio.readline().strip()

        line = line.split(' ')
        if line[0] == 'None':
            index+=1
                
        elif line[0] == '[Name]':
            name = line[1]
                
        elif line[0] == '[Path]':
            if len(line) == 1:
                path = ''
            else:
                path = line[1]
                    
        elif line[0] == '[Child]':
            self.children[index].connect(find_part(name, path))
            text_to_node(self.children[index], iio, parts_list)
            index+=1
          
        elif line[0] == '[Parent]':
            return

        elif line[0] == 'MATERIAL':
            return
Example 37
def read_private_key_file(file_: io.IOBase) -> PKey:
    """Read a private key file.  Similar to :meth:`PKey.from_private_key()
    <paramiko.pkey.PKey.from_private_key>` except it guess the key type.

    :param file_: a stream of the private key to read
    :type file_: :class:`io.IOBase`
    :return: the read private key
    :rtype: :class:`paramiko.pkey.PKey`
    :raise paramiko.ssh_exception.SSHException: when something goes wrong

    """
    classes = PKey.__subclasses__()
    last = len(classes) - 1  # index of the last candidate; re-raise only when it fails too
    for i, cls in enumerate(classes):
        try:
            return cls.from_private_key(file_)
        except SSHException:
            if i == last:
                raise
            file_.seek(0)
            continue
Example 38
    def create_from_buffer(cls, buffer: IOBase, file_header=None):
        if type(buffer) in [bytes, bytearray]:
            buffer = BytesIO(buffer)

            # Read bytes up until the variable-sized data
        base_bytes = buffer.read(cls.TX.offset)
        n_bytes = ctypes.c_uint32.from_buffer_copy(base_bytes, cls.NumberOfBytes.offset).value
        n_tx = ctypes.c_uint16.from_buffer_copy(base_bytes, cls.Ntx.offset).value
        n_rx = ctypes.c_uint16.from_buffer_copy(base_bytes, cls.Nrx.offset).value

        # Read remaining bytes
        remaining_bytes = buffer.read(n_bytes - cls.TX.offset + cls.NumberOfBytes.size)

        # Create new class dynamically with string array at the correct size
        new_name = cls.__name__ + '_ntx{}_nrx{}'.format(n_tx, n_rx)
        new_fields = cls._fields_.copy()
        tx_idx = [i for i, (name, fieldtype) in enumerate(cls._fields_) if name == 'TX'][0]
        rx_idx = [i for i, (name, fieldtype) in enumerate(cls._fields_) if name == 'RX'][0]
        new_fields[tx_idx] = ('TX', KMRawRangeAngle78_TX * n_tx)
        new_fields[rx_idx] = ('RX', KMRawRangeAngle78_RX * n_rx)
        new_cls = type(new_name, (ctypes.LittleEndianStructure,), {
            '__str__': cls.__str__,
            '_pack_': cls._pack_,
            '_fields_': new_fields
        })

        all_bytes = base_bytes + remaining_bytes
        obj = new_cls.from_buffer_copy(all_bytes)

        # Checksum (not crc16, but a straight sum of bytes with overflow)
        chk = (sum(all_bytes[new_cls.DatagramType.offset:new_cls.EndID.offset]) & 0xFFFF)
        if chk != obj.Checksum:
            warning_str = '{}: Checksum failed'.format(cls.__name__)
            warnings.warn(warning_str)

        return obj
Example 39
    async def _upload_chunks(
            cls, rfile: BootResourceFile, content: io.IOBase, chunk_size: int,
            progress_callback=None):
        """Upload the `content` to `rfile` in chunks using `chunk_size`."""
        content.seek(0, io.SEEK_SET)
        upload_uri = urlparse(
            cls._handler.uri)._replace(path=rfile._data['upload_uri']).geturl()
        uploaded_size = 0

        insecure = cls._handler.session.insecure
        connector = aiohttp.TCPConnector(verify_ssl=(not insecure))
        session = aiohttp.ClientSession(connector=connector)

        async with session:
            while True:
                buf = content.read(chunk_size)
                length = len(buf)
                if length > 0:
                    uploaded_size += length
                    await cls._put_chunk(session, upload_uri, buf)
                    if progress_callback is not None:
                        progress_callback(uploaded_size / rfile.size)
                if length != chunk_size:
                    break
Example 40
def write_wav(f, channels, sample_width=SAMPLE_WIDTH, raw_samples=False, seekable=None):
	stream = wav_samples(channels, sample_width, raw_samples)
	channel_count = 1 if inspect.isgenerator(channels) else len(channels)

	output_seekable = IOBase.seekable(f) if seekable is None else seekable

	if not output_seekable:
		# protect the non-seekable file, since Wave_write will call tell
		f = NonSeekableFileProxy(f)

	w = wave.open(f)
	w.setparams((
		channel_count,
		sample_width,
		FRAME_RATE,
		0, # setting zero frames, should update automatically as more frames written
		COMPRESSION_TYPE,
		COMPRESSION_NAME
		))

	if not output_seekable:
		if wave_module_patched():
			# set nframes to make wave module write data size of 0xFFFFFFF
			# (floor division: Wave_write.setnframes needs an int in Python 3)
			w.setnframes((0xFFFFFFFF - 36) // w.getnchannels() // w.getsampwidth())
			logger.debug("Setting frames to: {0}, {1}".format((w.getnframes()), w._nframes))
		else:
			w.setnframes((0x7FFFFFFF - 36) // w.getnchannels() // w.getsampwidth())
			logger.debug("Setting frames to: {0}, {1}".format((w.getnframes()), w._nframes))

	for chunk in buffer(stream):
		logger.debug("Writing %d bytes..." % len(chunk))
		if output_seekable:
			w.writeframes(chunk)
		else:
			# tell wave module not to update nframes header field
			# if output stream not seekable, e.g. STDOUT to a pipe
			w.writeframesraw(chunk)
	w.close()
Example 41
    async def create(
            cls, name: str, architecture: str, content: io.IOBase, *,
            title: str="",
            filetype: BootResourceFileType=BootResourceFileType.TGZ,
            chunk_size=(1 << 22), progress_callback=None):
        """Create a `BootResource`.

        Creates an uploaded boot resource with `content`. The `content` is
        uploaded in chunks of `chunk_size`. `content` must be seekable as the
        first pass through the `content` will calculate the size and sha256
        value then the second pass will perform the actual upload.

        :param name: Name of the boot resource. Must be in format 'os/release'.
        :type name: `str`
        :param architecture: Architecture of the boot resource. Must be in
            format 'arch/subarch'.
        :type architecture: `str`
        :param content: Content of the boot resource.
        :type content: `io.IOBase`
        :param title: Title of the boot resource.
        :type title: `str`
        :param filetype: Type of file in content.
        :type filetype: `str`
        :param chunk_size: Size in bytes to upload to MAAS in chunks.
            (Default is 4 MiB).
        :type chunk_size: `int`
        :param progress_callback: Called to inform the current progress of the
            upload. One argument is passed with the progress as a percentage.
            If the resource was already complete and no content
            needed to be uploaded then this callback will never be called.
        :type progress_callback: Callable
        :returns: Created boot resource.
        :rtype: `BootResource`.
        """
        if '/' not in name:
            raise ValueError(
                "name must be in format os/release; missing '/'")
        if '/' not in architecture:
            raise ValueError(
                "architecture must be in format arch/subarch; missing '/'")
        if not content.readable():
            raise ValueError("content must be readable")
        elif not content.seekable():
            raise ValueError("content must be seekable")
        if chunk_size <= 0:
            raise ValueError(
                "chunk_size must be greater than 0, not %d" % chunk_size)

        size, sha256 = calc_size_and_sha265(content, chunk_size)
        resource = cls._object(await cls._handler.create(
            name=name, architecture=architecture, title=title,
            filetype=filetype.value, size=str(size), sha256=sha256))
        newest_set = max(resource.sets, default=None)
        assert newest_set is not None
        resource_set = resource.sets[newest_set]
        assert len(resource_set.files) == 1
        rfile = list(resource_set.files.values())[0]
        if rfile.complete:
            # Already created and fully up-to-date.
            return resource
        else:
            # Upload in chunks and reload boot resource.
            await cls._upload_chunks(
                rfile, content, chunk_size, progress_callback)
            return cls._object.read(resource.id)
Example 42
def compile(input:IOBase, output:IOBase, logger = logging.getLogger()):
    output.write(_compile(input, logger).decode("UTF-8"))
Example 43
 def close(self):
     if self._buffer:
         self._buffer.close()
     self._wrapped_stream = None
     IOBase.close(self)
Example 44
        self.assertEqual(len(errors), 1)
        self.assertIsInstance(errors[0], RuntimeError)



class FakeFile(list):

    def write(self, bytes):
        self.append(bytes)


    def flush(self):
        pass


IOBase.register(FakeFile)
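
IOBase.register makes FakeFile a virtual subclass, so isinstance checks against io.IOBase accept it without any inheritance; a minimal sketch:

import io

f = FakeFile()
f.write(b"line 1\n")
print(isinstance(f, io.IOBase))   # True -- via ABC registration, not subclassing
print(f)                          # [b'line 1\n'] -- writes are simply appended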



class EvilStr:
    def __str__(self):
        1 // 0



class EvilRepr:
    def __str__(self):
        return "Happy Evil Repr"


    def __repr__(self):
Example 45
def compileRun(input:IOBase, output:IOBase = None, logger = logging.getLogger()):
    source = _compile(input, logger)
    if output is not None:
        output.write(source.decode("UTF-8"))
    return llvm.run(source).decode("UTF-8")
Example 46
def process_text(db: Database,
                 source: Source,
                 text: IOBase) -> Optional[Exception]:
    session = get_session(db)
    line_no = 1 # lol
    ultimate_text = ''
    futures = []
    source.content = ''
    session.add(source)
    session.commit() # so we can attach phrases to it. need its id.
    line_queue = Queue()
    error_queue = Queue()
    db_proc = Process(target=line_handler,
                      args=(db, line_queue, error_queue, source.id))
    db_proc.start()

    chunk = text.read(CHUNK_SIZE)
    while len(chunk) > 0:
        line_buff = ""
        for c in chunk:
            if BAD_CHARS.get(c, False):
                if not line_buff.endswith(' '):
                    line_buff += ' '
                continue
            if CLAUSE_MARKERS.get(c, False):
                if len(line_buff) > LONG_ENOUGH:
                    ultimate_text += line_buff
                    line_queue.put((line_no, line_buff))
                    line_no += 1
                    line_buff = ""
                else:
                    line_buff += c
                continue
            if SENTENCE_MARKERS.get(c, False):
                if len(line_buff) > LONG_ENOUGH:
                    ultimate_text += line_buff
                    line_queue.put((line_no, line_buff))
                    line_no += 1
                line_buff = ""
                continue
            if c == ' ' and line_buff.endswith(' '):
                continue
            if c == "'" and line_buff.endswith(' '):
                continue
            if c == "'" and peek(text, 1) == ' ':
                continue
            line_buff += c
        chunk = text.read(CHUNK_SIZE)

    line_queue.put(DONE_READING)
    db_proc.join()

    error = None
    if error_queue.empty():
        source.content = ultimate_text
        session.add(source)
    else:
        error = error_queue.get()
        session.delete(source)

    result = None
    if error is None:
        result = source.id
    else:
        result = error

    session.commit()
    session.close()

    return result