def test_read_text2(conn):
    conn.send_request('GET', '/send_%d_bytes' % len(DUMMY_DATA))
    conn.read_response()
    fh = TextIOWrapper(conn)

    # This used to fail because TextIOWrapper can't deal with bytearrays
    fh.read(42)
Example #3
    def __init__(self, file_name, lang_str):
        self.lang = lang_str
        self.ok = False
        self.message = ''
        DATA_ROOT = file_name.split('.')[0]

        utdata_zip = ZipFile(file_name)

        self._no_info_str = "no info"
        self._blocks = []
        with utdata_zip.open(DATA_ROOT + '/data/blocks.txt') as f:
            f = TextIOWrapper(f, encoding='utf-8')
            self._blocks = re.findall(
                r'\[(.*)\]\n\s*diap\s*:\s([0-9A-F]{4,5}):([0-9A-F]{4,5})',
                f.read())
            # _blocks element: (block-name, first-hex-code, last-hex-code)
        print('{0} ranges loaded from "{1}"'.format(len(self._blocks),
                                                    utdata_zip.filename))

        self._table = {}
        self._symbol_list = [self._no_info_str] * 0x100000

        target_str = '/loc/' + lang_str + '/symbols/'
        for name in utdata_zip.namelist():
            if (target_str in name) and (name[-1] != '/'):
                with utdata_zip.open(name) as f:
                    f = TextIOWrapper(f, encoding='utf-8')
                    symbols = re.findall(r'([0-9A-F]{4,5}): (.+)', f.read())
                    # symbols: (hex_code, localized_symbol_name)
                    for s in symbols:
                        self._symbol_list[int(s[0], 16)] = s[1]

        block_names = {}
        with utdata_zip.open(DATA_ROOT + '/loc/' + lang_str +
                             '/blocks.txt') as f:
            f = TextIOWrapper(f, encoding='utf-8')
            for pair in re.findall('(.*):(.*)', f.read()):
                # pair: (block_name, localized_block_name)
                block_names[pair[0].strip()] = pair[1].strip()

        for block in self._blocks:
            block_name = block_names[block[0]]
            first = int(block[1], 16)
            if first > 0x1FFFF:
                continue
            last = int(block[2], 16)
            block_table = {}
            for i in range(first, last + 1):
                if self._symbol_list[i] != self._no_info_str:
                    block_table[i] = self._symbol_list[i]
            # no empty tables
            if len(block_table) > 0:
                self._table[block_name] = block_table
            else:
                print('empty block: ', block_name)
        print('UnicodeTable "{0}" data loaded from "{1}"'.format(
            lang_str, utdata_zip.filename))
        utdata_zip.close()
        self.ok = True
def test_get_file_size_text_file():
    from io import TextIOWrapper
    test_inner_file = BytesIO()
    test_file = TextIOWrapper(test_inner_file, encoding="utf-8")
    test_file.write(u"\u0001F3A9 " * 123)
    test_file.seek(0)
    # read 9 *unicode chars* to advance fd to somewhere interesting
    test_file.read(9)

    previous_pos = test_file.tell()

    assert get_file_size(test_file) == 738
    assert test_file.tell() == previous_pos
Example #5
 def test_io_wrapper(self):
     content = "vive l'été\n"
     with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
         test_file.write(content.encode())
         test_file.seek(0)
         wrapper = TextIOWrapper(test_file, 'utf-8', newline='\n')
         self.assertEqual(wrapper.read(), content)
         wrapper.write(content)
         wrapper.seek(0)
         self.assertEqual(wrapper.read(), content * 2)
         test_file = wrapper.detach()
         test_file.seek(0)
         self.assertEqual(test_file.read(), (content * 2).encode())
def parse(stdin: io.TextIOWrapper) -> tuple:
    """
    Parse the input into a tuple containing dict of ranges and an array of
    tickets. The first element of the dict is my ticket.
    """

    [raw_fields, my_ticket,
     other_tickets] = [v.strip() for v in stdin.read().split("\n\n")]

    _, my_ticket = my_ticket.split("\n", 1)
    _, other_tickets = other_tickets.split("\n", 1)

    fields = dict()
    for line in raw_fields.splitlines():
        name, value = line.split(": ")
        fields[name] = [
            tuple(int(i) for i in group.split("-"))
            for group in value.split(" or ")
        ]

    tickets = [{
        key: int(field)
        for key, field in enumerate(my_ticket.split(","))
    }] + [{key: int(field)
           for key, field in enumerate(ticket.split(","))}
          for ticket in other_tickets.splitlines()]

    return fields, tickets
Example #8
def update_app(uri: str,
               user: str,
               password: str,
               name: str,
               description: str,
               file: TextIOWrapper,
               tags: List[str],
               app_id: str,
               start_on_install: bool = False):
    token = login(uri, user, password)
    if len(token) == 0:
        return

    uri = (GET_APPS_V1_API + "/{}").format(uri, app_id)
    headers = get_authorized_headers(token)
    body = {
        "name": name,
        "description": description,
        # file is opened in text mode, so re-encode before base64-encoding
        "configuration_file": ("data:text/yaml;base64," +
                               standard_b64encode(file.read().encode('utf-8'))
                               .decode('utf-8')),
        "start_on_install": start_on_install,
        "tags": tags
    }

    add_app_request = requests.put(uri, headers=headers, data=json.dumps(body))
    response = add_app_request.json()

    print(json.dumps(response))
Example #9
def get_reader(handle: io.TextIOWrapper):
    logger.debug(f"Reading '{handle.name}' content")

    sniffer = csv.Sniffer()
    dialect = sniffer.sniff(handle.read(2048))
    handle.seek(0)
    return csv.reader(handle, dialect=dialect)
Example #10
def load(file: io.TextIOWrapper, loaded: List[str] = [], inc_dir: List[str] = []) -> List[str]:
    s = file.read()
    inc_dir = inc_dir + ["."]

    src = list(filter(len, s.split()))

    i = 0
    while i < len(src):
        if src[i] == "import":
            j = i + 1
            if not (src[j] in loaded):
                d = ""
                f = src[i + 1] + ".erp"
                for d2 in reversed(inc_dir):
                    f2 = os.path.join(d2, f)

                    if os.path.isfile(f2):
                        d = d2
                        f = f2

                        break

                if d == "":
                    raise Exception(f"can not open {f}")

                with io.open(f) as f:
                    src2 = load(f, loaded)
            else:
                src2 = []

            src = src[:i] + src2 + src[i + 2:]
        else:
            i += 1

    return src
Example #11
def load_lmc_file(
        fp: TextIOWrapper) -> Tuple[List[Instruction], Tuple[MemAddr]]:
    """
    will load a lmc file, allowing the use
    of $mnemonic or $numbered file types

        :param fp: the open file to read from
        :raises UnknownFileHeader: if a unknown header was found
        :return: the list of instructions
    """
    raw_rows = fp.read().replace(" ", "").split("\n")

    # get the header row e.g. $mnemonic -> mnemonic
    header = raw_rows.pop(0).replace("$", "")

    # remove blank rows and comments
    raw_rows = sanitize_rows(raw_rows)

    # decide what type of lmc file it is
    if header == FileHeaders.MNEMONIC:
        return load_mnemonic(raw_rows)

    elif header == FileHeaders.NUMBERED:
        return load_numbered(raw_rows)

    raise UnknownFileHeader(f"Unknown header type {header}")
Example #12
def decode_bytes_from_file(the_file: TextIOWrapper, search_variable_name: str):
    search_variable_name = search_variable_name.strip()
    search_var_name = re.match(r'^(.*?)(?:_base(\d\d))?$',
                               search_variable_name)
    var_base_name = str(search_var_name[1])
    encode_bases = ([str(search_var_name[2])]
                    if search_var_name.lastindex > 1
                    else ('64', '85', '32', '16'))
    saved_file_position = 0
    if the_file.seekable():
        saved_file_position = the_file.tell()
        the_file.seek(0)
    file_content = the_file.read()
    if the_file.seekable():
        the_file.seek(saved_file_position)
    for enc in encode_bases:
        reg_exp = (var_base_name + "_base" + str(enc) +
                   r"\s*=\s*[a-zA-Z]{0,2}'''(.*?)'''")
        var_found = re.match(reg_exp, file_content, re.DOTALL)
        if var_found:
            if hasattr(base64, 'b' + enc + 'decode'):
                decoded = getattr(base64, 'b' + enc + 'decode')(var_found[1])
                return var_base_name, bytes(decoded)
            return None, f'Variable found with unsupported encoding: base{enc}'
    return None, 'Variable not found'
Example #13
        def wrapper(*args, **kwargs):
            args = list(args)
            if arg is not None:
                try:
                    _file = args[arg[1]]

                    def update_arg(new_val):
                        args[arg[1]] = new_val
                except IndexError:
                    _file = kwargs[arg[0]]

                    def update_arg(new_val):
                        kwargs[arg[0]] = new_val
            else:
                _file = args[0]

                def update_arg(new_val):
                    args[0] = new_val

            cleanup_textio = False
            try:
                if 'r' in mode and hasattr(_file, 'read'):
                    # Check if opened in the correct mode
                    # and wrap in conversion layer if not
                    if _file.read(0) != '' and 'b' not in mode:
                        _file = TextIOWrapper(_file)
                        cleanup_textio = True
                    elif _file.read(0) != b'' and 'b' in mode:
                        raise NotImplementedError("Cannot convert a text file"
                                                  " handle to binary mode")
                    update_arg(_file)
                    return func(*args, **kwargs)
                elif 'w' in mode and hasattr(_file, 'write'):
                    if 'b' not in mode:
                        try:
                            _file.write('')
                        except TypeError:
                            _file = TextIOWrapper(_file)
                            cleanup_textio = True
                    else:
                        try:
                            _file.write(b'')
                        except TypeError:
                            raise NotImplementedError(
                                "Cannot convert a text file"
                                " handle to binary mode")
                    update_arg(_file)
                    return func(*args, **kwargs)
            finally:
                # TextIOWrapper closes the underlying stream unless detached
                if cleanup_textio:
                    _file.detach()

            # This is a path
            _open = open
            if compression:
                _open = open_compressed
            with _open(_file, mode) as f_handle:
                update_arg(f_handle)
                return func(*args, **kwargs)
Example #14
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
    """Read a Python file from a URL, using the encoding declared inside the file.
    
    Parameters
    ----------
    url : str
      The URL from which to fetch the file.
    errors : str
      How to handle decoding errors in the file. Options are the same as for
      bytes.decode(), but here 'replace' is the default.
    skip_encoding_cookie : bool
      If True (the default), and the encoding declaration is found in the first
      two lines, that line will be excluded from the output - compiling a
      unicode string with an encoding declaration is a SyntaxError in Python 2.
    
    Returns
    -------
    A unicode string containing the contents of the file.
    """
    response = urllib.urlopen(url)
    buffer = io.BytesIO(response.read())
    encoding, lines = detect_encoding(buffer.readline)
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if skip_encoding_cookie:
        return "".join(strip_encoding_cookie(text))
    else:
        return text.read()
Example #15
def parse(stdin: io.TextIOWrapper) -> tuple:
    """
    Parse the input into a tuple of ints, representing the public keys of the
    card and the door.
    """

    return tuple(int(line) for line in stdin.read().strip().splitlines())
Example #16
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
    """Converts a bytes string with python source code to unicode.

    Unicode strings are passed through unchanged. Byte strings are checked
    for the python source file encoding cookie to determine encoding.
    txt can be either a bytes buffer or a string containing the source
    code.
    """
    if isinstance(txt, unicode_type):
        return txt
    if isinstance(txt, bytes):
        buffer = BytesIO(txt)
    else:
        buffer = txt
    try:
        encoding, _ = detect_encoding(buffer.readline)
    except SyntaxError:
        encoding = "ascii"
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if skip_encoding_cookie:
        return u"".join(strip_encoding_cookie(text))
    else:
        return text.read()
Example #17
def find_xmp_string(file: io.TextIOWrapper):
    """
    Load chunks of an input image iteratively and search for the XMP data.

    On each iteration, a new chunk of the file (of size specified by xmp.CHUNK_SIZE) is read and
    appended to the already read portion of the file. The XMP regex is then matched against this string,
    and if the XMP data is found, returns the match. If no match is found, the function continues.

    :param file: Handler to file open for reading
    :return: **xmp_data**: XMP data of image, as string dump
    """
    file_so_far = ""
    while True:
        chunk = file.read(CHUNK_SIZE)

        # If at end of file, chunk will be an empty string
        if not chunk:
            logger.error(
                "Couldn't parse XMP string from the image file. The image may not have XMP information."
            )
            raise XMPTagNotFoundError(
                "Couldn't parse XMP string from the image file.")

        # 12 is the length of the ending XMP tag
        start_search_at = max(0, len(file_so_far) - 12)
        file_so_far += chunk

        end_match = re.search(XMP_END, file_so_far[start_search_at:])
        # If we matched the end, we know `file_so_far` contains the whole XMP string
        if end_match:
            return re.search(FULL_XMP, file_so_far).group(0)
Example #18
def load_geojson():
    result = urlopen('https://github.com/openregister/boundaries/archive/master.zip').read()
    stream = BytesIO(result)
    zipfile = ZipFile(stream, 'r')
    file_names = [name for name in zipfile.namelist()
                  if name.endswith('.geojson')]
    for name in file_names:
        with zipfile.open(name, 'r') as f:
            if name.endswith('.geojson'):
                file_contents = TextIOWrapper(f, encoding='utf-8',
                                              newline='')
                data = geojson.loads(file_contents.read())
                try:
                    name = data['properties']['REGD14NM']
                    code = data['properties']['REGD14CD']
                    geometry = data['geometry']
                    # hack: store everything as MultiPolygon
                    if geometry['type'] == 'Polygon':
                        coordinates = []
                        coordinates.append(geometry['coordinates'])
                        geometry['coordinates'] = coordinates
                        geometry['type'] = 'MultiPolygon'
                    polygon = from_shape(asShape(geometry), srid=4326)
                    boundary = Boundary(name=name, code=code, polygon=polygon)
                    db.session.add(boundary)
                    db.session.commit()
                except KeyError as e:
                    print("not something we were expecting really")
Example #19
def get_cfmodel(template: TextIOWrapper) -> CFModel:
    template_file = convert_json_or_yaml_to_dict(template.read())
    if not template_file:
        raise FileEmptyException(
            f"{template.name} is empty and not a valid template.")
    cfmodel = pycfmodel.parse(template_file)
    return cfmodel
Example #20
def read_json(fileobj):
    '''read the file object (a json)
    '''
    json_file = TextIOWrapper(fileobj)
    content = json.loads(json_file.read())
    fileobj.close()    
    return content
def parse(stdin: io.TextIOWrapper) -> list:
    """
    Parse the input into a list of tuples: string direction and int distance.
    """

    return [(line[0], int(line[1:]))
            for line in stdin.read().strip().splitlines()]
Example #22
 def test_io_wrapper(self):
     content = "vive l'été\n"
     with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
         test_file.write(content.encode('utf-8'))
         test_file.seek(0)
         wrapper = TextIOWrapper(test_file, 'utf-8', newline='\n')
         self.assertEqual(wrapper.read(), content)
         # The following seek() call is required on Windows Python 2 when
         # switching from reading to writing.
         wrapper.seek(0, 2)
         wrapper.write(content)
         wrapper.seek(0)
         self.assertEqual(wrapper.read(), content * 2)
         test_file = wrapper.detach()
         test_file.seek(0)
         self.assertEqual(test_file.read(), (content * 2).encode('utf-8'))
Example #23
def addAdmin(request):
    context = dict()
    if request.method == 'POST':
        if not (request.FILES):
            return self.construct_form(request, True, False)
        f = TextIOWrapper(request.FILES['CSVFile'].file,
                          encoding=request.encoding)
        reader = csv.reader(f.read().splitlines())
        for row in reader:
            context = check_csv(row, 1)
            if context != False:
                return render(request, 'main/error.html', context)

            try:
                user_instance = User.objects.create_user(username=row[0],
                                                         password='******')
                Admin.objects.create(user=user_instance)
            except IntegrityError:
                continue
        return render(request, 'main/tables.html', view_data('Admin'))
    else:
        form = FileForm()
        context['form'] = form
        context['name'] = 'Administrator'
        return render(request, 'main/upload.html', context)
Example #24
def struncate(file: TextIOWrapper, amount: int):
    """
    Truncate the first n bytes from the beginning of file

    :param file: the file to truncate
    :param amount: amount of bytes to remove from start
    :type file: TextIOWrapper
    :type amount: int
    """
    #Get file size
    file.seek(0, 2)
    file_size = file.tell()
    #Go to the beginning of file
    file_offset = amount
    file.seek(0, 0)
    bytes_to_write = file_size - amount
    bytes_written = 0
    while bytes_written < bytes_to_write:
        #Move to offset + bytes_written
        file.seek(file_offset + bytes_written, 0)
        #Get bytes to rewrite
        block_size = 1024
        if bytes_to_write - bytes_written < block_size:
            block_size = bytes_to_write - bytes_written
        #Read block
        block_data = file.read(block_size)
        #Move to the beginning of file + bytes_written
        file.seek(bytes_written, 0)
        #Write block
        bytes_written += file.write(block_data)
    #Then truncate
    file.flush() #Flush write first
    file.seek(bytes_written)
    file.truncate()
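
A quick usage sketch for struncate, assuming the function above is in scope (the temporary file and its contents are made up for illustration):

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('0123456789')

with open(tmp.name, 'r+') as fh:
    struncate(fh, 4)          # drop the first 4 characters in place

with open(tmp.name) as fh:
    print(fh.read())          # -> 456789

os.remove(tmp.name)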
Example #25
def get_extra_env_vars(
        gh_event: GitHubEvent, token: str, app: int,
        private_key: TextIOWrapper,
) -> dict:
    """Construct additional env vars for App or Action processing."""
    env = {}

    if app is not None:
        env['OCTOMACHINERY_APP_MODE'] = 'app'

        env['GITHUB_APP_IDENTIFIER'] = str(app)
        env['GITHUB_PRIVATE_KEY'] = private_key.read()
        return env

    env['OCTOMACHINERY_APP_MODE'] = 'action'

    env['GITHUB_ACTION'] = 'Fake CLI Action'
    env['GITHUB_ACTOR'] = gh_event.payload.get('sender', {}).get('login', '')
    env['GITHUB_EVENT_NAME'] = gh_event.name
    env['GITHUB_WORKSPACE'] = str(pathlib.Path('.').resolve())
    env['GITHUB_SHA'] = gh_event.payload.get('head_commit', {}).get('id', '')
    env['GITHUB_REF'] = gh_event.payload.get('ref', '')
    env['GITHUB_REPOSITORY'] = (
        gh_event.payload.
        get('repository', {}).
        get('full_name', '')
    )
    env['GITHUB_TOKEN'] = token
    env['GITHUB_WORKFLOW'] = 'Fake CLI Workflow'
    env['GITHUB_EVENT_PATH'] = '/dev/null'

    return env
Example #26
def main():
    for task, digest in CVEDA_PSYTOOLS_DATASETS:
        digest = digest.upper().replace(' ', '_')
        dataset = 'IMAGEN-{task}-{digest}.csv'.format(task=task, digest=digest)
        logging.info('downloading: {0}'.format(dataset))
        url = BASE_URL + dataset + '.gz'
        # let Requests use ~/.netrc instead of passing an auth parameter
        #     auth = requests.auth.HTTPBasicAuth('...', '...')
        # no need to expose identifiers in the code!
        r = requests.get(url, verify=CA_BUNDLE)
        compressed_data = BytesIO(r.content)
        with gzip.GzipFile(fileobj=compressed_data) as uncompressed_data:
            # unfold quoted text spanning multiple lines
            uncompressed_data = TextIOWrapper(uncompressed_data)
            data = QUOTED_PATTERN.sub(lambda x: x.group().replace('\n', '/'),
                                      uncompressed_data.read())
            # skip files that have not changed since last update
            psytools_path = os.path.join(PSYTOOLS_PSC1_DIR, dataset)
            if os.path.isfile(psytools_path):
                with open(psytools_path, 'r') as uncompressed_file:
                    if uncompressed_file.read() == data:
                        logging.info('skip unchanged file: {0}'
                                     .format(psytools_path))
                        continue
            # write downloaded data into file
            with open(psytools_path, 'w') as uncompressed_file:
                logging.info('write file: {0}'.format(psytools_path))
                uncompressed_file.write(data)
Example #27
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
    """Converts a bytes string with python source code to unicode.

    Unicode strings are passed through unchanged. Byte strings are checked
    for the python source file encoding cookie to determine encoding.
    txt can be either a bytes buffer or a string containing the source
    code.
    """
    if isinstance(txt, unicode):
        return txt
    if isinstance(txt, bytes):
        buffer = BytesIO(txt)
    else:
        buffer = txt
    try:
        encoding, _ = detect_encoding(buffer.readline)
    except SyntaxError:
        encoding = "ascii"
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if skip_encoding_cookie:
        return u"".join(strip_encoding_cookie(text))
    else:
        return text.read()
def parse(stdin: io.TextIOWrapper) -> list:
    """Parse a raw passport input."""

    return [
        dict(x.split(":") for x in re.split(r"[\n ]", y))
        for y in stdin.read().strip().split("\n\n")
    ]
Example #29
 def readback(f: io.TextIOWrapper) -> None:
     pos = f.tell()
     while pos >= 0:
         f.seek(pos)
         if f.read(1) == "\n":
             break
         pos -= 2
Example #30
def addCourseProfessor(request):
    context = dict()
    if request.method == 'POST':
        if not (request.FILES):
            return self.construct_form(request, True, False)
        f = TextIOWrapper(request.FILES['CSVFile'].file,
                          encoding=request.encoding)
        reader = csv.reader(f.read().splitlines())
        for row in reader:
            try:
                prof = Professor.objects.filter(fullname=row[0])[0]
                # prof = Professor.objects.filter(user__username=row[0])[0]
                for each in row[1:]:
                    if each != '':
                        course = Course.objects.filter(name=each)[0]
                        CourseProfessor.objects.create(course=course,
                                                       professor=prof)
            except IntegrityError:
                continue
        return render(request, 'main/tables.html',
                      view_data('CourseProfessor'))
    else:
        form = FileForm()
        context['form'] = form
        context['name'] = 'Course Professor relations'
        return render(request, 'main/upload.html', context)
Example #31
 def import_csv(self, request):
     if request.method == "POST":
         try:
             csv_file = TextIOWrapper(request.FILES['csv_file'].file,
                                      encoding=request.encoding)
             dialect = csv.Sniffer().sniff(csv_file.read(1024))
             csv_file.seek(0)
             reader = csv.DictReader(csv_file, dialect=dialect)
         except Exception as err:
             self.message_user(request, "Error: {}".format(err))
             return redirect("..")
         try:
             if '/student/' in request.path:
                 user_type = Student
             elif '/faculty/' in request.path:
                 user_type = Faculty
             else:
                 raise Http404
             create_users(user_type, reader)
         except Exception as err:
             messages.error(
                 request, f'Error on row number {reader.line_num}: {err}')
         else:
             messages.success(request, "Your csv file has been imported")
         return redirect("..")
     form = BulkImportForm()
     payload = {"form": form}
     return render(request, "admin/bulk_import_form.html", payload)
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
    """Read a Python file from a URL, using the encoding declared inside the file.
    
    Parameters
    ----------
    url : str
      The URL from which to fetch the file.
    errors : str
      How to handle decoding errors in the file. Options are the same as for
      bytes.decode(), but here 'replace' is the default.
    skip_encoding_cookie : bool
      If True (the default), and the encoding declaration is found in the first
      two lines, that line will be excluded from the output - compiling a
      unicode string with an encoding declaration is a SyntaxError in Python 2.
    
    Returns
    -------
    A unicode string containing the contents of the file.
    """
    response = urllib.request.urlopen(url)
    buffer = io.BytesIO(response.read())
    encoding, lines = detect_encoding(buffer.readline)
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if skip_encoding_cookie:
        return "".join(strip_encoding_cookie(text))
    else:
        return text.read()
Example #33
def upload():
    if current_user.is_authenticated:
        if request.method == 'POST':
            # the user cannot upload a file unless they enter collection name
            if not request.form['file_collection']:
                # takes user back to upload a file
                return redirect(url_for('upload'))
            else:
                csv_file = request.files[
                    'csvfiles']  # request the file being uploaded
                filename = secure_filename(
                    csv_file.filename)  # secure the file
                csv_files = TextIOWrapper(
                    csv_file, encoding='utf-8')  # parse through file
                read_csv = csv_files.read()  # read csv file

                # insert file information in UserFiles table and to display in file log
                new_csv = UserFiles(
                    file_name=filename,
                    file_collection=request.form['file_collection'])

                # create list of dictionaries keyed by header row
                csv_dicts = [{k: v
                              for k, v in row.items()}
                             for row in csv.DictReader(read_csv.splitlines(),
                                                       skipinitialspace=True)]

                db.session.add(new_csv)  # insert file to database
                db.session.bulk_insert_mappings(
                    Data, csv_dicts)  # bulk insert data to database
                db.session.commit()  # commits
                flash('Data was added!')
            return redirect(url_for('show_all_files'))
        return render_template('upload.html')
Example #34
def extract_ip_from_html_player(html_filehandle: io.TextIOWrapper):
    """Returns 0.0.0.0 if no ip address is found in the html file"""
    html_file = html_filehandle.read()
    ip_containing = re.search(r"(?:\\u0026v|%26|%3F)ip(?:%3D|=)(.*?)(?:,|;|%26|\\u0026)", html_file, re.DOTALL)
    if ip_containing is None:
        return "0.0.0.0"
    return ip_containing.group(1)
Example #35
 def test_partial_decode_wait(self):
     reader = StreamReader()
     wrapped = TextIOWrapper(reader, 'utf-8')
     buf = u'20 \u20ac'.encode('utf-8')
     reader.feed(buf[:-1])
     def write_last():
         gruvi.sleep(0.01)
         reader.feed(buf[-1:])
     gruvi.spawn(write_last)
     self.assertEqual(wrapped.read(4), u'20 \u20ac')
def test_read_text3(conn):
    conn.send_request('GET', '/send_%d_bytes' % len(DUMMY_DATA))
    conn.read_response()
    fh = TextIOWrapper(conn)

    # This used to fail because TextIOWrapper tries to read from
    # the underlying fh even after getting ''
    while True:
        if not fh.read(77):
            break

    assert not conn.response_pending()
Example #37
def main(args=None):
    parser = create_parser()
    args = parser.parse_args(args)

    if args.filename == '-':  # read from stdin
        if PY2:
            data = getreader(args.encoding)(sys.stdin).read()
        else:
            wrapper = TextIOWrapper(sys.stdin.buffer, encoding=args.encoding)
            try:
                data = wrapper.read()
            finally:
                wrapper.detach()
    else:
        try:
            with open(args.filename, 'r', args.encoding) as f:
                data = ''.join(f.readlines())
        except IOError as e:
            return _error(
                u'Failed to read {0}: {1}'.format(args.filename, e))

    close_stream = False
    if args.outfile:
        try:
            stream = open(args.outfile, 'w', args.encoding)
            close_stream = True
        except IOError as e:
            return _error(u'Failed to open {0}: {1}'.format(args.outfile, e))
    else:
        stream = sys.stdout

    formatter_opts = vars(args)
    try:
        formatter_opts = sqlparse.formatter.validate_options(formatter_opts)
    except SQLParseError as e:
        return _error(u'Invalid options: {0}'.format(e))

    s = sqlparse.format(data, **formatter_opts)
    stream.write(s)
    stream.flush()
    if close_stream:
        stream.close()
    return 0
Example #38
def csv_config(request):
    headers = None
    content = {}
    f = TextIOWrapper(request.FILES['config'].file, encoding='utf-8')
    reader = csv.reader(f.read().splitlines())
    # credit goes to http://www.eurion.net/python-snippets/snippet/CSV%20to%20Dictionary.html
    for row in reader:
        if reader.line_num == 1:
            headers = row[1:]
        else:
            content[row[0]] = dict(zip(headers, row[1:]))
    special_users = []
    projects = set()
    for username in content:
        user, created = User.objects.get_or_create(username=username)
        user.email = content[username]['Email']
        user.first_name = content[username]['First Name']
        user.last_name = content[username]['Last Name']
        user.profile.phone = content[username]['Phone']
        user.profile.skype = content[username]['Skype']
        user.profile.comment = content[username]['Comment']
        user.save()
        project = content[username]['Project']
        sub_project = content[username]['SubProject']
        if project != "*" and sub_project != "*":
            project, created = Project.objects.get_or_create(name=project + "-" + sub_project)
            project.created_by = request.user
            project.add_user(user, content[username]["Role"])
            project.save()
            projects.add(project)
        else:
            special_users.append(
                {"user": user, "role": content[username]["Role"], "project": project, "subproject": sub_project})
    for special_user in special_users:
        for project in projects:
            if (special_user['project'] == "*" and special_user['subproject'] == "*") \
                    or (special_user['project'] == "*" and project.name.endswith(special_user['subproject'])) \
                    or (special_user['subproject'] == "*" and project.name.startswith(special_user['project'])):
                project.add_user(special_user['user'], special_user['role'])
    for project in projects:
        ProjectAdmin.export_graph(project, request.user)
    return Response({"success": True})
Example #39
def test_corrupted_pad():
    """Tests that a login path file with a corrupted pad is partially read."""
    buf = open_bmylogin_cnf(LOGIN_PATH_FILE)

    # Skip past the login key
    buf.seek(24)

    # Skip option group
    len_buf = buf.read(4)
    cipher_len, = struct.unpack("<i", len_buf)
    buf.read(cipher_len)

    # Corrupt the pad for the user line
    len_buf = buf.read(4)
    cipher_len, = struct.unpack("<i", len_buf)
    buf.read(cipher_len - 1)
    buf.write(b'\0')

    buf.seek(0)
    mylogin_cnf = TextIOWrapper(read_and_decrypt_mylogin_cnf(buf))
    contents = mylogin_cnf.read()
    for word in ('[test]', 'password', 'host', 'port'):
        assert word in contents
    assert 'user' not in contents
Example #40
def collect(proc):
    stream = TextIOWrapper(proc.stdout, errors='ignore')
    output = stream.read()
    wait(proc)

    return output
def test_read_text(conn):
    conn.send_request('GET', '/send_%d_bytes' % len(DUMMY_DATA))
    conn.read_response()
    fh = TextIOWrapper(conn)
    assert fh.read() == DUMMY_DATA.decode('utf8')
    assert not conn.response_pending()
'''
Title    -  Adding or Changing the Encoding of an Already Open File
Problem  -  You want to add or change the Unicode encoding of an already
            open file object without closing and reopening it.
'''

# io.TextIOWrapper()
# adds a Unicode encoding/decoding layer to an existing file object
# that's opened in binary mode
from urllib.request import urlopen 
from io import TextIOWrapper
u = urlopen('http://www.python.org')
f = TextIOWrapper(u, encoding='utf-8')
text = f.read()

print('\n!---SECTION---\n')

# detach()
# removes the existing text encoding layer so that
# it can be replaced with a new one, e.g. on sys.stdout
import sys
print(sys.stdout.encoding)
# swapping the encoding layer of an interactive terminal breaks its output,
# so the following lines are left commented out:
# sys.stdout = TextIOWrapper(sys.stdout.detach(), encoding='latin-1')
# print(sys.stdout.encoding)
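
# The same detach()/re-wrap pattern can be tried safely on an in-memory
# stream instead of sys.stdout (a minimal sketch, not part of the original
# recipe; the sample text is made up):
from io import BytesIO

raw = BytesIO()
fh = TextIOWrapper(raw, encoding='utf-8')
fh.write('Spicy Jalape\u00f1o')
fh.flush()

# detach the utf-8 text layer; the underlying binary buffer is returned
# and the old wrapper becomes unusable
buffered = fh.detach()

# attach a new encoding layer to the same buffer and re-read it
fh = TextIOWrapper(buffered, encoding='latin-1')
fh.seek(0)
print(fh.read())   # the utf-8 bytes are now decoded as latin-1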

print('\n!---SECTION---\n')

# I/O layering
f = open('sample.txt', 'w')
# a text-mode file object is itself a TextIOWrapper, layered over a buffered
# binary stream (f.buffer), which in turn wraps the raw FileIO object
# (f.buffer.raw)
Example #43
 def _convert_to_unicode(text):
   start = time()
   fh = TextIOWrapper(BytesIO(text), encoding='utf-8', errors='replace')
   logging.debug("Converting text to unicode took %f seconds", time()-start)
   return fh.read()
Example #44
 def read_from_io(self, file: io.TextIOWrapper):
     # todo: Is it really a good idea to read in the whole file? Maybe yield? NO.
     return self.read(file.read())
Example #45
 def test_read_eof(self):
     stream = Stream(None)
     wrapped = TextIOWrapper(stream, 'utf-8')
     stream.buffer.feed(b'foo')
     stream.buffer.feed_eof()
     self.assertEqual(wrapped.read(), 'foo')
Example #46
 def test_simple(self):
     reader = StreamReader()
     wrapped = TextIOWrapper(reader, 'utf-8')
     reader.feed(b'foo')
     self.assertEqual(wrapped.read(3), 'foo')
Example #47
 def test_read_eof(self):
     reader = StreamReader()
     wrapped = TextIOWrapper(reader, 'utf-8')
     reader.feed(b'foo')
     reader.feed_eof()
     self.assertEqual(wrapped.read(), 'foo')
Example #48
 def _cmd_process(self, cmd):
     cmd = cmd.strip().replace("\n", ";")
     if sys.version_info >= (3, 0):
         self.process.stdin.write(bytes(cmd + '\n', 'utf-8'))
         # XXX: Use the TextIOWrapper or we can get stuck in an endless loop!
         r = getattr(self, '_process_stdout_wrapper', None)
         if r is None:
             r = TextIOWrapper(self.process.stdout, encoding='utf8')
             self._process_stdout_wrapper = r
     else:
         self.process.stdin.write(cmd + '\n')
         r = self.process.stdout
     self.process.stdin.flush()
     out = ''
     while True:
         if self.nonblocking:
             try:
                 foo = r.read(4096)
             except:
                 continue
         else:
             foo = r.read(1)
         if len(foo) > 0 and foo[-1] == '\x00':
             out += foo[0:-1]
             break
         out += foo
     return out
Example #49
class FileObjectPosix(object):
    """
    A file-like object that operates on non-blocking files but
    provides a synchronous, cooperative interface.

    .. note::
         Random read/write (e.g., ``mode='rwb'``) is not supported.
         For that, use :class:`io.BufferedRWPair` around two instances of this
         class.

    .. tip::
         Although this object provides a :meth:`fileno` method and
         so can itself be passed to :func:`fcntl.fcntl`, setting the
         :data:`os.O_NONBLOCK` flag will have no effect; likewise, removing
         that flag will cause this object to no longer be cooperative.
    """

    #: platform specific default for the *bufsize* parameter
    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        """
        :keyword fobj: Either an integer fileno, or an object supporting the
            usual :meth:`socket.fileno` method. The file *will* be
            put in non-blocking mode using :func:`gevent.os.make_nonblocking`.
        :keyword str mode: The manner of access to the file, one of "rb", "rU" or "wb"
            (where the "b" or "U" can be omitted).
            If "U" is part of the mode, IO will be done on text, otherwise bytes.
        :keyword int bufsize: If given, the size of the buffer to use. The default
            value means to use a platform-specific default, and a value of 0 is translated
            to a value of 1. Other values are interpreted as for the :mod:`io` package.
            Buffering is ignored in text mode.
        """
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        orig_mode = mode
        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False

        if len(mode) != 1 and mode not in 'rw': # pragma: no cover
            # Python 3 builtin `open` raises a ValueError for invalid modes;
            # Python 2 ignores it. In the past, we raised an AssertionError, if __debug__ was
            # enabled (which it usually was). Match Python 3 because it makes more sense
            # and because __debug__ may not be enabled.
            # NOTE: This is preventing a mode like 'rwb' for binary random access;
            # that code was never tested and was explicitly marked as "not used"
            raise ValueError('mode can only be [rb, rU, wb], not %r' % (orig_mode,))

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0 or bufsize == 1:
            bufsize = self.default_bufsize
        elif bufsize == 0:
            bufsize = 1

        if mode == 'r':
            self.io = BufferedReader(self.fileio, bufsize)
        else:
            assert mode == 'w'
            self.io = BufferedWriter(self.fileio, bufsize)
        #else: # QQQ: not used, not reachable
        #
        #    self.io = BufferedRandom(self.fileio, bufsize)

        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is cloed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, lines):
        self.io.writelines(lines)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def readable(self):
        return self.io.readable()

    def writable(self):
        return self.io.writable()

    def seek(self, *args, **kwargs):
        return self.io.seek(*args, **kwargs)

    def seekable(self):
        return self.io.seekable()

    def tell(self):
        return self.io.tell()

    def truncate(self, size=None):
        return self.io.truncate(size)

    def __iter__(self):
        return self.io

    def __getattr__(self, name):
        # XXX: Should this really be _fobj, or self.io?
        # _fobj can easily be None but io never is
        return getattr(self._fobj, name)
Example #50
class FileObjectPosix(object):
    """
    A file-like object that operates on non-blocking files.

    .. seealso:: :func:`gevent.os.make_nonblocking`
    """
    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        """
        :param fobj: Either an integer fileno, or an object supporting the
            usual :meth:`socket.fileno` method. The file will be
            put in non-blocking mode.
        """
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False
        assert len(mode) == 1, 'mode can only be [rb, rU, wb]'

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0:
            bufsize = self.default_bufsize
        if mode == 'r':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedReader(self.fileio, bufsize)
        elif mode == 'w':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedWriter(self.fileio, bufsize)
        else:
            # QQQ: not used
            self.io = BufferedRandom(self.fileio, bufsize)
        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is cloed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, lines):
        self.io.writelines(lines)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def seek(self, *args, **kwargs):
        return self.io.seek(*args, **kwargs)

    def seekable(self):
        return self.io.seekable()

    def tell(self):
        return self.io.tell()

    def truncate(self, size=None):
        return self.io.truncate(size)

    def __iter__(self):
        return self.io

    def __getattr__(self, name):
        return getattr(self._fobj, name)
class FileObjectPosix(object):
    """
    A file-like object that operates on non-blocking files but
    provides a synchronous, cooperative interface.

    .. caution::
         This object is most effective wrapping files that can be used appropriately
         with :func:`select.select` such as sockets and pipes.

         In general, on most platforms, operations on regular files
         (e.g., ``open('/etc/hosts')``) are considered non-blocking
         already, even though they can take some time to complete as
         data is copied to the kernel and flushed to disk (this time
         is relatively bounded compared to sockets or pipes, though).
         A :func:`~os.read` or :func:`~os.write` call on such a file
         will still effectively block for some small period of time.
         Therefore, wrapping this class around a regular file is
         unlikely to make IO gevent-friendly: reading or writing large
         amounts of data could still block the event loop.

         If you'll be working with regular files and doing IO in large
         chunks, you may consider using
         :class:`~gevent.fileobject.FileObjectThread` or
         :func:`~gevent.os.tp_read` and :func:`~gevent.os.tp_write` to bypass this
         concern.

    .. note::
         Random read/write (e.g., ``mode='rwb'``) is not supported.
         For that, use :class:`io.BufferedRWPair` around two instances of this
         class.

    .. tip::
         Although this object provides a :meth:`fileno` method and
         so can itself be passed to :func:`fcntl.fcntl`, setting the
         :data:`os.O_NONBLOCK` flag will have no effect; however, removing
         that flag will cause this object to no longer be cooperative.

    .. versionchanged:: 1.1
       Now uses the :mod:`io` package internally. Under Python 2, previously
       used the undocumented class :class:`socket._fileobject`. This provides
       better file-like semantics (and portability to Python 3).
    """

    #: platform specific default for the *bufsize* parameter
    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        """
        :keyword fobj: Either an integer fileno, or an object supporting the
            usual :meth:`socket.fileno` method. The file *will* be
            put in non-blocking mode using :func:`gevent.os.make_nonblocking`.
        :keyword str mode: The manner of access to the file, one of "rb", "rU" or "wb"
            (where the "b" or "U" can be omitted).
            If "U" is part of the mode, IO will be done on text, otherwise bytes.
        :keyword int bufsize: If given, the size of the buffer to use. The default
            value means to use a platform-specific default, and a value of 0 is translated
            to a value of 1. Other values are interpreted as for the :mod:`io` package.
            Buffering is ignored in text mode.
        """
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        orig_mode = mode
        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False

        if len(mode) != 1 and mode not in 'rw': # pragma: no cover
            # Python 3 builtin `open` raises a ValueError for invalid modes;
            # Python 2 ignores it. In the past, we raised an AssertionError, if __debug__ was
            # enabled (which it usually was). Match Python 3 because it makes more sense
            # and because __debug__ may not be enabled.
            # NOTE: This is preventing a mode like 'rwb' for binary random access;
            # that code was never tested and was explicitly marked as "not used"
            raise ValueError('mode can only be [rb, rU, wb], not %r' % (orig_mode,))

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0 or bufsize == 1:
            bufsize = self.default_bufsize
        elif bufsize == 0:
            bufsize = 1

        if mode == 'r':
            self.io = BufferedReader(self.fileio, bufsize)
        else:
            assert mode == 'w'
            self.io = BufferedWriter(self.fileio, bufsize)
        #else: # QQQ: not used, not reachable
        #
        #    self.io = BufferedRandom(self.fileio, bufsize)

        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is closed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, lines):
        self.io.writelines(lines)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def readable(self):
        """
        .. versionadded:: 1.1b2
        """
        return self.io.readable()

    def writable(self):
        """
        .. versionadded:: 1.1b2
        """
        return self.io.writable()

    def seek(self, *args, **kwargs):
        return self.io.seek(*args, **kwargs)

    def seekable(self):
        return self.io.seekable()

    def tell(self):
        return self.io.tell()

    def truncate(self, size=None):
        return self.io.truncate(size)

    def __iter__(self):
        return self.io

    def __getattr__(self, name):
        # XXX: Should this really be _fobj, or self.io?
        # _fobj can easily be None but io never is
        return getattr(self._fobj, name)
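
A minimal usage sketch for this wrapper, assuming a POSIX platform with gevent installed and the class exposed as gevent.fileobject.FileObjectPosix (as in gevent 1.x):

import os
from gevent.fileobject import FileObjectPosix

# wrap both ends of a pipe; the file descriptors are put into
# non-blocking mode by the wrapper
r, w = os.pipe()
reader = FileObjectPosix(r, 'rb')
writer = FileObjectPosix(w, 'wb')

writer.write(b'hello\n')
writer.flush()
print(reader.readline())   # -> b'hello\n'

writer.close()
reader.close()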
Example #52
class FileObjectPosix:
    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False
        assert len(mode) == 1, 'mode can only be [rb, rU, wb]'

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0:
            bufsize = self.default_bufsize
        if mode == 'r':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedReader(self.fileio, bufsize)
        elif mode == 'w':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedWriter(self.fileio, bufsize)
        else:
            # QQQ: not used
            self.io = BufferedRandom(self.fileio, bufsize)
        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is cloed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, list):
        self.io.writelines(list)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def __iter__(self):
        return self.io
class FileObjectPosix(object):
    """
    A file-like object that operates on non-blocking files.

    .. seealso:: :func:`gevent.os.make_nonblocking`
    """
    default_bufsize = io.DEFAULT_BUFFER_SIZE

    def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
        """
        :param fobj: Either an integer fileno, or an object supporting the
            usual :meth:`socket.fileno` method. The file will be
            put in non-blocking mode.
        """
        if isinstance(fobj, int):
            fileno = fobj
            fobj = None
        else:
            fileno = fobj.fileno()
        if not isinstance(fileno, int):
            raise TypeError('fileno must be int: %r' % fileno)

        orig_mode = mode
        mode = (mode or 'rb').replace('b', '')
        if 'U' in mode:
            self._translate = True
            mode = mode.replace('U', '')
        else:
            self._translate = False
        if len(mode) != 1:
            # Python 3 builtin `open` raises a ValueError for invalid modes;
            # Python 2 ignores it. In the past, we raised an AssertionError, if __debug__ was
            # enabled (which it usually was). Match Python 3 because it makes more sense
            # and because __debug__ may not be enabled.
            raise ValueError('mode can only be [rb, rU, wb], not %r' % (orig_mode,))

        self._fobj = fobj
        self._closed = False
        self._close = close

        self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)

        if bufsize < 0:
            bufsize = self.default_bufsize
        if mode == 'r':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedReader(self.fileio, bufsize)
        elif mode == 'w':
            if bufsize == 0:
                bufsize = 1
            elif bufsize == 1:
                bufsize = self.default_bufsize
            self.io = BufferedWriter(self.fileio, bufsize)
        else:
            # QQQ: not used
            self.io = BufferedRandom(self.fileio, bufsize)
        if self._translate:
            self.io = TextIOWrapper(self.io)

    @property
    def closed(self):
        """True if the file is cloed"""
        return self._closed

    def close(self):
        if self._closed:
            # make sure close() is only run once when called concurrently
            return
        self._closed = True
        try:
            self.io.close()
            self.fileio.close()
        finally:
            self._fobj = None

    def flush(self):
        self.io.flush()

    def fileno(self):
        return self.io.fileno()

    def write(self, data):
        self.io.write(data)

    def writelines(self, lines):
        self.io.writelines(lines)

    def read(self, size=-1):
        return self.io.read(size)

    def readline(self, size=-1):
        return self.io.readline(size)

    def readlines(self, sizehint=0):
        return self.io.readlines(sizehint)

    def readable(self):
        return self.io.readable()

    def writable(self):
        return self.io.writable()

    def seek(self, *args, **kwargs):
        return self.io.seek(*args, **kwargs)

    def seekable(self):
        return self.io.seekable()

    def tell(self):
        return self.io.tell()

    def truncate(self, size=None):
        return self.io.truncate(size)

    def __iter__(self):
        return self.io

    def __getattr__(self, name):
        # XXX: Should this really be _fobj, or self.io?
        # _fobj can easily be None but io never is
        return getattr(self._fobj, name)