    def test_read_modified_cached_file(self):
        self.mox.StubOutWithMock(os.path, "getmtime")
        self.mox.StubOutWithMock(builtins, 'open')
        os.path.getmtime(mox.IgnoreArg()).AndReturn(2)

        fake_contents = "lorem ipsum"
        fake_file = self.mox.CreateMockAnything()
        fake_file.read().AndReturn(fake_contents)
        fake_context_manager = self.mox.CreateMockAnything()
        fake_context_manager.__enter__().AndReturn(fake_file)
        fake_context_manager.__exit__(mox.IgnoreArg(),
                                      mox.IgnoreArg(),
                                      mox.IgnoreArg())

        builtins.open(mox.IgnoreArg()).AndReturn(
            fake_context_manager)

        self.mox.ReplayAll()
        fileutils._FILE_CACHE = {
            '/this/is/a/fake': {"data": 1123, "mtime": 1}
        }

        fresh, data = fileutils.read_cached_file("/this/is/a/fake")
        self.assertEqual(data, fake_contents)
        self.assertTrue(fresh)
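For comparison, the same stubbing can be written with the standard library's unittest.mock instead of mox; a minimal sketch, assuming Python 3 and the same fileutils.read_cached_file API and _FILE_CACHE attribute used above:

    def test_read_modified_cached_file_with_mock(self):
        from unittest import mock

        fake_contents = "lorem ipsum"
        # mock_open() gives a stand-in for open() whose handle's read()
        # returns read_data, both when called directly and as a context manager
        fake_open = mock.mock_open(read_data=fake_contents)
        with mock.patch("os.path.getmtime", return_value=2), \
                mock.patch("builtins.open", fake_open):
            fileutils._FILE_CACHE = {
                '/this/is/a/fake': {"data": 1123, "mtime": 1}
            }
            fresh, data = fileutils.read_cached_file("/this/is/a/fake")
        self.assertEqual(data, fake_contents)
        self.assertTrue(fresh)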
Example #2
    def test_read_modified_cached_file(self):
        self.mox.StubOutWithMock(os.path, "getmtime")
        self.mox.StubOutWithMock(builtins, 'open')
        os.path.getmtime(mox.IgnoreArg()).AndReturn(2)

        fake_contents = "lorem ipsum"
        fake_file = self.mox.CreateMockAnything()
        fake_file.read().AndReturn(fake_contents)
        fake_context_manager = mock.Mock()
        fake_context_manager.__enter__ = mock.Mock(return_value=fake_file)
        fake_context_manager.__exit__ = mock.Mock(return_value=False)
        builtins.open(mox.IgnoreArg()).AndReturn(fake_context_manager)

        self.mox.ReplayAll()
        cache_data = {"data": 1123, "mtime": 1}
        self.reload_called = False

        def test_reload(reloaded_data):
            self.assertEqual(reloaded_data, fake_contents)
            self.reload_called = True

        data = utils.read_cached_file(
            "/this/is/a/fake", cache_data, reload_func=test_reload)
        self.assertEqual(data, fake_contents)
        self.assertTrue(self.reload_called)
Example #3
    def test_read_modified_cached_file(self):
        self.mox.StubOutWithMock(os.path, "getmtime")
        self.mox.StubOutWithMock(__builtin__, 'open')
        os.path.getmtime(mox.IgnoreArg()).AndReturn(2)

        fake_contents = "lorem ipsum"
        fake_file = self.mox.CreateMockAnything()
        fake_file.read().AndReturn(fake_contents)
        fake_context_manager = self.mox.CreateMockAnything()
        fake_context_manager.__enter__().AndReturn(fake_file)
        fake_context_manager.__exit__(mox.IgnoreArg(),
                                      mox.IgnoreArg(),
                                      mox.IgnoreArg())

        __builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager)

        self.mox.ReplayAll()
        cache_data = {"data": 1123, "mtime": 1}
        self.reload_called = False

        def test_reload(reloaded_data):
            self.assertEqual(reloaded_data, fake_contents)
            self.reload_called = True

        data = utils.read_cached_file(
            "/this/is/a/fake", cache_data, reload_func=test_reload)
        self.assertEqual(data, fake_contents)
        self.assertTrue(self.reload_called)
Example #4
    def update_config(self):
        """
        Update the configuration files according to the current
        in-memory SExtractor configuration.
        """

        # -- Write filter configuration file

        # First check the filter itself

        filter = self.config['FILTER_MASK']
        rows = len(filter)
        cols = len(filter[0])  # May raise ValueError, OK

        filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w')
        filter_f.write("CONV NORM\n")
        filter_f.write("# %dx%d Generated from sextractor.py module.\n" %
                       (rows, cols))
        for row in filter:
            filter_f.write(" ".join(map(repr, row)))
            filter_f.write("\n")

        filter_f.close()

        # -- Write parameter list file

        parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w')
        for parameter in self.config['PARAMETERS_LIST']:
            print(parameter, file=parameters_f)

        parameters_f.close()

        # -- Write NNW configuration file

        nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w')
        nnw_f.write(nnw_config)
        nnw_f.close()

        # -- Write main configuration file

        main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w')

        for key in self.config.keys():
            if (key in SExtractor._SE_config_special_keys):
                continue

            if (key == "PHOT_AUTOPARAMS"):  # tuple instead of a single value
                value = " ".join(map(str, self.config[key]))
            else:
                value = str(self.config[key])

            print(("%-16s       %-16s # %s" %
                   (key, value, SExtractor._SE_config[key]['comment'])),
                  file=main_f)

        main_f.close()
Example #5
    def update_config(self):
        """
        Update the configuration files according to the current
        in-memory SExtractor configuration.
        """

        # -- Write filter configuration file

        # First check the filter itself

        filter = self.config['FILTER_MASK']
        rows = len(filter)
        cols = len(filter[0])   # May raise ValueError, OK

        filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w')
        filter_f.write("CONV NORM\n")
        filter_f.write("# %dx%d Generated from sextractor.py module.\n" %
                       (rows, cols))
        for row in filter:
            filter_f.write(" ".join(map(repr, row)))
            filter_f.write("\n")

        filter_f.close()

        # -- Write parameter list file

        parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w')
        for parameter in self.config['PARAMETERS_LIST']:
            print(parameter, file=parameters_f)

        parameters_f.close()

        # -- Write NNW configuration file

        nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w')
        nnw_f.write(nnw_config)
        nnw_f.close()

        # -- Write main configuration file

        main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w')

        for key in self.config.keys():
            if (key in SExtractor._SE_config_special_keys):
                continue

            if (key == "PHOT_AUTOPARAMS"):  # tuple instead of a single value
                value = " ".join(map(str, self.config[key]))
            else:
                value = str(self.config[key])

            print(("%-16s       %-16s # %s" % (key, value, SExtractor._SE_config[key]['comment'])), file=main_f)

        main_f.close()
Example #6
    def compress():

        with builtins.open(uc_fn, 'rb') as f_in:
            with builtins.open(cpm_fn, 'wb') as f_out:
                gz_out = GzipFile(f_out, mode='wb',
                                  extra=['this is extra', {'foo': 'bar'}],
                                  comment='This is the comment')
                while True:
                    chunk = f_in.read(10)
                    if not chunk:
                        break
                    gz_out.write(chunk)
                gz_out.close()
Example #7
def open(filename,
         flag='c',
         protocol=None,
         writeback=False,
         block=True,
         lckfilename=None):
    """
    Open the sheve file, createing a lockfile at filename.lck.

    If block is False then a IOError will be raised if the lock cannot be acquired.
    """
    if lckfilename is None:
        lckfilename = filename + ".lck"
    lckfile = builtins.open(lckfilename, 'w')

    # Acquire the lock
    if flag == 'r':
        lockflags = LOCK_SH
    else:
        lockflags = LOCK_EX
    if not block:
        lockflags |= LOCK_NB  # keep LOCK_SH/LOCK_EX and add non-blocking
    fcntl.flock(lckfile.fileno(), lockflags)

    shelf = shelve.open(filename, flag, protocol, writeback)
    shelf.close = types.MethodType(_close, shelf)
    shelf.lckfile = lckfile
    return shelf
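A minimal usage sketch for the locking wrapper above, assuming it lives in a module imported here as lockshelve (a hypothetical name) and that the patched _close releases the flock lock before closing the shelf:

    import lockshelve  # hypothetical module name for the wrapper above

    db = lockshelve.open('/tmp/example.db')  # also creates /tmp/example.db.lck
    try:
        db['answer'] = 42
    finally:
        db.close()  # closes the shelf and, via _close, releases the lock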
Example #8
    def decompress():

        with builtins.open(cpm_fn, 'rb') as f_in:
            with builtins.open(dcpm_fn, 'wb') as f_out:
                gz_in = GzipFile(flo(f_in), mode='rb')

                while True:
                    chunk = gz_in.read(10)
                    if not chunk:
                        break
                    f_out.write(chunk)
                gz_in.close()

                print('Extra:', gz_in.extra)
                print('Extra:', gz_in.extra[1]['foo'])
                print('Comment:', gz_in.comment)
Example #9
    def compress():

        with builtins.open(uc_fn, 'rb') as f_in:
            with builtins.open(cpm_fn, 'wb') as f_out:
                gz_out = GzipFile(f_out,
                                  mode='wb',
                                  extra=['this is extra', {
                                      'foo': 'bar'
                                  }],
                                  comment='This is the comment')
                while True:
                    chunk = f_in.read(10)
                    if not chunk:
                        break
                    gz_out.write(chunk)
                gz_out.close()
Example #10
    def decompress():

        with builtins.open(cpm_fn, 'rb') as f_in:
            with builtins.open(dcpm_fn, 'wb') as f_out:
                gz_in = GzipFile(flo(f_in), mode='rb')

                while True:
                    chunk = gz_in.read(10)
                    if not chunk:
                        break
                    f_out.write(chunk)
                gz_in.close()

                print('Extra:', gz_in.extra)
                print('Extra:', gz_in.extra[1]['foo'])
                print('Comment:', gz_in.comment)
Example #11
    def __init__(self,
                 key,
                 mode='r',
                 connection=None,
                 encrypt=True,
                 version_id=None):
        from baiji.connection import S3Connection
        self.encrypt = encrypt
        self.key = key
        if path.islocal(key):
            self.should_upload_on_close = False
            self.mode = FileMode(mode, allowed_modes='arwxb+t')
            from six.moves import builtins
            local_path = path.parse(key).path
            if self.mode.is_output and not os.path.exists(
                    os.path.dirname(local_path)):
                from baiji.util.shutillib import mkdir_p
                mkdir_p(os.path.dirname(local_path))
            try:
                # Use os.open to catch exclusive access to the file, but use open to get a nice, useful file object
                self.fd = os.open(local_path, self.mode.flags)
                self.f = builtins.open(local_path,
                                       self.mode.mode.replace('x', 'w'))
                os.close(self.fd)
            except OSError as e:
                import errno
                if e.errno == errno.EEXIST:
                    raise KeyExists("Local file exists: %s" % local_path)
                elif e.errno == errno.ENOENT:
                    raise KeyNotFound("Local file does not exist: %s" %
                                      local_path)
                else:
                    raise IOError(e.errno, "%s: %s" % (e.strerror, e.filename))
        else:
            if connection is None:
                connection = S3Connection()
            self.connection = connection

            self.mode = FileMode(mode, allowed_modes='rwxbt')
            self.should_upload_on_close = self.mode.is_output
            if self.mode.creating_exclusively:
                if self.connection.exists(self.key):
                    raise KeyExists("Key exists in bucket: %s" % self.key)
                else:
                    self.connection.touch(self.key, encrypt=self.encrypt)
            # Use w+ so we can read back the contents in upload()
            new_mode = ('w+' + (self.mode.binary and 'b' or '') +
                        (self.mode.text and 't' or ''))
            from baiji.util import tempfile
            self.f = tempfile.NamedTemporaryFile(
                mode=new_mode,
                suffix=os.path.splitext(path.parse(self.key).path)[1])
            self.name = self.f.name
            # Used by some serialization code to find files which sit
            # alongside the file in question, like textures which sit next
            # to a mesh file
            self.remotename = key
            if self.mode.reading:
                self.connection.cp(self.key,
                                   self.name,
                                   force=True,
                                   version_id=version_id)
Example #12
def open(path, mode='rb'):
    '''Open a Gzip file.'''
    with builtins.open(path, mode) as raw:
        fobj = from_file(raw)
        try:
            yield fobj
        finally:
            fobj.close()
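Because this function yields rather than returns, it presumably relies on a contextlib.contextmanager decorator in the original module (not shown here) to work as a context manager. A rough usage sketch under that assumption, with 'archive.gz' as a placeholder path:

    with open('archive.gz') as gz:  # the module-level open() defined above
        data = gz.read()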
Example #13
File: base.py Project: tophers42/stor
    def open(self, *args, **kwargs):
        """
        Opens a path and retains interface compatibility with
        `SwiftPath` by popping the unused ``swift_upload_args`` keyword
        argument.
        """
        kwargs.pop('swift_upload_kwargs', None)
        return builtins.open(self, *args, **kwargs)
Example #14
    def __init__(self, name, mode='r'):
        self.name = name
        self.mode = mode
        self.closed = True

        self._file = None
        self._keys = list()
        self._keys_positions = {}
        self._output = None
        self._firstline = True

        if self.mode != 'r':
            raise ValueError(
                'only read-only access is now implemented.'
                )

        self._file = __builtin__.open(self.name, self.mode)
        self.closed = False

        # Reading header

        self._line = self._file.readline()
        if not(self._line):
            raise WrongSExtractorfileException(
                'not a SExtractor text catalog (empty file)'
                )

        while (self._line):
            __ll = (self._line).replace('\n', '')
            if __ll[0] == '#':   # Still in header
                columns = __ll.split()
                if len(columns) < 3:
                    raise WrongSExtractorfileException(
                        'not a SExtractor text catalog (invalid header)'
                        )
                name = columns[2]
                if name not in SExtractorfile._SE_keys:
                    raise WrongSExtractorfileException(
                        'not a SExtractor text catalog (unknown keyword %s)'
                        % name
                        )
                self._keys_positions[name] = int(columns[1]) - 1
                self._keys.append(name)
            else:
                break
            self._line = self._file.readline()

        if not(self._keys):
            raise WrongSExtractorfileException(
                'not a SExtractor text catalog (empty header)'
                )

        self._outdict = dict([(k, None) for k in self._keys])
        self._firstline = True
Example #15
    def open(self, *args, **kwargs):
        """
        Opens a path and retains interface compatibility with
        `SwiftPath` by popping the unused ``swift_upload_args`` keyword
        argument.

        Creates parent directory if it does not exist.
        """
        kwargs.pop('swift_upload_kwargs', None)
        self.parent.makedirs_p()
        return builtins.open(self, *args, **kwargs)
Example #16
File: base.py Project: anujkumar93/stor
    def open(self, mode=None, encoding=None):
        """
        Opens a path and retains interface compatibility with
        `SwiftPath` by popping the unused ``swift_upload_args`` keyword
        argument.

        Creates parent directory if it does not exist.
        """
        self.parent.makedirs_p()
        # only set kwargs if they aren't set to avoid overwriting defaults
        kwargs = {k: v for k, v in [('mode', mode), ('encoding', encoding)] if v}
        return builtins.open(self, **kwargs)
Example #17
    def __init__(self, filename, mode=None):
        if not mode:
            mode = 'rb'
        if "b" not in mode:
            mode += "b"
        self.fileobj = builtins.open(filename, mode)
        if "r" in mode:
            self.determine_file_type()
            self.readheader()
            self.readlookup()
            self.sectorsize = self.getsectorsize()
        self.mask = None
        self.nland = None
Example #18
File: secret.py Project: mailgun/pylemma
def _read_key_from_disk(keypath):
    """
    Reads key from disk and returns the base64-decoded key. New lines
    (if they exist) are stripped.
    """

    # read key from disk
    encoded_secret_key = builtins.open(keypath).read()

    # strip newlines if they exist
    encoded_key = encoded_secret_key.strip('\n')

    # decode base64-encoding and return key bytes
    return encodedstring_to_bytes(encoded_key)
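The call above never closes the file handle it opens; a small variant using a with block (a sketch that keeps the same encodedstring_to_bytes helper from the snippet) avoids leaking the handle:

    def _read_key_from_disk(keypath):
        # open, read, and close the key file via a context manager
        with builtins.open(keypath) as key_file:
            encoded_secret_key = key_file.read()
        # strip newlines, then decode the base64 encoding and return key bytes
        return encodedstring_to_bytes(encoded_secret_key.strip('\n'))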
Example #19
    def __init__(self, name, mode='r'):
        self.name = name
        self.mode = mode
        self.closed = True

        self._file = None
        self._keys = list()
        self._keys_positions = {}
        self._output = None
        self._firstline = True

        if self.mode != 'r':
            raise ValueError('only read-only access is now implemented.')

        self._file = __builtin__.open(self.name, self.mode)
        self.closed = False

        # Reading header

        self._line = self._file.readline()
        if not (self._line):
            raise WrongSExtractorfileException(
                'not a SExtractor text catalog (empty file)')

        while (self._line):
            __ll = (self._line).replace('\n', '')
            if __ll[0] == '#':  # Still in header
                columns = __ll.split()
                if len(columns) < 3:
                    raise WrongSExtractorfileException(
                        'not a SExtractor text catalog (invalid header)')
                name = columns[2]
                if name not in SExtractorfile._SE_keys:
                    raise WrongSExtractorfileException(
                        'not a SExtractor text catalog (unknown keyword %s)' %
                        name)
                self._keys_positions[name] = int(columns[1]) - 1
                self._keys.append(name)
            else:
                break
            self._line = self._file.readline()

        if not (self._keys):
            raise WrongSExtractorfileException(
                'not a SExtractor text catalog (empty header)')

        self._outdict = dict([(k, None) for k in self._keys])
        self._firstline = True
Example #20
    def __init__(self, filename_or_obj=None, mode=None,
                 compresslevel=9,
                 mtime=None, comment=None, extra=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may include the original
        filename of the uncompressed file.  It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written.  The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.

        The compresslevel argument is an integer from 0 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression. 0 is no compression
        at all. The default is 9.

        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing.  All gzip compressed streams
        are required to contain a timestamp.  If omitted or None, the
        current time is used.  This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it.  The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().

        """

        # Make sure we don't inadvertently enable universal newlines on the
        # underlying file object - in read mode, this causes data corruption.
        if mode:
            mode = mode.replace('U', '')
        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'

        if hasattr(filename_or_obj, 'read'):
            filename = None
            fileobj = filename_or_obj
        elif isinstance(filename_or_obj, string_types):
            filename = filename_or_obj
            fileobj = None
        else:
            raise IOError('First parameter must be a string' +
                          ' filename or file-like object')

        if fileobj is None:
            fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')

        if filename is None:
            # Issue #13781: os.fdopen() creates a fileobj with a bogus name
            # attribute. Avoid saving this in the gzip header's filename field.
            if hasattr(fileobj, 'name') and fileobj.name != '<fdopen>':
                filename = fileobj.name
            else:
                filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'):
                mode = fileobj.mode
            else:
                mode = 'rb'

        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Buffer data read from gzip file. extrastart is offset in
            # stream where buffer starts. extrasize is number of
            # bytes remaining in buffer from current stream position.
            self.extrabuf = ''
            self.extrasize = 0
            self.extrastart = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100

        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            self.compress = zlib.compressobj(compresslevel,
                                             zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL,
                                             0)
        else:
            raise IOError('Mode ' + mode + ' not supported')

        self.fileobj = fileobj
        self.offset = 0
        self.mtime = mtime
        self.done = False
        self.comment = comment
        self.extra = extra

        if self.mode == WRITE:
            self._write_gzip_header()
Example #21
    def init_write():
        with builtins.open(uc_fn, 'w') as f:
            s = ':'.join(['{:03d}'.format(i) for i in range(50)])
            f.write(s)
Example #22
    def __init__(self,
                 filename_or_obj=None,
                 mode=None,
                 compresslevel=9,
                 mtime=None,
                 comment=None,
                 extra=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may include the original
        filename of the uncompressed file.  It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written.  The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.

        The compresslevel argument is an integer from 0 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression. 0 is no compression
        at all. The default is 9.

        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing.  All gzip compressed streams
        are required to contain a timestamp.  If omitted or None, the
        current time is used.  This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it.  The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().

        """

        # Make sure we don't inadvertently enable universal newlines on the
        # underlying file object - in read mode, this causes data corruption.
        if mode:
            mode = mode.replace('U', '')
        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'

        if hasattr(filename_or_obj, 'read'):
            filename = None
            fileobj = filename_or_obj
        elif isinstance(filename_or_obj, string_types):
            filename = filename_or_obj
            fileobj = None
        else:
            raise IOError('First parameter must be a string' +
                          ' filename or file-like object')

        if fileobj is None:
            fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')

        if filename is None:
            # Issue #13781: os.fdopen() creates a fileobj with a bogus name
            # attribute. Avoid saving this in the gzip header's filename field.
            if hasattr(fileobj, 'name') and fileobj.name != '<fdopen>':
                filename = fileobj.name
            else:
                filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'):
                mode = fileobj.mode
            else:
                mode = 'rb'

        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Buffer data read from gzip file. extrastart is offset in
            # stream where buffer starts. extrasize is number of
            # bytes remaining in buffer from current stream position.
            self.extrabuf = ''
            self.extrasize = 0
            self.extrastart = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100

        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            self.compress = zlib.compressobj(compresslevel, zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL, 0)
        else:
            raise IOError('Mode ' + mode + ' not supported')

        self.fileobj = fileobj
        self.offset = 0
        self.mtime = mtime
        self.done = False
        self.comment = comment
        self.extra = extra

        if self.mode == WRITE:
            self._write_gzip_header()
Example #23
    def init_write():
        with builtins.open(uc_fn, 'w') as f:
            s = ':'.join(['{:03d}'.format(i) for i in range(50)])
            f.write(s)
Example #24
def open(path, *args, **kwargs):  # pylint: disable=redefined-builtin
  return builtins.open(extend(path), *args, **kwargs)
Example #25
    def open(self, path, mode):
        """Wrapper on __builtin__.open used to simplify unit testing."""
        from six.moves import builtins
        return builtins.open(path, mode)
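The docstring above says the wrapper exists to simplify unit testing: a test can patch the wrapper on the owning object instead of patching builtins.open globally. A hedged sketch of that pattern (SomeDriver and its read_config method are hypothetical names used only for illustration):

    def test_read_config_goes_through_wrapper(self):
        from unittest import mock

        driver = SomeDriver()  # hypothetical class defining the open() wrapper
        fake_open = mock.mock_open(read_data='key = value')
        with mock.patch.object(driver, 'open', fake_open):
            driver.read_config('/etc/example.conf')  # hypothetical caller of self.open()
        fake_open.assert_called_once_with('/etc/example.conf', 'r')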
Example #26
    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None, mtime=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, an io.BytesIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may include the original
        filename of the uncompressed file.  It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written.  The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
        'wb', and 'a' and 'ab'.

        The compresslevel argument is an integer from 0 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression. 0 is no compression
        at all. The default is 9.

        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing.  All gzip compressed streams
        are required to contain a timestamp.  If omitted or None, the
        current time is used.  This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it.  The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().

        """

        if mode and ('t' in mode or 'U' in mode):
            raise ValueError("Invalid mode: {!r}".format(mode))
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
        if filename is None:
            filename = getattr(fileobj, 'name', '')
            if not isinstance(filename, six.string_types):
                filename = ''
        if mode is None:
            mode = getattr(fileobj, 'mode', 'rb')

        if mode.startswith('r'):
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Buffer data read from gzip file. extrastart is offset in
            # stream where buffer starts. extrasize is number of
            # bytes remaining in buffer from current stream position.
            self.extrabuf = b""
            self.extrasize = 0
            self.extrastart = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100
            fileobj = _PaddedFile(fileobj)

        elif mode.startswith(('w', 'a')):
            self.mode = WRITE
            self._init_write(filename)
            self.compress = zlib.compressobj(compresslevel,
                                             zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL,
                                             0)
        else:
            raise ValueError("Invalid mode: {!r}".format(mode))

        self.fileobj = fileobj
        self.offset = 0
        self.mtime = mtime

        if self.mode == WRITE:
            self._write_gzip_header()
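The constructor docstring above maps directly onto the usual gzip round trip; a small sketch using only arguments it describes (filename, mode, mtime):

    # write a compressed file, then read it back with the same class
    gz = GzipFile('example.txt.gz', mode='wb', mtime=0)
    gz.write(b'hello world')
    gz.close()

    gz = GzipFile('example.txt.gz', mode='rb')
    assert gz.read() == b'hello world'
    gz.close()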
Example #27
    def open(self, path, mode):
        """Wrapper on __builtin__.open used to simplify unit testing."""
        from six.moves import builtins
        return builtins.open(path, mode)