Code example #1
 def set_connection_value(self, snow_runner: str, attribute: str,
                          value: str):
     if snow_runner not in self.connections:
         raise ParameterError(self.configuration_file,
                              f"{snow_runner} was not found in the xml!!")
     if attribute not in self.connections[snow_runner]:
         raise ParameterError(
             self.configuration_file,
             f"{attribute} was not found in {snow_runner}!!")
     self.connections[snow_runner][attribute] = value
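
Every snippet in this collection raises ParameterError, but none of them shows its definition. As a rough sketch only (an assumption, not the actual class from AutoTester or s3cmd), a minimal exception type compatible with both the two-argument calls used here and the one-argument s3cmd calls further below could look like this:

class ParameterError(Exception):
    """Raised when a caller supplies an invalid or unknown parameter."""

    def __init__(self, parameter, message=''):
        self.parameter = parameter
        self.message = message
        super().__init__('%s: %s' % (parameter, message) if message else parameter)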
Code example #2
File: GDTInterface.py Project: rapiden/AutoTester
 def __init__(self, connection: str):
     self.connection = connection
     self.is_connected = False
     try:
         self.gdt_client = GdtClient(
             Utilities.get_configuration_file(), self.connection)
     except Exception:
         raise ParameterError(
             'configuration_file or connection',
             f'Either the configuration file path '
             f'({Utilities.get_configuration_file()}) or the required '
             f'connection ({self.connection}) is not valid.')
Code example #3
File: ConnMan.py Project: xrage/s3cmd
 def get(hostname, ssl=None):
     cfg = Config()
     if ssl is None:
         ssl = cfg.use_https
     conn = None
     if cfg.proxy_host != "":
         if ssl and sys.hexversion < 0x02070000:
             raise ParameterError(
                 "use_https=True can't be used with proxy on Python <2.7")
         conn_id = "proxy://%s:%s" % (cfg.proxy_host, cfg.proxy_port)
     else:
         conn_id = "http%s://%s" % (ssl and "s" or "", hostname)
     ConnMan.conn_pool_sem.acquire()
     if conn_id not in ConnMan.conn_pool:
         ConnMan.conn_pool[conn_id] = []
     if len(ConnMan.conn_pool[conn_id]):
         conn = ConnMan.conn_pool[conn_id].pop()
         debug("ConnMan.get(): re-using connection: %s#%d" %
               (conn.id, conn.counter))
     ConnMan.conn_pool_sem.release()
     if not conn:
         debug("ConnMan.get(): creating new connection: %s" % conn_id)
         conn = http_connection(conn_id, hostname, ssl, cfg)
         conn.c.connect()
     conn.counter += 1
     return conn
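
A sketch of typical pool usage, assuming the companion ConnMan.put() that s3cmd provides for returning a connection to the pool (the hostname is made up):

conn = ConnMan.get("s3.example.com")   # reuses a pooled connection when one exists
try:
    conn.c.request("HEAD", "/")        # conn.c is the underlying http(s) connection
    conn.c.getresponse().read()
finally:
    ConnMan.put(conn)                  # hand the connection back for the next caller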
Code example #4
File: Utilities.py Project: rapiden/AutoTester
    def bool_to_validity(bool_value: bool) -> str:
        if bool_value is True:
            return 'Valid'

        elif bool_value is False:
            return 'Invalid'
        else:
            raise ParameterError('bool_value',
                                 'The value is neither True nor False.')
Code example #5
File: Utilities.py Project: rapiden/AutoTester
    def validity_to_bool(validity: str) -> bool:
        if validity.lower() == 'valid':
            return True

        elif validity.lower() == 'invalid':
            return False
        else:
            raise ParameterError(
                'validity', "The value is neither 'valid' nor 'invalid'.")
Code example #6
File: Utilities.py Project: rapiden/AutoTester
    def bool_to_affirmative(bool_value: bool) -> str:
        if bool_value is True:
            return 'Yes'

        elif bool_value is False:
            return 'No'

        else:
            raise ParameterError('bool_value',
                                 'The value is neither True nor False.')
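
A quick round-trip illustration of the three converters, assuming they are static methods on a Utilities class as the signatures suggest (hypothetical calls, not taken from the project):

assert Utilities.bool_to_validity(True) == 'Valid'
assert Utilities.validity_to_bool('Valid') is True
assert Utilities.bool_to_affirmative(False) == 'No'
Utilities.bool_to_validity(1)   # raises ParameterError: the strict is-True/is-False checks reject non-bools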
Code example #7
File: GDTInterface.py Project: rapiden/AutoTester
 def read_buffer(self, buffer_name: str, buffer_field: str, element_field: str):
     current_path = os.getcwd()
     if self.is_connected is True:
         try:
             os.chdir(Utilities.get_gdt_interface_lib_dir())
             return self.gdt_client.ReadBufferElement(
                 buffer_name, buffer_field, element_field, None, True, 50)
         except System.Collections.Generic.KeyNotFoundException:
             raise ParameterError(
                 'buffer_name or buffer_field',
                 f'Either the buffer name ({buffer_name}) or the buffer '
                 f'field ({buffer_field}) is not valid.')
         finally:
             os.chdir(current_path)
     else:
         raise GDTConnectionError('GDT Interface is not connected yet.')
Code example #8
File: GDTInterface.py Project: rapiden/AutoTester
    def inject_data_item_validity(self, data_item_type: GDTInterfaceDataType, data_item_name: str, validity: bool) -> bool:
        if self.is_connected is True:
            try:
                clr_data_item_type = self.__convert_local_type_to_clr_type(data_item_type)
                is_injected = self.gdt_client.WriteDataItemValidity(clr_data_item_type, data_item_name, validity)

            except Exception:
                raise ParameterError(
                    'data_item_type, data_item_name or validity',
                    f'Either the data item type ({data_item_type}), the data '
                    f'item name ({data_item_name}) or the validity '
                    f'({validity}) is not valid.')

            return is_injected

        else:
            raise GDTConnectionError('GDT Interface is not connected yet.')
Code example #9
File: Utilities.py Project: rapiden/AutoTester
    def validity_or_number_to_bool(validity) -> bool:
        # isinstance guards keep non-string, non-0/1 values (e.g. 2 or None)
        # from hitting .lower() and raising AttributeError instead of
        # ParameterError.
        if validity == 1 or validity == "1":
            return True
        elif validity == 0 or validity == "0":
            return False
        elif isinstance(validity, str) and validity.lower() == 'valid':
            return True
        elif isinstance(validity, str) and validity.lower() == 'invalid':
            return False
        else:
            raise ParameterError(
                'validity',
                "The value is not one of 'valid', 'invalid', 0 or 1.")
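
With the isinstance guards above, out-of-range inputs fail with the intended exception (hypothetical calls):

assert Utilities.validity_or_number_to_bool('Valid') is True
assert Utilities.validity_or_number_to_bool("0") is False
assert Utilities.validity_or_number_to_bool(1) is True
Utilities.validity_or_number_to_bool(2)   # raises ParameterError rather than AttributeError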
Code example #10
File: GDTInterface.py Project: rapiden/AutoTester
 def inject_struct(self, struct_name: str, struct_field: str, value) -> bool:
     current_path = os.getcwd()
     if self.is_connected is True:
         try:
             os.chdir(Utilities.get_gdt_interface_lib_dir())
             is_injected = self.gdt_client.WriteStructField(struct_name, struct_field, value)
             return is_injected
         except Exception:
             raise ParameterError(
                 'struct_name, struct_field or value',
                 f'Either the struct name ({struct_name}), the struct field '
                 f'({struct_field}) or the value ({value}) is not valid.')
         finally:
             os.chdir(current_path)
     else:
         raise GDTConnectionError('GDT Interface is not connected yet.')
Code example #11
File: GDTInterface.py Project: rapiden/AutoTester
    def inject_data_item_override(self, data_item_type: GDTInterfaceDataType, data_item_name: str, override: bool) -> bool:
        if self.is_connected is True:
            try:
                clr_data_item_type = self.__convert_local_type_to_clr_type(data_item_type)
                is_injected = self.gdt_client.SetDataItemOverride(clr_data_item_type, data_item_name, override)

            except Exception:
                raise ParameterError(
                    'data_item_type, data_item_name or override',
                    f'Either the data item type ({data_item_type}), the data '
                    f'item name ({data_item_name}) or the override '
                    f'({override}) is not valid.')

            return is_injected

        else:
            raise GDTConnectionError('GDT Interface is not connected yet.')
Code example #12
File: GDTInterface.py Project: rapiden/AutoTester
    def write_data_item(self, data_item_type: GDTInterfaceDataType, data_item_name: str, value, validity, override) -> bool:
        if self.is_connected is True:
            # Necessary for CLR function parameters compatibility.
            nullable_validity = System.Nullable[System.Boolean](validity)
            nullable_override = System.Nullable[System.Boolean](override)

            try:
                clr_data_item_type = self.__convert_local_type_to_clr_type(data_item_type)
                is_injected = self.gdt_client.WriteDataItem(clr_data_item_type, data_item_name, value, nullable_validity, nullable_override)

            except Exception:
                raise ParameterError(
                    'data_item_type, data_item_name or value',
                    f'Either the data item type ({data_item_type}), the data '
                    f'item name ({data_item_name}) or the value ({value}) '
                    f'is not valid.')

            return is_injected

        else:
            raise GDTConnectionError('GDT Interface is not connected yet.')
Code example #13
 def setAclPublic(self, acl_public):
     le = self.tree.find(".//LoggingEnabled")
     if le is None:  # ElementTree elements are falsy when childless; compare to None
         raise ParameterError(
             "Logging not enabled, can't set default ACL for logs")
     tg = le.find(".//TargetGrants")
     if not acl_public:
         if tg is None:
             ## All good, it was never there
             return
         else:
             le.remove(tg)
     else:  # acl_public == True
         anon_read = GranteeAnonRead().getElement()
         if tg is None:
             tg = ET.SubElement(le, "TargetGrants")
         ## What if TargetGrants already exists? We should check if
         ## AnonRead is there before appending a new one. Later...
         tg.append(anon_read)
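
The "is None" checks above matter: ElementTree elements with no children test falsy, so the original "if not le:" would wrongly report logging as disabled for an empty <LoggingEnabled/> element. A standalone illustration of the pitfall:

import xml.etree.ElementTree as ET

root = ET.fromstring("<BucketLoggingStatus><LoggingEnabled/></BucketLoggingStatus>")
le = root.find(".//LoggingEnabled")
print(le is None)   # False: the element was found
print(not le)       # True: a childless element is falsy (DeprecationWarning on Python 3.12+)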
Code example #14
File: GDTInterface.py Project: rapiden/AutoTester
    def read_struct(self, struct_name: str, struct_field: str):
        current_path = os.getcwd()
        if self.is_connected is True:
            try:
                os.chdir(Utilities.get_gdt_interface_lib_dir())
                struct = self.gdt_client.ReadStruct(struct_name, None, 50)
                if struct is None:
                    # Local sentinel; caught below and re-raised as
                    # ParameterError. (StopIteration is a poor sentinel: it is
                    # swallowed inside generators under PEP 479.)
                    raise KeyError(struct_name)

                field_value = struct.TryGetValue(struct_field, None)
                if field_value[0] is False:
                    raise KeyError(struct_field)

                return field_value[1].Value
            except Exception:
                raise ParameterError(
                    'struct_name or struct_field',
                    f'Either the struct name ({struct_name}) or the struct '
                    f'field ({struct_field}) is not valid.')
            finally:
                os.chdir(current_path)
        else:
            raise GDTConnectionError('GDT Interface is not connected yet.')
Code example #15
File: GDTInterface.py Project: rapiden/AutoTester
    def read_data_item(self, data_item_type: GDTInterfaceDataType, data_item_name: str):
        if self.is_connected is True:
            try:
                clr_data_item_type = self.__convert_local_type_to_clr_type(data_item_type)
                value_read = self.gdt_client.ReadDataItem(clr_data_item_type, data_item_name, None, True, True, 0)

            except Exception:
                raise ParameterError('data_item_type or data_item_name',
                                     f'Either the data item type ({data_item_type}) or the data item name ({data_item_name}) is not valid.')

            ret_dict = {
                "value": value_read[1],
                "validity": value_read[2],
                "override": value_read[3]
            }

            return ret_dict

        else:
            raise GDTConnectionError('GDT Interface is not connected yet.')
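
A hedged call sketch; the enum member, the item name, and the connected "interface" instance are all invented for illustration:

item = interface.read_data_item(GDTInterfaceDataType.FLOAT, 'AIRSPEED')
print(item['value'], item['validity'], item['override'])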
Code example #16
def fetch_remote_list(args,
                      require_attribs=False,
                      recursive=None,
                      uri_params=None):
    # A mutable default ({}) would be shared between calls; build a fresh one.
    if uri_params is None:
        uri_params = {}

    def _get_remote_attribs(uri, remote_item):
        response = S3(cfg).object_info(uri)
        if not response.get('headers'):
            return

        remote_item.update({
            'size': int(response['headers']['content-length']),
            'md5': response['headers']['etag'].strip('"\''),
            'timestamp': dateRFC822toUnix(response['headers']['last-modified'])
        })
        try:
            md5 = response['s3cmd-attrs']['md5']
            remote_item.update({'md5': md5})
            debug(u"retreived md5=%s from headers" % md5)
        except KeyError:
            pass

    def _get_filelist_remote(remote_uri, recursive=True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }

        info(u"Retrieving list of remote files for %s ..." % remote_uri)
        empty_fname_re = re.compile(r'\A\s*\Z')

        total_size = 0

        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(),
                                  prefix=remote_uri.object(),
                                  recursive=recursive,
                                  uri_params=uri_params)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/') + 1]
            remote_uri = S3Uri(u"s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = FileDict(ignore_case=False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][-1] != "/":
                ## We asked for one file and we got that file :-)
                key = unicodise(os.path.basename(deunicodise(object['Key'])))
                object_uri_str = remote_uri_original.uri()
                break_now = True
                ## Remove whatever has already been put to rem_list
                rem_list = FileDict(ignore_case=False)
            else:
                ## Beware - this may be '' if object['Key'] == rem_base !!
                key = object['Key'][rem_base_len:]
                object_uri_str = remote_uri.uri() + key
            if empty_fname_re.match(key):
                # Objects may exist on S3 with empty names (''), which don't map so well to common filesystems.
                warning(u"Empty object name on S3 found, ignoring.")
                continue
            rem_list[key] = {
                'size': int(object['Size']),
                'timestamp': dateS3toUnix(
                    object['LastModified']
                ),  ## Sadly it's upload time, not our lastmod time :-(
                'md5': object['ETag'].strip('"\''),
                'object_key': object['Key'],
                'object_uri_str': object_uri_str,
                'base_uri': remote_uri,
                'dev': None,
                'inode': None,
            }
            if '-' in rem_list[key]['md5']:  # always get it for multipart uploads
                _get_remote_attribs(S3Uri(object_uri_str), rem_list[key])
            md5 = rem_list[key]['md5']
            rem_list.record_md5(key, md5)
            total_size += int(object['Size'])
            if break_now:
                break
        return rem_list, total_size

    cfg = Config()
    remote_uris = []
    remote_list = FileDict(ignore_case=False)

    if type(args) not in (list, tuple, set):
        args = [args]

    if recursive is None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 's3':
            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
        remote_uris.append(uri)

    total_size = 0

    if recursive:
        for uri in remote_uris:
            objectlist, tmp_total_size = _get_filelist_remote(uri,
                                                              recursive=True)
            total_size += tmp_total_size
            for key in objectlist:
                remote_list[key] = objectlist[key]
                remote_list.record_md5(key, objectlist.get_md5(key))
    else:
        for uri in remote_uris:
            uri_str = uri.uri()
            ## Wildcards used in remote URI?
            ## If yes we'll need a bucket listing...
            wildcard_split_result = re.split(r"\*|\?", uri_str, maxsplit=1)
            if len(wildcard_split_result) == 2:  # wildcards found
                prefix, rest = wildcard_split_result
                ## Only request recursive listing if the 'rest' of the URI,
                ## i.e. the part after first wildcard, contains '/'
                need_recursion = '/' in rest
                objectlist, tmp_total_size = _get_filelist_remote(
                    S3Uri(prefix), recursive=need_recursion)
                total_size += tmp_total_size
                for key in objectlist:
                    ## Check whether the 'key' matches the requested wildcards
                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'],
                                            uri_str):
                        remote_list[key] = objectlist[key]
            else:
                ## No wildcards - simply append the given URI to the list
                key = unicodise(os.path.basename(deunicodise(uri.object())))
                if not key:
                    raise ParameterError(
                        u"Expecting S3 URI with a filename or --recursive: %s"
                        % uri.uri())
                remote_item = {
                    'base_uri': uri,
                    'object_uri_str': uri.uri(),
                    'object_key': uri.object()
                }
                if require_attribs:
                    _get_remote_attribs(uri, remote_item)

                remote_list[key] = remote_item
                md5 = remote_item.get('md5')
                if md5:
                    remote_list.record_md5(key, md5)
                total_size += remote_item.get('size', 0)

    remote_list, exclude_list = filter_exclude_include(remote_list)
    return remote_list, exclude_list, total_size
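
A hedged usage sketch for this newer variant (bucket and prefix are made up):

remote_list, exclude_list, total_size = fetch_remote_list(
    's3://example-bucket/photos/', require_attribs=True, recursive=True)
for key in remote_list:
    print(key, remote_list[key]['size'], remote_list[key]['md5'])
print('%d objects, %d bytes total' % (len(remote_list), total_size))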
Code example #17
            info(u"No cache file found, creating it.")

    local_uris = []
    local_list = FileDict(ignore_case=False)
    single_file = False

    if type(args) not in (list, tuple, set):
        args = [args]

    if recursive is None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 'file':
            raise ParameterError(
                "Expecting filename or directory instead of: %s" % arg)
        if uri.isdir() and not recursive:
            raise ParameterError("Use --recursive to upload a directory: %s" %
                                 arg)
        local_uris.append(uri)

    for uri in local_uris:
        list_for_uri, single_file = _get_filelist_local(local_list, uri, cache)

    ## single_file is True if and only if the user
    ## specified one local URI and that URI represents
    ## a FILE. I.e. it is False if the URI was a DIR,
    ## even if that dir contained only one FILE; that
    ## is not a case of single_file == True.
    if len(local_list) > 1:
        single_file = False
Code example #18
File: FileLists.py Project: ihipop/thecus-n2520
def fetch_local_list(args, recursive=None):
    def _get_filelist_local(local_uri):
        info(u"Compiling list of local files...")
        if local_uri.isdir():
            local_base = deunicodise(local_uri.basename())
            local_path = deunicodise(local_uri.path())
            filelist = _fswalk(local_path, cfg.follow_symlinks)
            single_file = False
        else:
            local_base = ""
            local_path = deunicodise(local_uri.dirname())
            filelist = [(local_path, [], [deunicodise(local_uri.basename())])]
            single_file = True
        loc_list = SortedDict(ignore_case=False)
        for root, dirs, files in filelist:
            rel_root = root.replace(local_path, local_base, 1)
            for f in files:
                full_name = os.path.join(root, f)
                if not os.path.isfile(full_name):
                    continue
                if os.path.islink(full_name):
                    if not cfg.follow_symlinks:
                        continue
                relative_file = unicodise(os.path.join(rel_root, f))
                if os.path.sep != "/":
                    # Convert non-unix dir separators to '/'
                    relative_file = "/".join(relative_file.split(os.path.sep))
                if cfg.urlencoding_mode == "normal":
                    relative_file = replace_nonprintables(relative_file)
                if relative_file.startswith('./'):
                    relative_file = relative_file[2:]
                sr = os.stat_result(os.lstat(full_name))
                loc_list[relative_file] = {
                    'full_name_unicode': unicodise(full_name),
                    'full_name': full_name,
                    'size': sr.st_size,
                    'mtime': sr.st_mtime,
                    ## TODO: Possibly more to save here...
                }
        return loc_list, single_file

    cfg = Config()
    local_uris = []
    local_list = SortedDict(ignore_case=False)
    single_file = False

    if type(args) not in (list, tuple):
        args = [args]

    if recursive is None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 'file':
            raise ParameterError(
                "Expecting filename or directory instead of: %s" % arg)
        if uri.isdir() and not recursive:
            raise ParameterError("Use --recursive to upload a directory: %s" %
                                 arg)
        local_uris.append(uri)

    for uri in local_uris:
        list_for_uri, single_file = _get_filelist_local(uri)
        local_list.update(list_for_uri)

    ## single_file is True if and only if the user
    ## specified one local URI and that URI represents
    ## a FILE. I.e. it is False if the URI was a DIR,
    ## even if that dir contained only one FILE; that
    ## is not a case of single_file == True.
    if len(local_list) > 1:
        single_file = False

    return local_list, single_file
Code example #19
File: Utilities.py Project: rapiden/AutoTester
    def remove_folder(folder: str) -> None:
        if os.path.exists(folder):
            shutil.rmtree(folder)

        else:
            raise ParameterError('folder', 'The folder does not exist.')
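
A caller might guard the missing-folder case like this (hypothetical path):

try:
    Utilities.remove_folder('/tmp/old-test-results')
except ParameterError as err:
    print(err)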
Code example #20
File: FileLists.py Project: wizsec/s3cmd
def fetch_local_list(args, recursive = None):
    def _get_filelist_local(loc_list, local_uri, cache):
        info(u"Compiling list of local files...")

        if deunicodise(local_uri.basename()) == "-":
            loc_list["-"] = {
                'full_name_unicode' : '-',
                'full_name' : '-',
                'size' : -1,
                'mtime' : -1,
            }
            return loc_list, True
        if local_uri.isdir():
            local_base = deunicodise(local_uri.basename())
            local_path = deunicodise(local_uri.path())
            if cfg.follow_symlinks:
                filelist = _fswalk_follow_symlinks(local_path)
            else:
                filelist = _fswalk_no_symlinks(local_path)
            single_file = False
        else:
            local_base = ""
            local_path = deunicodise(local_uri.dirname())
            filelist = [( local_path, [], [deunicodise(local_uri.basename())] )]
            single_file = True
        for root, dirs, files in filelist:
            rel_root = root.replace(local_path, local_base, 1)
            for f in files:
                full_name = os.path.join(root, f)
                if not os.path.isfile(full_name):
                    continue
                if os.path.islink(full_name):
                    if not cfg.follow_symlinks:
                        continue
                relative_file = unicodise(os.path.join(rel_root, f))
                if os.path.sep != "/":
                    # Convert non-unix dir separators to '/'
                    relative_file = "/".join(relative_file.split(os.path.sep))
                if cfg.urlencoding_mode == "normal":
                    relative_file = replace_nonprintables(relative_file)
                if relative_file.startswith('./'):
                    relative_file = relative_file[2:]
                sr = os.stat_result(os.lstat(full_name))
                loc_list[relative_file] = {
                    'full_name_unicode' : unicodise(full_name),
                    'full_name' : full_name,
                    'size' : sr.st_size,
                    'mtime' : sr.st_mtime,
                    'dev'   : sr.st_dev,
                    'inode' : sr.st_ino,
                    'uid' : sr.st_uid,
                    'gid' : sr.st_gid,
                    'sr': sr # save it all, may need it in preserve_attrs_list
                    ## TODO: Possibly more to save here...
                }
                if 'md5' in cfg.sync_checks:
                    md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
                    if md5 is None:
                        try:
                            md5 = loc_list.get_md5(relative_file) # this does the file I/O
                        except IOError:
                            continue
                        cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
                    loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5)
        return loc_list, single_file

    def _maintain_cache(cache, local_list):
        if cfg.cache_file:
            cache.mark_all_for_purge()
            for i in local_list.keys():
                cache.unmark_for_purge(local_list[i]['dev'], local_list[i]['inode'], local_list[i]['mtime'], local_list[i]['size'])
            cache.purge()
            cache.save(cfg.cache_file)

    cfg = Config()

    cache = HashCache()
    if cfg.cache_file:
        try:
            cache.load(cfg.cache_file)
        except IOError:
            info(u"No cache file found, creating it.")

    local_uris = []
    local_list = FileDict(ignore_case = False)
    single_file = False

    if type(args) not in (list, tuple):
        args = [args]

    if recursive is None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 'file':
            raise ParameterError("Expecting filename or directory instead of: %s" % arg)
        if uri.isdir() and not recursive:
            raise ParameterError("Use --recursive to upload a directory: %s" % arg)
        local_uris.append(uri)

    for uri in local_uris:
        list_for_uri, single_file = _get_filelist_local(local_list, uri, cache)

    ## single_file is True if and only if the user
    ## specified one local URI and that URI represents
    ## a FILE. I.e. it is False if the URI was a DIR,
    ## even if that dir contained only one FILE; that
    ## is not a case of single_file == True.
    if len(local_list) > 1:
        single_file = False

    _maintain_cache(cache, local_list)

    return local_list, single_file
Code example #21
File: FileLists.py Project: wizsec/s3cmd
def fetch_remote_list(args, require_attribs = False, recursive = None):
    def _get_filelist_remote(remote_uri, recursive = True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }

        info(u"Retrieving list of remote files for %s ..." % remote_uri)

        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), recursive = recursive)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/')+1]
            remote_uri = S3Uri("s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = FileDict(ignore_case = False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][-1] != "/":
                ## We asked for one file and we got that file :-)
                key = os.path.basename(object['Key'])
                object_uri_str = remote_uri_original.uri()
                break_now = True
                rem_list = FileDict(ignore_case = False)   ## Remove whatever has already been put to rem_list
            else:
                key = object['Key'][rem_base_len:]      ## Beware - this may be '' if object['Key']==rem_base !!
                object_uri_str = remote_uri.uri() + key
            rem_list[key] = {
                'size' : int(object['Size']),
                'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
                'md5' : object['ETag'][1:-1],
                'object_key' : object['Key'],
                'object_uri_str' : object_uri_str,
                'base_uri' : remote_uri,
                'dev' : None,
                'inode' : None,
            }
            md5 = object['ETag'][1:-1]
            rem_list.record_md5(key, md5)
            if break_now:
                break
        return rem_list

    cfg = Config()
    remote_uris = []
    remote_list = FileDict(ignore_case = False)

    if type(args) not in (list, tuple):
        args = [args]

    if recursive is None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 's3':
            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
        remote_uris.append(uri)

    if recursive:
        for uri in remote_uris:
            objectlist = _get_filelist_remote(uri)
            for key in objectlist:
                remote_list[key] = objectlist[key]
                remote_list.record_md5(key, objectlist.get_md5(key))
    else:
        for uri in remote_uris:
            uri_str = str(uri)
            ## Wildcards used in remote URI?
            ## If yes we'll need a bucket listing...
            if uri_str.find('*') > -1 or uri_str.find('?') > -1:
                first_wildcard = uri_str.find('*')
                first_questionmark = uri_str.find('?')
                if first_questionmark > -1 and first_questionmark < first_wildcard:
                    first_wildcard = first_questionmark
                prefix = uri_str[:first_wildcard]
                rest = uri_str[first_wildcard+1:]
                ## Only request recursive listing if the 'rest' of the URI,
                ## i.e. the part after first wildcard, contains '/'
                need_recursion = rest.find('/') > -1
                objectlist = _get_filelist_remote(S3Uri(prefix), recursive = need_recursion)
                for key in objectlist:
                    ## Check whether the 'key' matches the requested wildcards
                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
                        remote_list[key] = objectlist[key]
            else:
                ## No wildcards - simply append the given URI to the list
                key = os.path.basename(uri.object())
                if not key:
                    raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
                remote_item = {
                    'base_uri': uri,
                    'object_uri_str': unicode(uri),
                    'object_key': uri.object()
                }
                if require_attribs:
                    response = S3(cfg).object_info(uri)
                    remote_item.update({
                        'size': int(response['headers']['content-length']),
                        'md5': response['headers']['etag'].strip('"\''),
                        'timestamp': dateRFC822toUnix(response['headers']['date'])
                    })
                    # get md5 from header if it's present.  We would have set that during upload
                    if 'x-amz-meta-s3cmd-attrs' in response['headers']:
                        attrs = parse_attrs_header(response['headers']['x-amz-meta-s3cmd-attrs'])
                        if 'md5' in attrs:
                            remote_item.update({'md5': attrs['md5']})

                remote_list[key] = remote_item
    return remote_list
Code example #22
File: FileLists.py Project: tarigancana/s3cmd
def fetch_local_list(args, is_src=False, recursive=None):
    def _fetch_local_list_info(loc_list):
        len_loc_list = len(loc_list)
        total_size = 0
        info(
            u"Running stat() and reading/calculating MD5 values on %d files, this may take some time..."
            % len_loc_list)
        counter = 0
        for relative_file in loc_list:
            counter += 1
            if counter % 1000 == 0:
                info(u"[%d/%d]" % (counter, len_loc_list))

            if relative_file == '-': continue

            full_name = loc_list[relative_file]['full_name']
            try:
                sr = os.stat_result(os.stat(deunicodise(full_name)))
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # file was removed async to us getting the list
                    continue
                else:
                    raise
            loc_list[relative_file].update({
                'size': sr.st_size,
                'mtime': sr.st_mtime,
                'dev': sr.st_dev,
                'inode': sr.st_ino,
                'uid': sr.st_uid,
                'gid': sr.st_gid,
                'sr': sr  # save it all, may need it in preserve_attrs_list
                ## TODO: Possibly more to save here...
            })
            total_size += sr.st_size
            if 'md5' in cfg.sync_checks:
                md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
                if md5 is None:
                    try:
                        md5 = loc_list.get_md5(
                            relative_file)  # this does the file I/O
                    except IOError:
                        continue
                    cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size,
                              md5)
                loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino,
                                         md5, sr.st_size)
        return total_size

    def _get_filelist_local(loc_list, local_uri, cache):
        info(u"Compiling list of local files...")

        if local_uri.basename() == "-":
            try:
                uid = os.geteuid()
                gid = os.getegid()
            except Exception:
                uid = 0
                gid = 0
            loc_list["-"] = {
                'full_name': '-',
                'size': -1,
                'mtime': -1,
                'uid': uid,
                'gid': gid,
                'dev': 0,
                'inode': 0,
            }
            return loc_list, True
        if local_uri.isdir():
            local_base = local_uri.basename()
            local_path = local_uri.path()
            if is_src and len(cfg.files_from):
                filelist = _get_filelist_from_file(cfg, local_path)
                single_file = False
            else:
                if cfg.follow_symlinks:
                    filelist = _fswalk_follow_symlinks(local_path)
                else:
                    filelist = _fswalk_no_symlinks(local_path)
                single_file = False
        else:
            local_base = ""
            local_path = local_uri.dirname()
            filelist = [(local_path, [], [local_uri.basename()])]
            single_file = True
        for root, dirs, files in filelist:
            rel_root = root.replace(local_path, local_base, 1)
            for f in files:
                full_name = os.path.join(root, f)
                if not os.path.isfile(deunicodise(full_name)):
                    if os.path.exists(deunicodise(full_name)):
                        warning(u"Skipping over non regular file: %s" %
                                full_name)
                    continue
                if os.path.islink(deunicodise(full_name)):
                    if not cfg.follow_symlinks:
                        warning(u"Skipping over symbolic link: %s" % full_name)
                        continue
                relative_file = os.path.join(rel_root, f)
                if os.path.sep != "/":
                    # Convert non-unix dir separators to '/'
                    relative_file = "/".join(relative_file.split(os.path.sep))
                if cfg.urlencoding_mode == "normal":
                    relative_file = replace_nonprintables(relative_file)
                if relative_file.startswith('./'):
                    relative_file = relative_file[2:]
                loc_list[relative_file] = {
                    'full_name': full_name,
                }

        return loc_list, single_file

    def _maintain_cache(cache, local_list):
        # if getting the file list from files_from, it is going to be
        # a subset of the actual tree.  We should not purge content
        # outside of that subset as we don't know if it's valid or
        # not.  Leave it to a non-files_from run to purge.
        if cfg.cache_file and len(cfg.files_from) == 0:
            cache.mark_all_for_purge()
            for i in local_list.keys():
                cache.unmark_for_purge(local_list[i]['dev'],
                                       local_list[i]['inode'],
                                       local_list[i]['mtime'],
                                       local_list[i]['size'])
            cache.purge()
            cache.save(cfg.cache_file)

    cfg = Config()

    cache = HashCache()
    if cfg.cache_file:
        try:
            cache.load(cfg.cache_file)
        except IOError:
            info(u"No cache file found, creating it.")

    local_uris = []
    local_list = FileDict(ignore_case=False)
    single_file = False

    if type(args) not in (list, tuple, set):
        args = [args]

    if recursive is None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 'file':
            raise ParameterError(
                "Expecting filename or directory instead of: %s" % arg)
        if uri.isdir() and not recursive:
            raise ParameterError("Use --recursive to upload a directory: %s" %
                                 arg)
        local_uris.append(uri)

    for uri in local_uris:
        list_for_uri, single_file = _get_filelist_local(local_list, uri, cache)

    ## single_file is True if and only if the user
    ## specified one local URI and that URI represents
    ## a FILE. I.e. it is False if the URI was a DIR,
    ## even if that dir contained only one FILE; that
    ## is not a case of single_file == True.
    if len(local_list) > 1:
        single_file = False

    local_list, exclude_list = filter_exclude_include(local_list)
    total_size = _fetch_local_list_info(local_list)
    _maintain_cache(cache, local_list)
    return local_list, single_file, exclude_list, total_size
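
As a closing sketch, this pairs naturally with the fetch_remote_list of code example #16 in a sync-style caller. Paths and URIs are made up, and the real s3cmd sync logic does considerably more (size and MD5 comparison, exclude handling, and so on):

local_list, single_file, local_excluded, local_size = fetch_local_list(
    '/home/user/photos/', is_src=True, recursive=True)
remote_list, remote_excluded, remote_size = fetch_remote_list(
    's3://example-bucket/photos/', require_attribs=True, recursive=True)

to_upload = [key for key in local_list if key not in remote_list]
existing = [key for key in local_list if key in remote_list]
print('%d new files, %d already remote, %d local bytes' %
      (len(to_upload), len(existing), local_size))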