Example #1
def download(args):
    folders_to_get, files_to_get, count = collections.defaultdict(list), collections.defaultdict(list), 0
    foldernames, filenames = [], []
    for path in args.paths:
        # Attempt to resolve name. If --all is given or the path looks like a glob, download all matches.
        # Otherwise, the resolver will display a picker (or error out if there is no tty to display to).
        resolver_kwargs = {'allow_empty_string': False}
        if args.all or _is_glob(path):
            resolver_kwargs.update({'allow_mult': True, 'all_mult': True})

        project, folderpath, matching_files = try_call(resolve_existing_path, path, **resolver_kwargs)
        if matching_files is None:
            matching_files = []
        elif not isinstance(matching_files, list):
            matching_files = [matching_files]

        # TODO: this could also be returned as metadata by resolve_path since
        # resolve_path knows these things in some circumstances
        path_has_explicit_proj = is_project_explicit(path) or is_jbor_str(path)

        if is_jbor_str(path):
            assert len(matching_files) == 1
            project = matching_files[0]["describe"]["project"]

        matching_folders = []
        # project may be none if path is an ID and there is no project context
        if project is not None:
            colon_pos = get_first_pos_of_char(":", path)
            if colon_pos >= 0:
                path = path[colon_pos + 1:]
            abs_path, strip_prefix = _rel2abs(path, project)
            parent_folder = os.path.dirname(abs_path)
            folder_listing = dxpy.list_subfolders(project, parent_folder, recurse=False)
            matching_folders = pathmatch.filter(folder_listing, abs_path)
            if '/' in matching_folders and len(matching_folders) > 1:
                # The list of subfolders is {'/', '/A', '/B'}.
                # Remove '/', otherwise we will download everything twice.
                matching_folders.remove('/')

        if len(matching_files) == 0 and len(matching_folders) == 0:
            err_exit(fill('Error: {path} is neither a file nor a folder name'.format(path=path)))

        # If the user did not explicitly provide the project, don't pass any
        # project parameter to the API call but continue with the download.
        if not path_has_explicit_proj:
            project = dxpy.DXFile.NO_PROJECT_HINT

        # If the user explicitly provided the project and it doesn't contain
        # the files, don't allow the download.
        #
        # If length of matching_files is 0 then we're only downloading folders
        # so skip this logic since the files will be verified in the API call.
        if len(matching_files) > 0 and path_has_explicit_proj and not \
                any(object_exists_in_project(f['describe']['id'], project) for f in matching_files):
            err_exit(fill('Error: specified project does not contain specified file object'))

        files_to_get[project].extend(matching_files)
        folders_to_get[project].extend(((f, strip_prefix) for f in matching_folders))
        count += len(matching_files) + len(matching_folders)

        filenames.extend(f["describe"]["name"] for f in matching_files)
        foldernames.extend(f[len(strip_prefix):].lstrip('/') for f in matching_folders)

    if len(filenames) > 0 and len(foldernames) > 0:
        name_conflicts = set(filenames) & set(foldernames)
        if len(name_conflicts) > 0:
            msg = "Error: The following paths are both file and folder names, and " \
                  "cannot be downloaded to the same destination: "
            msg += ", ".join(sorted(name_conflicts))
            err_exit(fill(msg))

    if args.output is None:
        destdir, dest_filename = os.getcwd(), None
    elif count > 1:
        if not os.path.exists(args.output):
            err_exit(fill("Error: When downloading multiple objects, --output must be an existing directory"))
        destdir, dest_filename = args.output, None
    elif os.path.isdir(args.output):
        destdir, dest_filename = args.output, None
    elif args.output.endswith('/'):
        err_exit(fill("Error: {path} could not be found".format(path=args.output)))
    else:
        destdir, dest_filename = os.getcwd(), args.output

    _download_folders(folders_to_get, destdir, args)
    _download_files(files_to_get, destdir, args, dest_filename=dest_filename)
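
The function reads an argparse-style namespace: args.paths (the remote paths or globs to fetch), args.all (download every match instead of prompting), and args.output (the local destination), and passes the whole namespace on to _download_folders and _download_files. A minimal, hypothetical invocation sketch; the project name, glob, and output directory below are illustrative only, not taken from the examples:

import argparse

args = argparse.Namespace(
    paths=["my-project:/reads/*.fastq.gz"],  # hypothetical project and glob
    all=True,        # take every glob match rather than showing a picker
    output="fastq")  # must be an existing directory when more than one object matches
download(args)
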
Example #2
def download(args):
    # Get space for caching subfolders
    cached_folder_lists = {}

    folders_to_get, files_to_get, count = collections.defaultdict(list), collections.defaultdict(list), 0
    foldernames, filenames = [], []
    for path in args.paths:
        # Attempt to resolve name. If --all is given or the path looks like a glob, download all matches.
        # Otherwise, the resolver will display a picker (or error out if there is no tty to display to).
        resolver_kwargs = {'allow_empty_string': False}
        if args.all or _is_glob(path):
            resolver_kwargs.update({'allow_mult': True, 'all_mult': True})
        project, folderpath, matching_files = try_call(resolve_existing_path, path, **resolver_kwargs)
        if matching_files is None:
            matching_files = []
        elif not isinstance(matching_files, list):
            matching_files = [matching_files]

        matching_folders = []
        if project is not None:
            # project may be none if path is an ID and there is no project context
            colon_pos = get_first_pos_of_char(":", path)
            if colon_pos >= 0:
                path = path[colon_pos + 1:]
            abs_path, strip_prefix = _rel2abs(path, project)
            parent_folder = os.path.dirname(abs_path)
            folder_listing = _list_subfolders(project, parent_folder, cached_folder_lists, recurse=False)
            matching_folders = pathmatch.filter(folder_listing, abs_path)
            if '/' in matching_folders and len(matching_folders) > 1:
                # The list of subfolders is {'/', '/A', '/B'}.
                # Remove '/', otherwise we will download everything twice.
                matching_folders.remove('/')

        if len(matching_files) == 0 and len(matching_folders) == 0:
            err_exit(fill('Error: {path} is neither a file nor a folder name'.format(path=path)))
        files_to_get[project].extend(matching_files)
        folders_to_get[project].extend(((f, strip_prefix) for f in matching_folders))
        count += len(matching_files) + len(matching_folders)

        filenames.extend(f["describe"]["name"] for f in matching_files)
        foldernames.extend(f[len(strip_prefix):].lstrip('/') for f in matching_folders)

    if len(filenames) > 0 and len(foldernames) > 0:
        name_conflicts = set(filenames) & set(foldernames)
        if len(name_conflicts) > 0:
            msg = "Error: The following paths are both file and folder names, and " \
                  "cannot be downloaded to the same destination: "
            msg += ", ".join(sorted(name_conflicts))
            err_exit(fill(msg))

    if args.output is None:
        destdir, dest_filename = os.getcwd(), None
    elif count > 1:
        if not os.path.exists(args.output):
            err_exit(fill("Error: When downloading multiple objects, --output must be an existing directory"))
        destdir, dest_filename = args.output, None
    elif os.path.isdir(args.output):
        destdir, dest_filename = args.output, None
    elif args.output.endswith('/'):
        err_exit(fill("Error: {path} could not be found".format(path=args.output)))
    else:
        destdir, dest_filename = os.getcwd(), args.output

    _download_folders(folders_to_get, destdir, cached_folder_lists, args)
    _download_files(files_to_get, destdir, args, dest_filename=dest_filename)
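
Unlike Example #1, which calls dxpy.list_subfolders directly, Examples #2 and #3 thread a cached_folder_lists dict through _list_subfolders and _download_folders so that repeated paths do not re-list the same parent folder. The helper itself is not shown in these examples; a minimal sketch, assuming it simply memoizes dxpy.list_subfolders results, might look like:

def _list_subfolders(project, path, cached_folder_lists, recurse=True):
    # Assumed behavior (not shown above): cache each listing so later paths
    # under the same parent folder reuse it instead of issuing another API call.
    key = (project, path, recurse)
    if key not in cached_folder_lists:
        cached_folder_lists[key] = list(dxpy.list_subfolders(project, path, recurse=recurse))
    return cached_folder_lists[key]
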
Example #3
def download(args):
    # Get space for caching subfolders
    cached_folder_lists = {}

    folders_to_get, files_to_get, count = collections.defaultdict(list), collections.defaultdict(list), 0
    foldernames, filenames = [], []
    for path in args.paths:
        # Attempt to resolve name. If --all is given or the path looks like a glob, download all matches.
        # Otherwise, the resolver will display a picker (or error out if there is no tty to display to).
        resolver_kwargs = {'allow_empty_string': False}
        if args.all or _is_glob(path):
            resolver_kwargs.update({'allow_mult': True, 'all_mult': True})

        project, folderpath, matching_files = try_call(resolve_existing_path, path, **resolver_kwargs)
        if matching_files is None:
            matching_files = []
        elif not isinstance(matching_files, list):
            matching_files = [matching_files]

        # TODO: this could also be returned as metadata by resolve_path since
        # resolve_path knows these things in some circumstances
        path_has_explicit_proj = is_project_explicit(path) or is_jbor_str(path)

        if is_jbor_str(path):
            assert len(matching_files) == 1
            project = matching_files[0]["describe"]["project"]

        matching_folders = []
        # project may be none if path is an ID and there is no project context
        if project is not None:
            colon_pos = get_first_pos_of_char(":", path)
            if colon_pos >= 0:
                path = path[colon_pos + 1:]
            abs_path, strip_prefix = _rel2abs(path, project)
            parent_folder = os.path.dirname(abs_path)
            folder_listing = _list_subfolders(project, parent_folder, cached_folder_lists, recurse=False)
            matching_folders = pathmatch.filter(folder_listing, abs_path)
            if '/' in matching_folders and len(matching_folders) > 1:
                # The list of subfolders is {'/', '/A', '/B'}.
                # Remove '/', otherwise we will download everything twice.
                matching_folders.remove('/')

        if len(matching_files) == 0 and len(matching_folders) == 0:
            err_exit(fill('Error: {path} is neither a file nor a folder name'.format(path=path)))

        # If the user did not explicitly provide the project, don't pass any
        # project parameter to the API call but continue with the download.
        if not path_has_explicit_proj:
            project = None

        # If the user explicitly provided the project and it doesn't contain
        # the files, don't allow the download.
        #
        # If length of matching_files is 0 then we're only downloading folders
        # so skip this logic since the files will be verified in the API call.
        if len(matching_files) > 0 and path_has_explicit_proj and not \
                any(object_exists_in_project(f['describe']['id'], project) for f in matching_files):
            err_exit(fill('Error: specified project does not contain specified file object'))

        files_to_get[project].extend(matching_files)
        folders_to_get[project].extend(((f, strip_prefix) for f in matching_folders))
        count += len(matching_files) + len(matching_folders)

        filenames.extend(f["describe"]["name"] for f in matching_files)
        foldernames.extend(f[len(strip_prefix):].lstrip('/') for f in matching_folders)

    if len(filenames) > 0 and len(foldernames) > 0:
        name_conflicts = set(filenames) & set(foldernames)
        if len(name_conflicts) > 0:
            msg = "Error: The following paths are both file and folder names, and " \
                  "cannot be downloaded to the same destination: "
            msg += ", ".join(sorted(name_conflicts))
            err_exit(fill(msg))

    if args.output is None:
        destdir, dest_filename = os.getcwd(), None
    elif count > 1:
        if not os.path.exists(args.output):
            err_exit(fill("Error: When downloading multiple objects, --output must be an existing directory"))
        destdir, dest_filename = args.output, None
    elif os.path.isdir(args.output):
        destdir, dest_filename = args.output, None
    elif args.output.endswith('/'):
        err_exit(fill("Error: {path} could not be found".format(path=args.output)))
    else:
        destdir, dest_filename = os.getcwd(), args.output

    _download_folders(folders_to_get, destdir, cached_folder_lists, args)
    _download_files(files_to_get, destdir, args, dest_filename=dest_filename)