Code example #1
File: runner.py Project: lijiayong/arvados
def upload_workflow_collection(arvrunner, name, packed):
    collection = arvados.collection.Collection(
        api_client=arvrunner.api,
        keep_client=arvrunner.keep_client,
        num_retries=arvrunner.num_retries)
    with collection.open("workflow.cwl", "w") as f:
        f.write(
            json.dumps(packed,
                       indent=2,
                       sort_keys=True,
                       separators=(',', ': ')))

    filters = [["portable_data_hash", "=",
                collection.portable_data_hash()], ["name", "like", name + "%"]]
    if arvrunner.project_uuid:
        filters.append(["owner_uuid", "=", arvrunner.project_uuid])
    exists = arvrunner.api.collections().list(filters=filters).execute(
        num_retries=arvrunner.num_retries)

    if exists["items"]:
        logger.info("Using collection %s", exists["items"][0]["uuid"])
    else:
        collection.save_new(name=name,
                            owner_uuid=arvrunner.project_uuid,
                            ensure_unique_name=True,
                            num_retries=arvrunner.num_retries)
        logger.info("Uploaded to %s", collection.manifest_locator())

    return collection.portable_data_hash()
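
A minimal usage sketch (not from the source): any object exposing the four attributes the function reads (api, keep_client, num_retries, and project_uuid) will do. arvados.api() and KeepClient are real SDK entry points; the Runner holder below is hypothetical.

import arvados
import arvados.keep

class Runner(object):
    # Hypothetical holder matching the attributes upload_workflow_collection reads.
    def __init__(self, api_client, project_uuid=None):
        self.api = api_client
        self.keep_client = arvados.keep.KeepClient(api_client=api_client)
        self.num_retries = 4
        self.project_uuid = project_uuid

pdh = upload_workflow_collection(Runner(arvados.api("v1")), "my-workflow",
                                 {"cwlVersion": "v1.0", "class": "Workflow"})
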
Code example #2
def listdir(self, fn):  # type: (unicode) -> List[unicode]
    collection, rest = self.get_collection(fn)
    if collection is not None:
        if rest:
            dir = collection.find(rest)
        else:
            dir = collection
        if dir is None:
            raise IOError(
                errno.ENOENT, "Directory '%s' in '%s' not found" %
                (rest, collection.portable_data_hash()))
        if not isinstance(dir, arvados.collection.RichCollectionBase):
            raise IOError(
                errno.ENOENT, "Path '%s' in '%s' is not a Directory" %
                (rest, collection.portable_data_hash()))
        return [abspath(l, fn) for l in list(dir.keys())]
    else:
        return super(CollectionFsAccess, self).listdir(fn)
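
A usage sketch for the method above, assuming fs is an already constructed CollectionFsAccess instance and that get_collection() parses locations of the form keep:<portable_data_hash>/<subpath> (an assumption here, mirroring the Keep references used elsewhere in these examples):

# Hypothetical call; returns absolute child locations, one per entry,
# e.g. ["keep:99999999999999999999999999999999+99/data/a.txt", ...]
entries = fs.listdir("keep:99999999999999999999999999999999+99/data")
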
Code example #3
File: runner.py Project: wtsi-hgi/arvados
def upload_workflow_collection(arvrunner, name, packed):
    collection = arvados.collection.Collection(api_client=arvrunner.api,
                                               keep_client=arvrunner.keep_client,
                                               num_retries=arvrunner.num_retries)
    with collection.open("workflow.cwl", "w") as f:
        f.write(json.dumps(packed, indent=2, sort_keys=True, separators=(',',': ')))

    filters = [["portable_data_hash", "=", collection.portable_data_hash()],
               ["name", "like", name+"%"]]
    if arvrunner.project_uuid:
        filters.append(["owner_uuid", "=", arvrunner.project_uuid])
    exists = arvrunner.api.collections().list(filters=filters).execute(num_retries=arvrunner.num_retries)

    if exists["items"]:
        logger.info("Using collection %s", exists["items"][0]["uuid"])
    else:
        collection.save_new(name=name,
                            owner_uuid=arvrunner.project_uuid,
                            ensure_unique_name=True,
                            num_retries=arvrunner.num_retries)
        logger.info("Uploaded to %s", collection.manifest_locator())

    return collection.portable_data_hash()
Code example #4
File: run.py Project: chapmanb/arvados
def uploadfiles(files, api, dry_run=False, num_retries=0,
                project=None,
                fnPattern="$(file %s/%s)",
                name=None,
                collection=None,
                packed=True):
    # Find the smallest path prefix that includes all the files that need to be uploaded.
    # This starts at the root and iteratively removes common parent directory prefixes
    # until all file paths no longer have a common parent.
    if files:
        n = True
        pathprefix = "/"
        while n:
            pathstep = None
            for c in files:
                if pathstep is None:
                    sp = c.fn.split('/')
                    if len(sp) < 2:
                        # no parent directories left
                        n = False
                        break
                    # path step takes next directory
                    pathstep = sp[0] + "/"
                else:
                    # check if pathstep is common prefix for all files
                    if not c.fn.startswith(pathstep):
                        n = False
                        break
            if n:
                # pathstep is common parent directory for all files, so remove the prefix
                # from each path
                pathprefix += pathstep
                for c in files:
                    c.fn = c.fn[len(pathstep):]

        logger.info("Upload local files: \"%s\"", '" "'.join([c.fn for c in files]))

    if dry_run:
        logger.info("$(input) is %s", pathprefix.rstrip('/'))
        pdh = "$(input)"
    else:
        files = sorted(files, key=lambda x: x.fn)
        if collection is None:
            collection = arvados.collection.Collection(api_client=api, num_retries=num_retries)
        prev = ""
        for f in files:
            localpath = os.path.join(pathprefix, f.fn)
            if prev and localpath.startswith(prev+"/"):
                # If this path is inside an already uploaded subdirectory,
                # don't redundantly re-upload it.
                # e.g. we uploaded /tmp/foo and the next file is /tmp/foo/bar
                # skip it because it starts with "/tmp/foo/"
                continue
            prev = localpath
            if os.path.isfile(localpath):
                write_file(collection, pathprefix, f.fn, not packed)
            elif os.path.isdir(localpath):
                for root, dirs, iterfiles in os.walk(localpath):
                    root = root[len(pathprefix):]
                    for src in iterfiles:
                        write_file(collection, pathprefix, os.path.join(root, src), not packed)

        pdh = None
        if len(collection) > 0:
            # non-empty collection
            filters = [["portable_data_hash", "=", collection.portable_data_hash()]]
            name_pdh = "%s (%s)" % (name, collection.portable_data_hash())
            if name:
                filters.append(["name", "=", name_pdh])
            if project:
                filters.append(["owner_uuid", "=", project])

            # do the list / create in a loop with up to 2 tries as we are using `ensure_unique_name=False`
            # and there is a potential race with other workflows that may have created the collection
            # between when we list it and find it does not exist and when we attempt to create it.
            tries = 2
            while pdh is None and tries > 0:
                exists = api.collections().list(filters=filters, limit=1).execute(num_retries=num_retries)

                if exists["items"]:
                    item = exists["items"][0]
                    pdh = item["portable_data_hash"]
                    logger.info("Using collection %s (%s)", pdh, item["uuid"])
                else:
                    try:
                        collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=False)
                        pdh = collection.portable_data_hash()
                        logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
                    except arvados.errors.ApiError as ae:
                        tries -= 1
            if pdh is None:
                # Something weird is going on here, probably a collection
                # with a conflicting name but the wrong PDH.  We won't be
                # able to reuse it, but we still need to save our
                # collection, so save it with a unique name.
                logger.info("Name conflict on '%s', existing collection has an unexpected portable data hash", name_pdh)
                collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=True)
                pdh = collection.portable_data_hash()
                logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
        else:
            # empty collection
            pdh = collection.portable_data_hash()
            assert (pdh == config.EMPTY_BLOCK_LOCATOR), "Empty collection portable_data_hash did not have expected locator, was %s" % pdh
            logger.info("Using empty collection %s", pdh)

    for c in files:
        c.keepref = "%s/%s" % (pdh, c.fn)
        c.fn = fnPattern % (pdh, c.fn)
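
Note that uploadfiles() mutates its inputs: files must be objects with a writable fn attribute, and on return each entry gains a keepref and has its fn rewritten through fnPattern. A sketch with a hypothetical stand-in record (the real callers in these examples pass records collected by the path mapper):

import arvados

class UploadFile(object):
    # Hypothetical stand-in; only the mutable fn attribute matters here.
    def __init__(self, fn):
        self.fn = fn

files = [UploadFile("/tmp/data/a.txt"), UploadFile("/tmp/data/b.txt")]
uploadfiles(files, arvados.api("v1"), name="my inputs",
            fnPattern="keep:%s/%s", num_retries=2)
for f in files:
    print("%s -> %s" % (f.keepref, f.fn))  # "<pdh>/a.txt" -> "keep:<pdh>/a.txt"
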
Code example #5
File: fsaccess.py Project: tomclegg/arvados
def listdir(self, fn):  # type: (unicode) -> List[unicode]
    collection, rest = self.get_collection(fn)
    if collection:
        if rest:
            dir = collection.find(rest)
        else:
            dir = collection
        if dir is None:
            raise IOError(errno.ENOENT, "Directory '%s' in '%s' not found" % (rest, collection.portable_data_hash()))
        if not isinstance(dir, arvados.collection.RichCollectionBase):
            raise IOError(errno.ENOENT, "Path '%s' in '%s' is not a Directory" % (rest, collection.portable_data_hash()))
        return [abspath(l, fn) for l in dir.keys()]
    else:
        return super(CollectionFsAccess, self).listdir(fn)
Code example #6
File: pathmapper.py Project: top501/arvados
    def setup(self, referenced_files, basedir):
        # type: (List[Any], unicode) -> None
        uploadfiles = set()

        collection = None
        if self.single_collection:
            collection = arvados.collection.Collection(
                api_client=self.arvrunner.api,
                keep_client=self.arvrunner.keep_client,
                num_retries=self.arvrunner.num_retries)

        already_uploaded = self.arvrunner.get_uploaded()
        copied_files = set()
        for k in referenced_files:
            loc = k["location"]
            if loc in already_uploaded:
                v = already_uploaded[loc]
                self._pathmap[loc] = MapperEnt(
                    v.resolved,
                    self.collection_pattern % urllib.unquote(v.resolved[5:]),
                    v.type, True)
                if self.single_collection:
                    basename = k["basename"]
                    if basename not in collection:
                        self.addentry(
                            {
                                "location": loc,
                                "class": v.type,
                                "basename": basename
                            }, collection, ".", [])
                        copied_files.add((loc, basename, v.type))

        for srcobj in referenced_files:
            self.visit(srcobj, uploadfiles)

        arvados.commands.run.uploadfiles(
            [u[2] for u in uploadfiles],
            self.arvrunner.api,
            dry_run=False,
            num_retries=self.arvrunner.num_retries,
            fnPattern="keep:%s/%s",
            name=self.name,
            project=self.arvrunner.project_uuid,
            collection=collection)

        for src, ab, st in uploadfiles:
            self._pathmap[src] = MapperEnt(
                urllib.quote(st.fn,
                             "/:+@"), self.collection_pattern % st.fn[5:],
                "Directory" if os.path.isdir(ab) else "File", True)
            self.arvrunner.add_uploaded(src, self._pathmap[src])

        for loc, basename, cls in copied_files:
            fn = "keep:%s/%s" % (collection.portable_data_hash(), basename)
            self._pathmap[loc] = MapperEnt(urllib.quote(fn, "/:+@"),
                                           self.collection_pattern % fn[5:],
                                           cls, True)

        for srcobj in referenced_files:
            subdirs = []
            if srcobj["class"] == "Directory" and srcobj[
                    "location"] not in self._pathmap:
                c = arvados.collection.Collection(
                    api_client=self.arvrunner.api,
                    keep_client=self.arvrunner.keep_client,
                    num_retries=self.arvrunner.num_retries)
                for l in srcobj.get("listing", []):
                    self.addentry(l, c, ".", subdirs)

                check = self.arvrunner.api.collections().list(
                    filters=[[
                        "portable_data_hash", "=",
                        c.portable_data_hash()
                    ]],
                    limit=1).execute(num_retries=self.arvrunner.num_retries)
                if not check["items"]:
                    c.save_new(owner_uuid=self.arvrunner.project_uuid)

                ab = self.collection_pattern % c.portable_data_hash()
                self._pathmap[srcobj["location"]] = MapperEnt(
                    "keep:" + c.portable_data_hash(), ab, "Directory", True)
            elif srcobj["class"] == "File" and (
                    srcobj.get("secondaryFiles") or
                (srcobj["location"].startswith("_:")
                 and "contents" in srcobj)):

                c = arvados.collection.Collection(
                    api_client=self.arvrunner.api,
                    keep_client=self.arvrunner.keep_client,
                    num_retries=self.arvrunner.num_retries)
                self.addentry(srcobj, c, ".", subdirs)

                check = self.arvrunner.api.collections().list(
                    filters=[[
                        "portable_data_hash", "=",
                        c.portable_data_hash()
                    ]],
                    limit=1).execute(num_retries=self.arvrunner.num_retries)
                if not check["items"]:
                    c.save_new(owner_uuid=self.arvrunner.project_uuid)

                ab = self.file_pattern % (c.portable_data_hash(),
                                          srcobj["basename"])
                self._pathmap[srcobj["location"]] = MapperEnt(
                    "keep:%s/%s" %
                    (c.portable_data_hash(), srcobj["basename"]), ab, "File",
                    True)
                if srcobj.get("secondaryFiles"):
                    ab = self.collection_pattern % c.portable_data_hash()
                    self._pathmap["_:" + unicode(uuid.uuid4())] = MapperEnt(
                        "keep:" + c.portable_data_hash(), ab, "Directory",
                        True)

            if subdirs:
                for loc, sub in subdirs:
                    # subdirs will all start with "./", strip it off
                    ab = self.file_pattern % (c.portable_data_hash(), sub[2:])
                    self._pathmap[loc] = MapperEnt(
                        "keep:%s/%s" % (c.portable_data_hash(), sub[2:]), ab,
                        "Directory", True)

        self.keepdir = None
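
For reference, the MapperEnt records built throughout setup() follow cwltool's path-mapper shape: a resolved source location, a staged target path, a type ("File" or "Directory"), and a staged flag. A sketch of that shape, stated as an assumption mirroring cwltool's definition rather than code shown here:

import collections

# Assumed shape, mirroring cwltool.pathmapper.MapperEnt.
MapperEnt = collections.namedtuple(
    "MapperEnt", ["resolved", "target", "type", "staged"])

ent = MapperEnt("keep:99999999999999999999999999999999+99/data",
                "/keep/99999999999999999999999999999999+99/data",
                "Directory", True)
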
Code example #7
File: run.py Project: wtsi-hgi/arvados
def uploadfiles(files, api, dry_run=False, num_retries=0,
                project=None,
                fnPattern="$(file %s/%s)",
                name=None,
                collection=None,
                packed=True):
    # Find the smallest path prefix that includes all the files that need to be uploaded.
    # This starts at the root and iteratively removes common parent directory prefixes
    # until all file paths no longer have a common parent.
    if files:
        n = True
        pathprefix = "/"
        while n:
            pathstep = None
            for c in files:
                if pathstep is None:
                    sp = c.fn.split('/')
                    if len(sp) < 2:
                        # no parent directories left
                        n = False
                        break
                    # path step takes next directory
                    pathstep = sp[0] + "/"
                else:
                    # check if pathstep is common prefix for all files
                    if not c.fn.startswith(pathstep):
                        n = False
                        break
            if n:
                # pathstep is common parent directory for all files, so remove the prefix
                # from each path
                pathprefix += pathstep
                for c in files:
                    c.fn = c.fn[len(pathstep):]

        logger.info("Upload local files: \"%s\"", '" "'.join([c.fn for c in files]))

    if dry_run:
        logger.info("$(input) is %s", pathprefix.rstrip('/'))
        pdh = "$(input)"
    else:
        files = sorted(files, key=lambda x: x.fn)
        if collection is None:
            collection = arvados.collection.Collection(api_client=api, num_retries=num_retries)
        prev = ""
        for f in files:
            localpath = os.path.join(pathprefix, f.fn)
            if prev and localpath.startswith(prev+"/"):
                # If this path is inside an already uploaded subdirectory,
                # don't redundantly re-upload it.
                # e.g. we uploaded /tmp/foo and the next file is /tmp/foo/bar
                # skip it because it starts with "/tmp/foo/"
                continue
            prev = localpath
            if os.path.isfile(localpath):
                write_file(collection, pathprefix, f.fn, not packed)
            elif os.path.isdir(localpath):
                for root, dirs, iterfiles in os.walk(localpath):
                    root = root[len(pathprefix):]
                    for src in iterfiles:
                        write_file(collection, pathprefix, os.path.join(root, src), not packed)

        filters = [["portable_data_hash", "=", collection.portable_data_hash()]]
        if name:
            filters.append(["name", "like", name+"%"])
        if project:
            filters.append(["owner_uuid", "=", project])

        exists = api.collections().list(filters=filters, limit=1).execute(num_retries=num_retries)

        if exists["items"]:
            item = exists["items"][0]
            pdh = item["portable_data_hash"]
            logger.info("Using collection %s (%s)", pdh, item["uuid"])
        elif len(collection) > 0:
            collection.save_new(name=name, owner_uuid=project, ensure_unique_name=True)
            pdh = collection.portable_data_hash()
            logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())

    for c in files:
        c.keepref = "%s/%s" % (pdh, c.fn)
        c.fn = fnPattern % (pdh, c.fn)
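
The filters argument used throughout these examples is the Arvados API's list-filter syntax: each filter is an [attribute, operator, operand] triple, and a record must satisfy all of them. A standalone query sketch (the locator shown is the well-known empty-collection hash):

import arvados

api = arvados.api("v1")
filters = [["portable_data_hash", "=", "d41d8cd98f00b204e9800998ecf8427e+0"],
           ["name", "like", "my inputs%"]]
page = api.collections().list(filters=filters, limit=1).execute()
for item in page["items"]:
    print("%s %s" % (item["uuid"], item["portable_data_hash"]))
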
Code example #8
File: run.py Project: top501/arvados
def uploadfiles(files,
                api,
                dry_run=False,
                num_retries=0,
                project=None,
                fnPattern="$(file %s/%s)",
                name=None,
                collection=None):
    # Find the smallest path prefix that includes all the files that need to be uploaded.
    # This starts at the root and iteratively removes common parent directory prefixes
    # until all file paths no longer have a common parent.
    if files:
        n = True
        pathprefix = "/"
        while n:
            pathstep = None
            for c in files:
                if pathstep is None:
                    sp = c.fn.split('/')
                    if len(sp) < 2:
                        # no parent directories left
                        n = False
                        break
                    # path step takes next directory
                    pathstep = sp[0] + "/"
                else:
                    # check if pathstep is common prefix for all files
                    if not c.fn.startswith(pathstep):
                        n = False
                        break
            if n:
                # pathstep is common parent directory for all files, so remove the prefix
                # from each path
                pathprefix += pathstep
                for c in files:
                    c.fn = c.fn[len(pathstep):]

        logger.info("Upload local files: \"%s\"",
                    '" "'.join([c.fn for c in files]))

    if dry_run:
        logger.info("$(input) is %s", pathprefix.rstrip('/'))
        pdh = "$(input)"
    else:
        files = sorted(files, key=lambda x: x.fn)
        if collection is None:
            collection = arvados.collection.Collection(api_client=api,
                                                       num_retries=num_retries)
        prev = ""
        for f in files:
            localpath = os.path.join(pathprefix, f.fn)
            if prev and localpath.startswith(prev + "/"):
                # If this path is inside an already uploaded subdirectory,
                # don't redundantly re-upload it.
                # e.g. we uploaded /tmp/foo and the next file is /tmp/foo/bar
                # skip it because it starts with "/tmp/foo/"
                continue
            prev = localpath
            if os.path.isfile(localpath):
                write_file(collection, pathprefix, f.fn)
            elif os.path.isdir(localpath):
                for root, dirs, iterfiles in os.walk(localpath):
                    root = root[len(pathprefix):]
                    for src in iterfiles:
                        write_file(collection, pathprefix,
                                   os.path.join(root, src))

        filters = [[
            "portable_data_hash", "=",
            collection.portable_data_hash()
        ]]
        if name:
            filters.append(["name", "like", name + "%"])
        if project:
            filters.append(["owner_uuid", "=", project])

        exists = api.collections().list(
            filters=filters, limit=1).execute(num_retries=num_retries)

        if exists["items"]:
            item = exists["items"][0]
            pdh = item["portable_data_hash"]
            logger.info("Using collection %s (%s)", pdh, item["uuid"])
        elif len(collection) > 0:
            collection.save_new(name=name,
                                owner_uuid=project,
                                ensure_unique_name=True)
            pdh = collection.portable_data_hash()
            logger.info("Uploaded to %s (%s)", pdh,
                        collection.manifest_locator())

    for c in files:
        c.keepref = "%s/%s" % (pdh, c.fn)
        c.fn = fnPattern % (pdh, c.fn)
Code example #9
def uploadfiles(files,
                api,
                dry_run=False,
                num_retries=0,
                project=None,
                fnPattern="$(file %s/%s)",
                name=None,
                collection=None,
                packed=True):
    # Find the smallest path prefix that includes all the files that need to be uploaded.
    # This starts at the root and iteratively removes common parent directory prefixes
    # until all file paths no longer have a common parent.
    if files:
        n = True
        pathprefix = "/"
        while n:
            pathstep = None
            for c in files:
                if pathstep is None:
                    sp = c.fn.split('/')
                    if len(sp) < 2:
                        # no parent directories left
                        n = False
                        break
                    # path step takes next directory
                    pathstep = sp[0] + "/"
                else:
                    # check if pathstep is common prefix for all files
                    if not c.fn.startswith(pathstep):
                        n = False
                        break
            if n:
                # pathstep is common parent directory for all files, so remove the prefix
                # from each path
                pathprefix += pathstep
                for c in files:
                    c.fn = c.fn[len(pathstep):]

        logger.info("Upload local files: \"%s\"",
                    '" "'.join([c.fn for c in files]))

    if dry_run:
        logger.info("$(input) is %s", pathprefix.rstrip('/'))
        pdh = "$(input)"
    else:
        files = sorted(files, key=lambda x: x.fn)
        if collection is None:
            collection = arvados.collection.Collection(api_client=api,
                                                       num_retries=num_retries)
        prev = ""
        for f in files:
            localpath = os.path.join(pathprefix, f.fn)
            if prev and localpath.startswith(prev + "/"):
                # If this path is inside an already uploaded subdirectory,
                # don't redundantly re-upload it.
                # e.g. we uploaded /tmp/foo and the next file is /tmp/foo/bar
                # skip it because it starts with "/tmp/foo/"
                continue
            prev = localpath
            if os.path.isfile(localpath):
                write_file(collection, pathprefix, f.fn, not packed)
            elif os.path.isdir(localpath):
                for root, dirs, iterfiles in os.walk(localpath):
                    root = root[len(pathprefix):]
                    for src in iterfiles:
                        write_file(collection, pathprefix,
                                   os.path.join(root, src), not packed)

        pdh = None
        if len(collection) > 0:
            # non-empty collection
            filters = [[
                "portable_data_hash", "=",
                collection.portable_data_hash()
            ]]
            name_pdh = "%s (%s)" % (name, collection.portable_data_hash())
            if name:
                filters.append(["name", "=", name_pdh])
            if project:
                filters.append(["owner_uuid", "=", project])

            # do the list / create in a loop with up to 2 tries as we are using `ensure_unique_name=False`
            # and there is a potential race with other workflows that may have created the collection
            # between when we list it and find it does not exist and when we attempt to create it.
            tries = 2
            while pdh is None and tries > 0:
                exists = api.collections().list(
                    filters=filters, limit=1).execute(num_retries=num_retries)

                if exists["items"]:
                    item = exists["items"][0]
                    pdh = item["portable_data_hash"]
                    logger.info("Using collection %s (%s)", pdh, item["uuid"])
                else:
                    try:
                        collection.save_new(name=name_pdh,
                                            owner_uuid=project,
                                            ensure_unique_name=False)
                        pdh = collection.portable_data_hash()
                        logger.info("Uploaded to %s (%s)", pdh,
                                    collection.manifest_locator())
                    except arvados.errors.ApiError as ae:
                        tries -= 1
            if pdh is None:
                # Something weird is going on here, probably a collection
                # with a conflicting name but the wrong PDH.  We won't be
                # able to reuse it, but we still need to save our
                # collection, so save it with a unique name.
                logger.info(
                    "Name conflict on '%s', existing collection has an unexpected portable data hash",
                    name_pdh)
                collection.save_new(name=name_pdh,
                                    owner_uuid=project,
                                    ensure_unique_name=True)
                pdh = collection.portable_data_hash()
                logger.info("Uploaded to %s (%s)", pdh,
                            collection.manifest_locator())
        else:
            # empty collection
            pdh = collection.portable_data_hash()
            assert (
                pdh == config.EMPTY_BLOCK_LOCATOR
            ), "Empty collection portable_data_hash did not have expected locator, was %s" % pdh
            logger.debug("Using empty collection %s", pdh)

    for c in files:
        c.keepref = "%s/%s" % (pdh, c.fn)
        c.fn = fnPattern % (pdh, c.fn)
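
The bounded list/create loop above generalizes beyond Arvados: it is the standard way to make "get or create by unique name" idempotent when concurrent writers can race between the list and the create. A distilled sketch of the pattern:

def get_or_create(list_fn, create_fn, tries=2):
    # List-then-create with bounded retries: if create_fn() fails because
    # another writer won the race, loop back and list again, which should
    # now find the record that writer created.
    for _ in range(tries):
        found = list_fn()
        if found:
            return found
        try:
            return create_fn()
        except Exception:
            pass
    return None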