def _process_stages(
    repo, sub_targets, stages, no_commit, pbar, to_remote, to_cache, **kwargs
):
    link_failures = []

    from dvc.dvcfile import Dvcfile
    from dvc.exceptions import CacheLinkError
    from dvc.progress import Tqdm

    from ..output.base import OutputDoesNotExistError

    if to_remote or to_cache:
        # Already verified in add(): these options only apply to a single
        # target, so each sequence unpacks exactly one element.
        (stage,) = stages
        (target,) = sub_targets
        (out,) = stage.outs

        if to_remote:
            # Transfer the data straight to the remote; only the resulting
            # hash is recorded locally.
            out.hash_info = repo.cloud.transfer(
                target,
                jobs=kwargs.get("jobs"),
                remote=kwargs.get("remote"),
                command="add",
            )
        else:
            from dvc.fs import get_cloud_fs
            from dvc.objects import transfer

            # Transfer the data into the local cache, then check it out
            # into the workspace.
            from_fs = get_cloud_fs(repo, url=target)
            out.hash_info = transfer(
                out.odb,
                from_fs,
                from_fs.path_info,
                jobs=kwargs.get("jobs"),
            )
            out.checkout()

        Dvcfile(repo, stage.path).dump(stage)
        return link_failures

    with Tqdm(
        total=len(stages),
        desc="Processing",
        unit="file",
        disable=len(stages) == 1,
    ) as pbar_stages:
        for stage in stages:
            try:
                stage.save()
            except OutputDoesNotExistError:
                pbar.n -= 1
                raise

            try:
                if not no_commit:
                    stage.commit()
            except CacheLinkError:
                # Linking the cached file back into the workspace failed;
                # collect the stage for the caller instead of aborting.
                link_failures.append(stage)

            Dvcfile(repo, stage.path).dump(stage)
            pbar_stages.update()

    return link_failures
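# Usage sketch (an assumption, not part of the original module): the
# to_remote/to_cache branches above correspond to adding an external URL
# through the high-level API; the URL, remote name, and keyword values
# below are hypothetical.
#
#     from dvc.repo import Repo
#
#     repo = Repo(".")
#     repo.add(
#         "s3://bucket/data.csv",  # hypothetical external URL
#         to_remote=True,
#         remote="myremote",  # hypothetical remote name
#         jobs=4,
#     )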
def transfer(self, from_fs, from_info, jobs=None, no_progress_bar=False):
    # Thin wrapper that transfers `from_info` on `from_fs` into this
    # output's object database.
    from dvc.objects import transfer

    return transfer(
        self.odb,
        from_fs,
        from_info,
        jobs=jobs,
        no_progress_bar=no_progress_bar,
    )
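# Usage sketch (assumed names, mirroring the to_cache branch of
# _process_stages above): pull an object from a cloud filesystem into the
# output's object database, then check it out into the workspace.
#
#     from dvc.fs import get_cloud_fs
#
#     from_fs = get_cloud_fs(repo, url="s3://bucket/data.csv")  # hypothetical URL
#     out.hash_info = out.transfer(from_fs, from_fs.path_info, jobs=4)
#     out.checkout()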