def download(self, to, jobs=None):
    """Fetch this output's objects from remote storage and materialize
    them at *to*.

    Args:
        to: destination with ``fs`` and ``fs_path`` attributes.
        jobs: parallelism for the remote pull (None = default).
    """
    from dvc.objects.checkout import checkout

    # Pull every needed object from its corresponding object database
    # so the subsequent checkout can be served from the local cache.
    used = self.get_used_objs()
    for odb, objs in used.items():
        self.repo.cloud.pull(objs, jobs=jobs, odb=odb)

    # Link/copy the now-cached object tree into the destination.
    obj = self.get_obj()
    checkout(
        to.fs_path,
        to.fs,
        obj,
        self.repo.odb.local,
        dvcignore=None,
        state=self.repo.state,
    )
def commit(self, filter_info=None):
    """Save this output into the cache and relink it into the workspace.

    Args:
        filter_info: optional path inside a directory output; when given
            and different from the output's own path, only that entry is
            committed ("granular" commit).

    Raises:
        DoesNotExistError: if the output is missing from the workspace.
    """
    if not self.exists:
        raise self.DoesNotExistError(self)

    assert self.hash_info

    # Outputs with caching disabled have nothing to store.
    if not self.use_cache:
        return

    target = filter_info or self.path_info
    # A "granular" commit targets a single entry inside a cached directory.
    is_granular = bool(
        self.is_dir_checksum and filter_info and filter_info != self.path_info
    )

    if is_granular:
        obj = self._commit_granular_dir(filter_info)
    else:
        # Stage the workspace contents, then move them into the cache.
        staging, obj = ostage(
            self.odb,
            target,
            self.fs,
            self.odb.fs.PARAM_CHECKSUM,
            dvcignore=self.dvcignore,
        )
        otransfer(
            staging,
            self.odb,
            {obj.hash_info},
            shallow=False,
            move=True,
        )

    # Re-materialize the committed object in the workspace (relinked to
    # the cache) and restore the executable bit if needed.
    checkout(
        target,
        self.fs,
        obj,
        self.odb,
        relink=True,
        dvcignore=self.dvcignore,
        state=self.repo.state,
    )
    self.set_exec()
def checkout(
    self,
    force=False,
    progress_callback=None,
    relink=False,
    filter_info=None,
    allow_missing=False,
    checkpoint_reset=False,
    **kwargs,
):
    """Restore this output in the workspace from the cache.

    Returns:
        None when nothing was (or could be) checked out, otherwise a
        ``(added, modified)`` pair where ``added`` is True for an output
        that did not exist beforehand.

    Raises:
        CheckoutError: when checkout fails and neither ``allow_missing``
            nor ``self.checkpoint`` permits skipping it.
    """
    # Uncached outputs are never checked out; just report progress.
    if not self.use_cache:
        if progress_callback:
            progress_callback(
                str(self.path_info), self.get_files_number(filter_info)
            )
        return None

    obj = self.get_obj(filter_info=filter_info)
    if not obj and (filter_info and filter_info != self.path_info):
        # backward compatibility
        return None

    # Checkpoint outputs are reset by removing the workspace copy.
    if self.checkpoint and checkpoint_reset:
        if self.exists:
            self.remove()
        return None

    # Record existence *before* checkout mutates the workspace.
    existed_before = self.exists
    try:
        modified = checkout(
            filter_info or self.path_info,
            self.fs,
            obj,
            self.odb,
            force=force,
            progress_callback=progress_callback,
            relink=relink,
            state=self.repo.state,
            **kwargs,
        )
    except CheckoutError:
        # Best-effort modes tolerate a failed checkout.
        if allow_missing or self.checkpoint:
            return None
        raise

    self.set_exec()

    added = not existed_before
    if added:
        # A freshly created output cannot also be "modified".
        return added, False
    return added, modified