def iter_blobs(self, predicate=lambda t: True):
    """
    :return:
        Iterator yielding tuples of Blob objects and stages, tuple(stage, Blob)

    :param predicate:
        Function(t) returning True if tuple(stage, Blob) should be yielded by the
        iterator. A default filter, the BlobFilter, allows you to yield blobs
        only if they match a given list of paths."""
    for entry in mviter(self.entries):
        blob = entry.to_blob(self.repo)
        blob.size = entry.size
        output = (entry.stage, blob)
        if predicate(output):
            yield output
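
# Usage sketch for ``iter_blobs`` (illustrative only): ``index`` is assumed to be an
# already constructed index object of an existing repository, and ``BlobFilter`` is the
# path-based predicate mentioned in the docstring above.
#
#   from git.index.typ import BlobFilter
#
#   for stage, blob in index.iter_blobs(BlobFilter(['lib/'])):
#       print(stage, blob.path)
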
def unmerged_blobs(self):
    """
    :return:
        Dict(path : list(tuple(stage, Blob, ...))), a dictionary associating
        a path in the index with a list containing sorted stage/blob pairs

    :note:
        Blobs that have been removed in one side simply do not exist in the
        given stage. I.e. a file removed on the 'other' branch whose entries
        are at stage 3 will not have a stage 3 entry."""
    is_unmerged_blob = lambda t: t[0] != 0
    path_map = dict()
    for stage, blob in self.iter_blobs(is_unmerged_blob):
        path_map.setdefault(blob.path, list()).append((stage, blob))
    # END for each unmerged blob
    for blob_list in mviter(path_map):
        blob_list.sort()
    return path_map
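
# Usage sketch for ``unmerged_blobs`` during a merge conflict (illustrative only):
# ``index`` is assumed to be the index of a repository that currently has conflicts.
# Stage 1 holds the common ancestor, stage 2 'ours' and stage 3 'theirs'; a missing
# stage means the file was deleted on that side, as noted in the docstring above.
#
#   for path, stage_blob_pairs in index.unmerged_blobs().items():
#       for stage, blob in stage_blob_pairs:
#           print(path, stage, blob.hexsha)
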
def checkout(self, paths=None, force=False, fprogress=lambda *args: None, **kwargs):
    """Checkout the given paths or all files from the version known to the index into
    the working tree.

    :note:
        Be sure you have written pending changes using the ``write`` method
        in case you have altered the entries dictionary directly

    :param paths:
        If None, all paths in the index will be checked out. Otherwise an iterable
        of relative or absolute paths or a single path pointing to files or
        directories in the index is expected.

    :param force:
        If True, existing files will be overwritten even if they contain local
        modifications. If False, these will trigger a CheckoutError.

    :param fprogress:
        see the ``add`` method for signature and explanation. The provided progress
        information will contain None as path and item if no explicit paths are
        given. Otherwise progress information will be sent before and after a file
        has been checked out.

    :param kwargs:
        Additional arguments to be passed to git-checkout-index

    :return:
        iterable yielding paths to files which have been checked out and are
        guaranteed to match the version stored in the index

    :raise CheckoutError:
        If at least one file failed to be checked out. This is a summary; the
        method will check out as many files as it can anyway.
        The error is also raised if one of the files or directories does not exist
        in the index (as opposed to the original git command, which ignores them).
        A GitCommandError is raised if error lines could not be parsed - this truly
        is an exceptional state.

    .. note:: The checkout is limited to checking out the files in the
        index. Files which are not in the index anymore and exist in
        the working tree will not be deleted. This behaviour is fundamentally
        different to *head.checkout*, i.e. if you want git-checkout-like behaviour,
        use head.checkout instead of index.checkout.
    """
    args = ["--index"]
    if force:
        args.append("--force")

    def handle_stderr(proc, iter_checked_out_files):
        stderr = proc.stderr.read()
        if not stderr:
            return
        # line contents:
        stderr = stderr.decode(defenc)
        # git-checkout-index: this already exists
        failed_files = list()
        failed_reasons = list()
        unknown_lines = list()
        endings = (' already exists', ' is not in the cache',
                   ' does not exist at stage', ' is unmerged')
        for line in stderr.splitlines():
            if not line.startswith("git checkout-index: ") and not line.startswith("git-checkout-index: "):
                is_a_dir = " is a directory"
                unlink_issue = "unable to unlink old '"
                already_exists_issue = ' already exists, no checkout'   # created by entry.c:checkout_entry(...)
                if line.endswith(is_a_dir):
                    failed_files.append(line[:-len(is_a_dir)])
                    failed_reasons.append(is_a_dir)
                elif line.startswith(unlink_issue):
                    failed_files.append(line[len(unlink_issue):line.rfind("'")])
                    failed_reasons.append(unlink_issue)
                elif line.endswith(already_exists_issue):
                    failed_files.append(line[:-len(already_exists_issue)])
                    failed_reasons.append(already_exists_issue)
                else:
                    unknown_lines.append(line)
                continue
            # END special lines parsing

            for e in endings:
                if line.endswith(e):
                    failed_files.append(line[20:-len(e)])
                    failed_reasons.append(e)
                    break
                # END if ending matches
            # END for each possible ending
        # END for each line
        if unknown_lines:
            raise GitCommandError(("git-checkout-index",), 128, stderr)
        if failed_files:
            valid_files = list(set(iter_checked_out_files) - set(failed_files))
            raise CheckoutError(
                "Some files could not be checked out from the index due to local modifications",
                failed_files, valid_files, failed_reasons)
    # END stderr handler

    if paths is None:
        args.append("--all")
        kwargs['as_process'] = True
        fprogress(None, False, None)
        proc = self.repo.git.checkout_index(*args, **kwargs)
        proc.wait()
        fprogress(None, True, None)
        rval_iter = (e.path for e in mviter(self.entries))
        handle_stderr(proc, rval_iter)
        return rval_iter
    else:
        if isinstance(paths, string_types):
            paths = [paths]

        # make sure we have our entries loaded before we start checkout_index,
        # which will hold a lock on it. We try to get the lock as well during
        # our entries initialization.
        self.entries

        args.append("--stdin")
        kwargs['as_process'] = True
        kwargs['istream'] = subprocess.PIPE
        proc = self.repo.git.checkout_index(args, **kwargs)
        make_exc = lambda: GitCommandError(("git-checkout-index",) + tuple(args), 128, proc.stderr.read())
        checked_out_files = list()

        for path in paths:
            co_path = to_native_path_linux(self._to_relative_path(path))
            # if the item is not in the index, it could be a directory
            path_is_directory = False

            try:
                self.entries[(co_path, 0)]
            except KeyError:
                folder = co_path
                if not folder.endswith('/'):
                    folder += '/'
                for entry in mviter(self.entries):
                    if entry.path.startswith(folder):
                        p = entry.path
                        self._write_path_to_stdin(proc, p, p, make_exc,
                                                  fprogress, read_from_stdout=False)
                        checked_out_files.append(p)
                        path_is_directory = True
                    # END if entry is in directory
                # END for each entry
            # END path exception handling

            if not path_is_directory:
                self._write_path_to_stdin(proc, co_path, path, make_exc,
                                          fprogress, read_from_stdout=False)
                checked_out_files.append(co_path)
            # END path is a file
        # END for each path
        self._flush_stdin_and_wait(proc, ignore_stdout=True)

        handle_stderr(proc, checked_out_files)
        return checked_out_files
    # END paths handling
    assert False, "Should not reach this point"
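
# Usage sketch for ``checkout`` (illustrative only): ``index`` is assumed to be the
# index of an existing repository. A raised CheckoutError carries the failed and
# valid paths collected by ``handle_stderr`` above.
#
#   try:
#       checked_out = index.checkout(['lib/', 'README'], force=False)
#   except CheckoutError as err:
#       print("failed:", err.failed_files)
#       print("checked out:", err.valid_files)
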