def repo_creator(self):
    prefix = 'non_'
    if bare:
        prefix = ''
    # END handle prefix
    repo_dir = tempfile.mktemp(prefix="%sbare_%s" % (prefix, func.__name__))
    rw_repo = self.rorepo.clone(repo_dir, shared=True, bare=bare, n=True)

    rw_repo.head.commit = rw_repo.commit(working_tree_ref)
    if not bare:
        rw_repo.head.reference.checkout()
    # END handle checkout

    prev_cwd = os.getcwd()
    os.chdir(rw_repo.working_dir)
    try:
        try:
            return func(self, rw_repo)
        except:
            log.info("Keeping repo after failure: %s", repo_dir)
            repo_dir = None
            raise
    finally:
        os.chdir(prev_cwd)
        rw_repo.git.clear_cache()
        rw_repo = None
        if repo_dir is not None:
            gc.collect()
            gitdb.util.mman.collect()
            gc.collect()
            rmtree(repo_dir)
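# A hedged sketch of the decorator factory that encloses a closure like
# repo_creator above (GitPython's test helpers name it `with_rw_repo`);
# `working_tree_ref`, `bare`, and `func` are its closure variables, and the
# `...` stands for the repo_creator body shown above.
import functools

def with_rw_repo(working_tree_ref, bare=False):
    def argument_passer(func):
        @functools.wraps(func)
        def repo_creator(self):
            ...  # body as in the snippet above
        return repo_creator
    return argument_passer

# usage on a TestBase-style test method:
# @with_rw_repo('HEAD~7')
# def test_something(self, rw_repo):
#     assert not rw_repo.bare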
def remote_repo_creator(self):
    rw_daemon_repo_dir = tempfile.mktemp(prefix="daemon_repo-%s-" % func.__name__)
    rw_repo_dir = tempfile.mktemp(prefix="daemon_cloned_repo-%s-" % func.__name__)

    rw_daemon_repo = self.rorepo.clone(rw_daemon_repo_dir, shared=True, bare=True)
    # recursive alternates info ?
    rw_repo = rw_daemon_repo.clone(rw_repo_dir, shared=True, bare=False, n=True)
    try:
        rw_repo.head.commit = working_tree_ref
        rw_repo.head.reference.checkout()

        # prepare for git-daemon
        rw_daemon_repo.daemon_export = True

        # this thing is just annoying !
        with rw_daemon_repo.config_writer() as crw:
            section = "daemon"
            try:
                crw.add_section(section)
            except Exception:
                pass
            crw.set(section, "receivepack", True)

        # Initialize the remote - first do it as local remote and pull, then
        # we change the url to point to the daemon.
        d_remote = Remote.create(rw_repo, "daemon_origin", rw_daemon_repo_dir)
        d_remote.fetch()

        base_daemon_path, rel_repo_dir = osp.split(rw_daemon_repo_dir)

        remote_repo_url = Git.polish_url("git://localhost:%s/%s" % (GIT_DAEMON_PORT, rel_repo_dir))
        with d_remote.config_writer as cw:
            cw.set('url', remote_repo_url)

        with git_daemon_launched(Git.polish_url(base_daemon_path, is_cygwin=False),  # No daemon in Cygwin.
                                 '127.0.0.1',
                                 GIT_DAEMON_PORT):
            # Try listing remotes, to diagnose whether the daemon is up.
            rw_repo.git.ls_remote(d_remote)

            with cwd(rw_repo.working_dir):
                try:
                    return func(self, rw_repo, rw_daemon_repo)
                except:
                    log.info("Keeping repos after failure: \n  rw_repo_dir: %s \n  rw_daemon_repo_dir: %s",
                             rw_repo_dir, rw_daemon_repo_dir)
                    rw_repo_dir = rw_daemon_repo_dir = None
                    raise
    finally:
        rw_repo.git.clear_cache()
        rw_daemon_repo.git.clear_cache()
        del rw_repo
        del rw_daemon_repo

        import gc
        gc.collect()

        if rw_repo_dir:
            rmtree(rw_repo_dir)
        if rw_daemon_repo_dir:
            rmtree(rw_daemon_repo_dir)
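# A hedged usage sketch: in GitPython's test helpers a closure like this is
# returned by the decorator factory `with_rw_and_rw_remote_repo`, so a test
# receives both the writable clone and the daemon-served bare repository built
# above.
# @with_rw_and_rw_remote_repo('0.1.6')
# def test_daemon_interaction(self, rw_repo, rw_daemon_repo):
#     rw_repo.remotes.daemon_origin.fetch()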
def fetch_repo(self, url, branch):
    if os.path.exists(".git"):
        gc.collect()
        repo = git.Repo(getcwd())
        repo.git.clear_cache()
        rmtree(repo.git_dir)
    self.repo = git.Repo.init(getcwd())
    r = Remote(self.repo, name="origin")
    if r in Remote.list_items(self.repo):
        self.repo.delete_remote(r)
    self.repo.git.remote("add", "origin", url)
    self.repo.git.fetch("origin", "-t")
    remote_branch = self.check_remote_branch()
    if not remote_branch:
        print("Fetch tag failed, just fetch all the source code")
        self.repo.git.fetch("origin")
        remote_branch = self.check_remote_branch()
        if not remote_branch:
            print("No branch found in the remote repo, patch failed")
            shutil.rmtree(".git")
            return False
    self.repo.git.checkout(remote_branch, "--force", b="master")
    print("Branch 'master' set up to track remote branch '{}' from 'origin'".format(remote_branch))
    self.check_commit()
    self.repo.git.checkout(self.commit, "--force", b=branch)
    print("Switched to a new branch '{}'".format(branch))
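# A minimal, hedged driver for fetch_repo; the class name `Patcher`, the URL,
# and the branch name are illustrative only, while `check_remote_branch`,
# `check_commit`, and `self.commit` are assumed to exist on the same class as
# in the snippet above.
# patcher = Patcher()
# patcher.fetch_repo("https://example.com/some/repo.git", branch="fix-123")
# print(patcher.repo.active_branch)  # the new branch checked out at self.commit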
def repo_creator(self):
    prefix = 'non_'
    if bare:
        prefix = ''
    # END handle prefix
    repo_dir = tempfile.mktemp(prefix="%sbare_%s" % (prefix, func.__name__))
    rw_repo = self.rorepo.clone(repo_dir, shared=True, bare=bare, n=True)

    rw_repo.head.commit = rw_repo.commit(working_tree_ref)
    if not bare:
        rw_repo.head.reference.checkout()
    # END handle checkout

    prev_cwd = os.getcwd()
    os.chdir(rw_repo.working_dir)
    try:
        try:
            return func(self, rw_repo)
        except:  # noqa E722
            log.info("Keeping repo after failure: %s", repo_dir)
            repo_dir = None
            raise
    finally:
        os.chdir(prev_cwd)
        rw_repo.git.clear_cache()
        rw_repo = None
        if repo_dir is not None:
            gc.collect()
            gitdb.util.mman.collect()
            gc.collect()
            rmtree(repo_dir)
def tearDown(self):
    super(TestBigRepoRW, self).tearDown()
    if self.gitrwrepo is not None:
        rmtree(self.gitrwrepo.working_dir)
        self.gitrwrepo.git.clear_cache()
        self.gitrwrepo = None
        self.puregitrwrepo.git.clear_cache()
        self.puregitrwrepo = None
def cleanup(self):
    """
    Delete the repo working directory when this contextmanager is finished.
    """
    try:
        yield self
    finally:
        rmtree(self.repo.working_dir)
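# A hedged usage sketch, assuming `cleanup` is registered as a context manager
# (e.g. with contextlib.contextmanager) on an object exposing `self.repo`:
#
# with checkout.cleanup():
#     run_build(checkout.repo.working_dir)
# # the working directory is removed here, even if run_build() raised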
def test_init(self):
    prev_cwd = os.getcwd()
    os.chdir(tempfile.gettempdir())
    git_dir_rela = "repos/foo/bar.git"
    del_dir_abs = osp.abspath("repos")
    git_dir_abs = osp.abspath(git_dir_rela)
    try:
        # with specific path
        for path in (git_dir_rela, git_dir_abs):
            r = Repo.init(path=path, bare=True)
            self.assertIsInstance(r, Repo)
            assert r.bare is True
            assert not r.has_separate_working_tree()
            assert osp.isdir(r.git_dir)

            self._assert_empty_repo(r)

            # test clone
            clone_path = path + "_clone"
            rc = r.clone(clone_path)
            self._assert_empty_repo(rc)

            try:
                rmtree(clone_path)
            except OSError:
                # when relative paths are used, the clone may actually be inside
                # of the parent directory
                pass
            # END exception handling

            # try again, this time with the absolute version
            rc = Repo.clone_from(r.git_dir, clone_path)
            self._assert_empty_repo(rc)

            rmtree(git_dir_abs)
            try:
                rmtree(clone_path)
            except OSError:
                # when relative paths are used, the clone may actually be inside
                # of the parent directory
                pass
            # END exception handling
        # END for each path

        os.makedirs(git_dir_rela)
        os.chdir(git_dir_rela)
        r = Repo.init(bare=False)
        assert r.bare is False
        assert not r.has_separate_working_tree()

        self._assert_empty_repo(r)
    finally:
        try:
            rmtree(del_dir_abs)
        except OSError:
            pass
        os.chdir(prev_cwd)
def wrapper(self):
    path = tempfile.mktemp(prefix=func.__name__)
    os.mkdir(path)
    keep = False
    try:
        try:
            return func(self, path)
        except Exception:
            log.info("Test %s.%s failed, output is at %r\n",
                     type(self).__name__, func.__name__, path)
            keep = True
            raise
    finally:
        # Need to collect here to be sure all handles have been closed. It appears
        # a windows-only issue. In fact things should be deleted, as well as
        # memory maps closed, once objects go out of scope. For some reason
        # though this is not the case here unless we collect explicitly.
        gc.collect()
        if not keep:
            rmtree(path)
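# A hedged sketch of the decorator that encloses `wrapper` above (GitPython's
# test helpers name it `with_rw_directory`); `func` is its closure variable and
# `...` stands for the wrapper body shown above.
import functools

def with_rw_directory(func):
    @functools.wraps(func)
    def wrapper(self):
        ...  # body as in the snippet above
    return wrapper

# @with_rw_directory
# def test_writes_output(self, rw_dir):
#     open(os.path.join(rw_dir, "out.txt"), "w").close()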
def analyze_it(args):
    if args.noclone:
        logging.info('Take repo from disk')
        working_dir = os_path.join('.', args.dir)
    else:
        logging.info('Take repo from GitHub')
        repo = clone_repository(args.repo, args.dir)
        if not repo:
            raise Exception
        working_dir = repo.working_dir
    if not os_path.exists(working_dir):
        logging.error('Working directory doesn\'t exist {}'.format(working_dir))
        raise Exception
    words = get_words_in_path(working_dir, tags[args.pos],
                              node_handler[args.node], plang[args.plang])
    top_words = get_top_words(words)
    report = create_report(top_words, format_handler[args.format])
    output_report(report, args.file.format())
    if args.clear:
        logging.info('Remove repo from disk after finish')
        rmtree(working_dir)
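# A hedged invocation sketch; the Namespace fields mirror exactly what
# analyze_it reads above, and the concrete values are illustrative:
from argparse import Namespace

example_args = Namespace(noclone=False, repo='https://github.com/org/repo',
                         dir='repo', pos='VB', node='function', plang='python',
                         format='console', file='report.txt', clear=True)
# analyze_it(example_args)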
def remote_repo_creator(self):
    remote_repo_dir = tempfile.mktemp("remote_repo_%s" % func.__name__)
    repo_dir = tempfile.mktemp("remote_clone_non_bare_repo")

    rw_remote_repo = self.rorepo.clone(remote_repo_dir, shared=True, bare=True)
    # recursive alternates info ?
    rw_repo = rw_remote_repo.clone(repo_dir, shared=True, bare=False, n=True)
    rw_repo.head.commit = working_tree_ref
    rw_repo.head.reference.checkout()

    # prepare for git-daemon
    rw_remote_repo.daemon_export = True

    # this thing is just annoying !
    with rw_remote_repo.config_writer() as crw:
        section = "daemon"
        try:
            crw.add_section(section)
        except Exception:
            pass
        crw.set(section, "receivepack", True)

    # Initialize the remote - first do it as local remote and pull, then
    # we change the url to point to the daemon.
    d_remote = Remote.create(rw_repo, "daemon_origin", remote_repo_dir)
    d_remote.fetch()

    base_path, rel_repo_dir = osp.split(remote_repo_dir)

    remote_repo_url = Git.polish_url("git://localhost:%s/%s" % (GIT_DAEMON_PORT, rel_repo_dir))
    with d_remote.config_writer as cw:
        cw.set('url', remote_repo_url)

    try:
        gd = launch_git_daemon(Git.polish_url(base_path), '127.0.0.1', GIT_DAEMON_PORT)
    except Exception as ex:
        if is_win:
            msg = textwrap.dedent("""
            The `git-daemon.exe` must be in PATH.
            For MINGW, look into .\Git\mingw64\libexec\git-core\), but problems with paths might appear.
            CYGWIN has no daemon, but if one exists, it gets along fine (has also paths problems)
            Anyhow, alternatively try starting `git-daemon` manually:""")
        else:
            msg = "Please try starting `git-daemon` manually:"

        msg += textwrap.dedent("""
            git daemon --enable=receive-pack --base-path=%s %s
        You can also run the daemon on a different port by passing --port=<port>"
        and setting the environment variable GIT_PYTHON_TEST_GIT_DAEMON_PORT to <port>
        """ % (base_path, base_path))
        raise AssertionError(ex, msg)
    # END make assertion
    else:
        # Try listing remotes, to diagnose whether the daemon is up.
        rw_repo.git.ls_remote(d_remote)

        # adjust working dir
        prev_cwd = os.getcwd()
        os.chdir(rw_repo.working_dir)
        try:
            return func(self, rw_repo, rw_remote_repo)
        except:
            log.info("Keeping repos after failure: repo_dir = %s, remote_repo_dir = %s",
                     repo_dir, remote_repo_dir)
            repo_dir = remote_repo_dir = None
            raise
        finally:
            os.chdir(prev_cwd)
    finally:
        try:
            log.debug("Killing git-daemon...")
            gd.proc.kill()
        except:
            ## Either it has died (and we're here), or it won't die, again here...
            pass

        rw_repo.git.clear_cache()
        rw_remote_repo.git.clear_cache()
        rw_repo = rw_remote_repo = None
        import gc
        gc.collect()

        if repo_dir:
            rmtree(repo_dir)
        if remote_repo_dir:
            rmtree(remote_repo_dir)

        if gd is not None:
            gd.proc.wait()
def remote_repo_creator(self):
    rw_daemon_repo_dir = tempfile.mktemp(prefix="daemon_repo-%s-" % func.__name__)
    rw_repo_dir = tempfile.mktemp(prefix="daemon_cloned_repo-%s-" % func.__name__)

    rw_daemon_repo = self.rorepo.clone(rw_daemon_repo_dir, shared=True, bare=True)
    # recursive alternates info ?
    rw_repo = rw_daemon_repo.clone(rw_repo_dir, shared=True, bare=False, n=True)
    try:
        rw_repo.head.commit = working_tree_ref
        rw_repo.head.reference.checkout()

        # prepare for git-daemon
        rw_daemon_repo.daemon_export = True

        # this thing is just annoying !
        with rw_daemon_repo.config_writer() as crw:
            section = "daemon"
            try:
                crw.add_section(section)
            except Exception:
                pass
            crw.set(section, "receivepack", True)

        # Initialize the remote - first do it as local remote and pull, then
        # we change the url to point to the daemon.
        d_remote = Remote.create(rw_repo, "daemon_origin", rw_daemon_repo_dir)
        d_remote.fetch()

        base_daemon_path, rel_repo_dir = osp.split(rw_daemon_repo_dir)

        remote_repo_url = Git.polish_url("git://localhost:%s/%s" % (GIT_DAEMON_PORT, rel_repo_dir))
        with d_remote.config_writer as cw:
            cw.set('url', remote_repo_url)

        with git_daemon_launched(Git.polish_url(base_daemon_path, is_cygwin=False),  # No daemon in Cygwin.
                                 '127.0.0.1',
                                 GIT_DAEMON_PORT):
            # Try listing remotes, to diagnose whether the daemon is up.
            rw_repo.git.ls_remote(d_remote)

            with cwd(rw_repo.working_dir):
                try:
                    return func(self, rw_repo, rw_daemon_repo)
                except:
                    log.info("Keeping repos after failure: \n  rw_repo_dir: %s \n  rw_daemon_repo_dir: %s",
                             rw_repo_dir, rw_daemon_repo_dir)
                    rw_repo_dir = rw_daemon_repo_dir = None
                    raise
    finally:
        rw_repo.git.clear_cache()
        rw_daemon_repo.git.clear_cache()
        del rw_repo
        del rw_daemon_repo

        gc.collect()
        gitdb.util.mman.collect()
        gc.collect()

        if rw_repo_dir:
            rmtree(rw_repo_dir)
        if rw_daemon_repo_dir:
            rmtree(rw_daemon_repo_dir)
def __exit__(self, type=None, value=None, traceback=None):
    if self.is_dir:
        rmtree(self.path)
    else:
        rmfile(self.path)
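# A hedged usage sketch, assuming this __exit__ belongs to a small RAII-style
# helper (hypothetically named `TempPath`) whose __enter__ returns itself:
# with TempPath(path, is_dir=True) as tmp:
#     produce_fixture_files(tmp.path)
# # tmp.path is removed on exit, via rmtree for a directory or rmfile for a file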
def test_base(self):
    rlp_head = fixture_path('reflog_HEAD')
    rlp_master = fixture_path('reflog_master')
    tdir = tempfile.mktemp(suffix="test_reflogs")
    os.mkdir(tdir)

    rlp_master_ro = RefLog.path(self.rorepo.head)
    assert osp.isfile(rlp_master_ro)

    # simple read
    reflog = RefLog.from_file(rlp_master_ro)
    assert reflog._path is not None
    assert isinstance(reflog, RefLog)
    assert len(reflog)

    # iter_entries works with path and with stream
    assert len(list(RefLog.iter_entries(open(rlp_master, 'rb'))))
    assert len(list(RefLog.iter_entries(rlp_master)))

    # raise on invalid revlog
    # TODO: Try multiple corrupted ones !
    pp = 'reflog_invalid_'
    for suffix in ('oldsha', 'newsha', 'email', 'date', 'sep'):
        self.assertRaises(ValueError, RefLog.from_file, fixture_path(pp + suffix))
    # END for each invalid file

    # cannot write an uninitialized reflog
    self.assertRaises(ValueError, RefLog().write)

    # test serialize and deserialize - results must match exactly
    binsha = hex_to_bin(('f' * 40).encode('ascii'))
    msg = "my reflog message"
    cr = self.rorepo.config_reader()
    for rlp in (rlp_head, rlp_master):
        reflog = RefLog.from_file(rlp)
        tfile = osp.join(tdir, osp.basename(rlp))
        reflog.to_file(tfile)
        assert reflog.write() is reflog

        # parsed result must match ...
        treflog = RefLog.from_file(tfile)
        assert treflog == reflog

        # ... as well as each bytes of the written stream
        assert open(tfile).read() == open(rlp).read()

        # append an entry
        entry = RefLog.append_entry(cr, tfile, IndexObject.NULL_BIN_SHA, binsha, msg)
        assert entry.oldhexsha == IndexObject.NULL_HEX_SHA
        assert entry.newhexsha == 'f' * 40
        assert entry.message == msg
        assert RefLog.from_file(tfile)[-1] == entry

        # index entry
        # raises on invalid index
        self.assertRaises(IndexError, RefLog.entry_at, rlp, 10000)

        # indices can be positive ...
        assert isinstance(RefLog.entry_at(rlp, 0), RefLogEntry)
        RefLog.entry_at(rlp, 23)

        # ... and negative
        for idx in (-1, -24):
            RefLog.entry_at(rlp, idx)
        # END for each index to read
    # END for each reflog

    # finally remove our temporary data
    rmtree(tdir)
def remove(self, module=True, force=False, configuration=True, dry_run=False):
    """Remove this submodule from the repository. This will remove our entry
    from the .gitmodules file and the entry in the .git/config file.

    :param module: If True, the module we point to will be deleted as well.
        If the module is currently on a commit which is not part of any branch
        in the remote, if the currently checked out branch is ahead of its
        tracking branch, or if you have modifications in the working tree or
        untracked files, the removal will fail. In case the removal of the
        repository fails for these reasons, the submodule status will not have
        been altered.
        If this submodule has child-modules on its own, these will be deleted
        prior to touching the own module.
    :param force: Enforces the deletion of the module even though it contains
        modifications. This basically enforces a brute-force file system based
        deletion.
    :param configuration: if True, the submodule is deleted from the
        configuration, otherwise it isn't. Although this should be enabled most
        of the time, this flag enables you to safely delete the repository of
        your submodule.
    :param dry_run: if True, we will not actually do anything, but throw the
        errors we would usually throw
    :return: self
    :note: doesn't work in bare repositories
    :raise InvalidGitRepositoryError: thrown if the repository cannot be deleted
    :raise OSError: if directories or files could not be removed"""
    if not (module + configuration):
        raise ValueError("Need to specify to delete at least the module, or the configuration")
    # END handle params

    # DELETE MODULE REPOSITORY
    ##########################
    if module and self.module_exists():
        if force:
            # take the fast lane and just delete everything in our module path
            # TODO: If we run into permission problems, we have a highly inconsistent
            # state. Delete the .git folders last, start with the submodules first
            mp = self.abspath
            method = None
            if os.path.islink(mp):
                method = os.remove
            elif os.path.isdir(mp):
                method = rmtree
            elif os.path.exists(mp):
                raise AssertionError("Cannot forcibly delete repository as it was neither a link, nor a directory")
            # END handle brutal deletion
            if not dry_run:
                assert method
                method(mp)
            # END apply deletion method
        else:
            # verify we may delete our module
            mod = self.module()
            if mod.is_dirty(untracked_files=True):
                raise InvalidGitRepositoryError(
                    "Cannot delete module at %s with any modifications, unless force is specified"
                    % mod.working_tree_dir)
            # END check for dirt

            # figure out whether we have new commits compared to the remotes
            # NOTE: If the user pulled all the time, the remote heads might
            # not have been updated, so commits coming from the remote look
            # as if they come from us. But we stay strictly read-only and
            # don't fetch beforehand.
            for remote in mod.remotes:
                num_branches_with_new_commits = 0
                rrefs = remote.refs
                for rref in rrefs:
                    num_branches_with_new_commits += len(mod.git.cherry(rref)) != 0
                # END for each remote ref
                # not a single remote branch contained all our commits
                if num_branches_with_new_commits == len(rrefs):
                    raise InvalidGitRepositoryError(
                        "Cannot delete module at %s as there are new commits" % mod.working_tree_dir)
                # END handle new commits
                # have to manually delete references as python's scoping is
                # not existing, they could keep handles open ( on windows this is a problem )
                if len(rrefs):
                    del(rref)
                # END handle remotes
                del(rrefs)
                del(remote)
            # END for each remote

            # gently remove all submodule repositories
            for sm in self.children():
                sm.remove(module=True, force=False, configuration=False, dry_run=dry_run)
                del(sm)
            # END for each child-submodule

            # finally delete our own submodule
            if not dry_run:
                wtd = mod.working_tree_dir
                del(mod)        # release file-handles (windows)
                rmtree(wtd)
            # END delete tree if possible
        # END handle force
    # END handle module deletion

    # DELETE CONFIGURATION
    ######################
    if configuration and not dry_run:
        # first the index-entry
        index = self.repo.index
        try:
            del(index.entries[index.entry_key(self.path, 0)])
        except KeyError:
            pass
        # END delete entry
        index.write()

        # now git config - need the config intact, otherwise we can't query
        # information anymore
        self.repo.config_writer().remove_section(sm_section(self.name))
        self.config_writer().remove_section()
    # END delete configuration

    # void our data not to delay invalid access
    self._clear_cache()

    return self
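# A hedged usage sketch for remove(); `repo` is any non-bare Repo containing the
# submodule, and the path is illustrative:
# sm = repo.submodule('lib/vendored')
# sm.remove(module=True, configuration=True, dry_run=True)  # raise early, change nothing
# sm.remove(module=True, force=True)                        # brute-force file system deletion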
def test_index_file_diffing(self, rw_repo):
    # default Index instance points to our index
    index = IndexFile(rw_repo)
    assert index.path is not None
    assert len(index.entries)

    # write the file back
    index.write()

    # could sha it, or check stats

    # test diff
    # resetting the head will leave the index in a different state, and the
    # diff will yield a few changes
    cur_head_commit = rw_repo.head.reference.commit
    rw_repo.head.reset('HEAD~6', index=True, working_tree=False)

    # diff against same index is 0
    diff = index.diff()
    self.assertEqual(len(diff), 0)

    # against HEAD as string, must be the same as it matches index
    diff = index.diff('HEAD')
    self.assertEqual(len(diff), 0)

    # against previous head, there must be a difference
    diff = index.diff(cur_head_commit)
    assert len(diff)

    # we reverse the result
    adiff = index.diff(str(cur_head_commit), R=True)
    odiff = index.diff(cur_head_commit, R=False)    # now its not reversed anymore
    assert adiff != odiff
    self.assertEqual(odiff, diff)                   # both unreversed diffs against HEAD

    # against working copy - its still at cur_commit
    wdiff = index.diff(None)
    assert wdiff != adiff
    assert wdiff != odiff

    # against something unusual
    self.failUnlessRaises(ValueError, index.diff, int)

    # adjust the index to match an old revision
    cur_branch = rw_repo.active_branch
    cur_commit = cur_branch.commit
    rev_head_parent = 'HEAD~1'
    assert index.reset(rev_head_parent) is index

    self.assertEqual(cur_branch, rw_repo.active_branch)
    self.assertEqual(cur_commit, rw_repo.head.commit)

    # there must be differences towards the working tree which is in the 'future'
    assert index.diff(None)

    # reset the working copy as well to current head, to pull 'back' as well
    new_data = b"will be reverted"
    file_path = osp.join(rw_repo.working_tree_dir, "CHANGES")
    with open(file_path, "wb") as fp:
        fp.write(new_data)
    index.reset(rev_head_parent, working_tree=True)
    assert not index.diff(None)
    self.assertEqual(cur_branch, rw_repo.active_branch)
    self.assertEqual(cur_commit, rw_repo.head.commit)
    with open(file_path, 'rb') as fp:
        assert fp.read() != new_data

    # test full checkout
    test_file = osp.join(rw_repo.working_tree_dir, "CHANGES")
    with open(test_file, 'ab') as fd:
        fd.write(b"some data")
    rval = index.checkout(None, force=True, fprogress=self._fprogress)
    assert 'CHANGES' in list(rval)
    self._assert_fprogress([None])
    assert osp.isfile(test_file)

    os.remove(test_file)
    rval = index.checkout(None, force=False, fprogress=self._fprogress)
    assert 'CHANGES' in list(rval)
    self._assert_fprogress([None])
    assert osp.isfile(test_file)

    # individual file
    os.remove(test_file)
    rval = index.checkout(test_file, fprogress=self._fprogress)
    self.assertEqual(list(rval)[0], 'CHANGES')
    self._assert_fprogress([test_file])
    assert osp.exists(test_file)

    # checking out non-existing file throws
    self.failUnlessRaises(CheckoutError, index.checkout, "doesnt_exist_ever.txt.that")
    self.failUnlessRaises(CheckoutError, index.checkout, paths=["doesnt/exist"])

    # checkout file with modifications
    append_data = b"hello"
    with open(test_file, "ab") as fp:
        fp.write(append_data)
    try:
        index.checkout(test_file)
    except CheckoutError as e:
        self.assertEqual(len(e.failed_files), 1)
        self.assertEqual(e.failed_files[0], osp.basename(test_file))
        self.assertEqual(len(e.failed_files), len(e.failed_reasons))
        self.assertIsInstance(e.failed_reasons[0], string_types)
        self.assertEqual(len(e.valid_files), 0)
        with open(test_file, 'rb') as fd:
            s = fd.read()
        self.assertTrue(s.endswith(append_data), s)
    else:
        raise AssertionError("Exception CheckoutError not thrown")

    # if we force it it should work
    index.checkout(test_file, force=True)
    assert not open(test_file, 'rb').read().endswith(append_data)

    # checkout directory
    rmtree(osp.join(rw_repo.working_tree_dir, "lib"))
    rval = index.checkout('lib')
    assert len(list(rval)) > 1
def remove(self, module=True, force=False, configuration=True, dry_run=False):
    """Remove this submodule from the repository. This will remove our entry
    from the .gitmodules file and the entry in the .git/config file.

    :param module: If True, the module we point to will be deleted as well.
        If the module is currently on a commit which is not part of any branch
        in the remote, if the currently checked out branch is ahead of its
        tracking branch, or if you have modifications in the working tree or
        untracked files, the removal will fail. In case the removal of the
        repository fails for these reasons, the submodule status will not have
        been altered.
        If this submodule has child-modules on its own, these will be deleted
        prior to touching the own module.
    :param force: Enforces the deletion of the module even though it contains
        modifications. This basically enforces a brute-force file system based
        deletion.
    :param configuration: if True, the submodule is deleted from the
        configuration, otherwise it isn't. Although this should be enabled most
        of the time, this flag enables you to safely delete the repository of
        your submodule.
    :param dry_run: if True, we will not actually do anything, but throw the
        errors we would usually throw
    :return: self
    :note: doesn't work in bare repositories
    :raise InvalidGitRepositoryError: thrown if the repository cannot be deleted
    :raise OSError: if directories or files could not be removed"""
    if not (module + configuration):
        raise ValueError("Need to specify to delete at least the module, or the configuration")
    # END handle params

    # DELETE MODULE REPOSITORY
    ##########################
    if module and self.module_exists():
        if force:
            # take the fast lane and just delete everything in our module path
            # TODO: If we run into permission problems, we have a highly inconsistent
            # state. Delete the .git folders last, start with the submodules first
            mp = self.abspath
            method = None
            if os.path.islink(mp):
                method = os.remove
            elif os.path.isdir(mp):
                method = rmtree
            elif os.path.exists(mp):
                raise AssertionError("Cannot forcibly delete repository as it was neither a link, nor a directory")
            # END handle brutal deletion
            if not dry_run:
                assert method
                method(mp)
            # END apply deletion method
        else:
            # verify we may delete our module
            mod = self.module()
            if mod.is_dirty(untracked_files=True):
                raise InvalidGitRepositoryError(
                    "Cannot delete module at %s with any modifications, unless force is specified"
                    % mod.working_tree_dir)
            # END check for dirt

            # figure out whether we have new commits compared to the remotes
            # NOTE: If the user pulled all the time, the remote heads might
            # not have been updated, so commits coming from the remote look
            # as if they come from us. But we stay strictly read-only and
            # don't fetch beforehand.
            for remote in mod.remotes:
                num_branches_with_new_commits = 0
                rrefs = remote.refs
                for rref in rrefs:
                    num_branches_with_new_commits += len(mod.git.cherry(rref)) != 0
                # END for each remote ref
                # not a single remote branch contained all our commits
                if num_branches_with_new_commits == len(rrefs):
                    raise InvalidGitRepositoryError(
                        "Cannot delete module at %s as there are new commits" % mod.working_tree_dir)
                # END handle new commits
                # have to manually delete references as python's scoping is
                # not existing, they could keep handles open ( on windows this is a problem )
                if len(rrefs):
                    del(rref)
                # END handle remotes
                del(rrefs)
                del(remote)
            # END for each remote

            # gently remove all submodule repositories
            for sm in self.children():
                sm.remove(module=True, force=False, configuration=False, dry_run=dry_run)
                del(sm)
            # END for each child-submodule

            # finally delete our own submodule
            if not dry_run:
                wtd = mod.working_tree_dir
                del(mod)        # release file-handles (windows)
                rmtree(wtd)
            # END delete tree if possible
        # END handle force
    # END handle module deletion

    # DELETE CONFIGURATION
    ######################
    if configuration and not dry_run:
        # first the index-entry
        index = self.repo.index
        try:
            del(index.entries[index.entry_key(self.path, 0)])
        except KeyError:
            pass
        # END delete entry
        index.write()

        # now git config - need the config intact, otherwise we can't query
        # information anymore
        self.repo.config_writer().remove_section(sm_section(self.name))
        self.config_writer().remove_section()
    # END delete configuration

    # void our data not to delay invalid access
    self._clear_cache()

    return self
def _do_test_fetch(self, remote, rw_repo, remote_repo):
    # specialized fetch testing to de-clutter the main test
    self._do_test_fetch_info(rw_repo)

    def fetch_and_test(remote, **kwargs):
        progress = TestRemoteProgress()
        kwargs['progress'] = progress
        res = remote.fetch(**kwargs)
        progress.make_assertion()
        self._do_test_fetch_result(res, remote)
        return res
    # END fetch and check

    def get_info(res, remote, name):
        return res["%s/%s" % (remote, name)]

    # put remote head to master as it is guaranteed to exist
    remote_repo.head.reference = remote_repo.heads.master

    res = fetch_and_test(remote)
    # all up to date
    for info in res:
        self.assertTrue(info.flags & info.HEAD_UPTODATE)

    # rewind remote head to trigger rejection
    # index must be false as remote is a bare repo
    rhead = remote_repo.head
    remote_commit = rhead.commit
    rhead.reset("HEAD~2", index=False)
    res = fetch_and_test(remote)
    mkey = "%s/%s" % (remote, 'master')
    master_info = res[mkey]
    self.assertTrue(master_info.flags & FetchInfo.FORCED_UPDATE)
    self.assertIsNotNone(master_info.note)

    # normal fast forward - set head back to previous one
    rhead.commit = remote_commit
    res = fetch_and_test(remote)
    self.assertTrue(res[mkey].flags & FetchInfo.FAST_FORWARD)

    # new remote branch
    new_remote_branch = Head.create(remote_repo, "new_branch")
    res = fetch_and_test(remote)
    new_branch_info = get_info(res, remote, new_remote_branch)
    self.assertTrue(new_branch_info.flags & FetchInfo.NEW_HEAD)

    # remote branch rename ( causes creation of a new one locally )
    new_remote_branch.rename("other_branch_name")
    res = fetch_and_test(remote)
    other_branch_info = get_info(res, remote, new_remote_branch)
    self.assertEqual(other_branch_info.ref.commit, new_branch_info.ref.commit)

    # remove new branch
    Head.delete(new_remote_branch.repo, new_remote_branch)
    res = fetch_and_test(remote)
    # deleted remote will not be fetched
    self.failUnlessRaises(IndexError, get_info, res, remote, new_remote_branch)

    # prune stale tracking branches
    stale_refs = remote.stale_refs
    self.assertEqual(len(stale_refs), 2)
    self.assertIsInstance(stale_refs[0], RemoteReference)
    RemoteReference.delete(rw_repo, *stale_refs)

    # test single branch fetch with refspec including target remote
    res = fetch_and_test(remote, refspec="master:refs/remotes/%s/master" % remote)
    self.assertEqual(len(res), 1)
    self.assertTrue(get_info(res, remote, 'master'))

    # ... with refspec and no target
    res = fetch_and_test(remote, refspec='master')
    self.assertEqual(len(res), 1)

    # ... multiple refspecs ... works, but git command returns with error if one ref is wrong without
    # doing anything. This is new in later binaries
    # res = fetch_and_test(remote, refspec=['master', 'fred'])
    # self.assertEqual(len(res), 1)

    # add new tag reference
    rtag = TagReference.create(remote_repo, "1.0-RV_hello.there")
    res = fetch_and_test(remote, tags=True)
    tinfo = res[str(rtag)]
    self.assertIsInstance(tinfo.ref, TagReference)
    self.assertEqual(tinfo.ref.commit, rtag.commit)
    self.assertTrue(tinfo.flags & tinfo.NEW_TAG)

    # adjust tag commit
    Reference.set_object(rtag, rhead.commit.parents[0].parents[0])
    res = fetch_and_test(remote, tags=True)
    tinfo = res[str(rtag)]
    self.assertEqual(tinfo.commit, rtag.commit)
    self.assertTrue(tinfo.flags & tinfo.TAG_UPDATE)

    # delete remote tag - local one will stay
    TagReference.delete(remote_repo, rtag)
    res = fetch_and_test(remote, tags=True)
    self.failUnlessRaises(IndexError, get_info, res, remote, str(rtag))

    # provoke to receive actual objects to see what kind of output we have to
    # expect. For that we need a remote transport protocol.
    # Create a new UN-shared repo and fetch into it after we pushed a change
    # to the shared repo
    other_repo_dir = tempfile.mktemp("other_repo")
    # must clone with a local path for the repo implementation not to freak out
    # as it wants local paths only ( which I can understand )
    other_repo = remote_repo.clone(other_repo_dir, shared=False)
    remote_repo_url = osp.basename(remote_repo.git_dir)  # git-daemon runs with appropriate `--base-path`.
    remote_repo_url = Git.polish_url("git://localhost:%s/%s" % (GIT_DAEMON_PORT, remote_repo_url))

    # put origin to git-url
    other_origin = other_repo.remotes.origin
    with other_origin.config_writer as cw:
        cw.set("url", remote_repo_url)
    # it automatically creates alternates as remote_repo is shared as well.
    # It will use the transport though and ignore alternates when fetching
    # assert not other_repo.alternates  # this would fail

    # assure we are in the right state
    rw_repo.head.reset(remote.refs.master, working_tree=True)
    try:
        self._commit_random_file(rw_repo)
        remote.push(rw_repo.head.reference)

        # here I would expect to see remote-information about packing
        # objects and so on. Unfortunately, this does not happen
        # if we are redirecting the output - git explicitly checks for this
        # and only provides progress information to ttys
        res = fetch_and_test(other_origin)
    finally:
        rmtree(other_repo_dir)
def test_base(self):
    rlp_head = fixture_path('reflog_HEAD')
    rlp_master = fixture_path('reflog_master')
    tdir = tempfile.mktemp(suffix="test_reflogs")
    os.mkdir(tdir)

    rlp_master_ro = RefLog.path(self.rorepo.head)
    assert osp.isfile(rlp_master_ro)

    # simple read
    reflog = RefLog.from_file(rlp_master_ro)
    assert reflog._path is not None
    assert isinstance(reflog, RefLog)
    assert len(reflog)

    # iter_entries works with path and with stream
    assert len(list(RefLog.iter_entries(open(rlp_master, 'rb'))))
    assert len(list(RefLog.iter_entries(rlp_master)))

    # raise on invalid revlog
    # TODO: Try multiple corrupted ones !
    pp = 'reflog_invalid_'
    for suffix in ('oldsha', 'newsha', 'email', 'date', 'sep'):
        self.failUnlessRaises(ValueError, RefLog.from_file, fixture_path(pp + suffix))
    # END for each invalid file

    # cannot write an uninitialized reflog
    self.failUnlessRaises(ValueError, RefLog().write)

    # test serialize and deserialize - results must match exactly
    binsha = hex_to_bin(('f' * 40).encode('ascii'))
    msg = "my reflog message"
    cr = self.rorepo.config_reader()
    for rlp in (rlp_head, rlp_master):
        reflog = RefLog.from_file(rlp)
        tfile = osp.join(tdir, osp.basename(rlp))
        reflog.to_file(tfile)
        assert reflog.write() is reflog

        # parsed result must match ...
        treflog = RefLog.from_file(tfile)
        assert treflog == reflog

        # ... as well as each bytes of the written stream
        assert open(tfile).read() == open(rlp).read()

        # append an entry
        entry = RefLog.append_entry(cr, tfile, IndexObject.NULL_BIN_SHA, binsha, msg)
        assert entry.oldhexsha == IndexObject.NULL_HEX_SHA
        assert entry.newhexsha == 'f' * 40
        assert entry.message == msg
        assert RefLog.from_file(tfile)[-1] == entry

        # index entry
        # raises on invalid index
        self.failUnlessRaises(IndexError, RefLog.entry_at, rlp, 10000)

        # indices can be positive ...
        assert isinstance(RefLog.entry_at(rlp, 0), RefLogEntry)
        RefLog.entry_at(rlp, 23)

        # ... and negative
        for idx in (-1, -24):
            RefLog.entry_at(rlp, idx)
        # END for each index to read
    # END for each reflog

    # finally remove our temporary data
    rmtree(tdir)
def test_index_file_diffing(self, rw_repo):
    # default Index instance points to our index
    index = IndexFile(rw_repo)
    assert index.path is not None
    assert len(index.entries)

    # write the file back
    index.write()

    # could sha it, or check stats

    # test diff
    # resetting the head will leave the index in a different state, and the
    # diff will yield a few changes
    cur_head_commit = rw_repo.head.reference.commit
    rw_repo.head.reset('HEAD~6', index=True, working_tree=False)

    # diff against same index is 0
    diff = index.diff()
    self.assertEqual(len(diff), 0)

    # against HEAD as string, must be the same as it matches index
    diff = index.diff('HEAD')
    self.assertEqual(len(diff), 0)

    # against previous head, there must be a difference
    diff = index.diff(cur_head_commit)
    assert len(diff)

    # we reverse the result
    adiff = index.diff(str(cur_head_commit), R=True)
    odiff = index.diff(cur_head_commit, R=False)    # now its not reversed anymore
    assert adiff != odiff
    self.assertEqual(odiff, diff)                   # both unreversed diffs against HEAD

    # against working copy - its still at cur_commit
    wdiff = index.diff(None)
    assert wdiff != adiff
    assert wdiff != odiff

    # against something unusual
    self.failUnlessRaises(ValueError, index.diff, int)

    # adjust the index to match an old revision
    cur_branch = rw_repo.active_branch
    cur_commit = cur_branch.commit
    rev_head_parent = 'HEAD~1'
    assert index.reset(rev_head_parent) is index

    self.assertEqual(cur_branch, rw_repo.active_branch)
    self.assertEqual(cur_commit, rw_repo.head.commit)

    # there must be differences towards the working tree which is in the 'future'
    assert index.diff(None)

    # reset the working copy as well to current head, to pull 'back' as well
    new_data = b"will be reverted"
    file_path = osp.join(rw_repo.working_tree_dir, "CHANGES")
    with open(file_path, "wb") as fp:
        fp.write(new_data)
    index.reset(rev_head_parent, working_tree=True)
    assert not index.diff(None)
    self.assertEqual(cur_branch, rw_repo.active_branch)
    self.assertEqual(cur_commit, rw_repo.head.commit)
    with open(file_path, 'rb') as fp:
        assert fp.read() != new_data

    # test full checkout
    test_file = osp.join(rw_repo.working_tree_dir, "CHANGES")
    with open(test_file, 'ab') as fd:
        fd.write(b"some data")
    rval = index.checkout(None, force=True, fprogress=self._fprogress)
    assert 'CHANGES' in list(rval)
    self._assert_fprogress([None])
    assert osp.isfile(test_file)

    os.remove(test_file)
    rval = index.checkout(None, force=False, fprogress=self._fprogress)
    assert 'CHANGES' in list(rval)
    self._assert_fprogress([None])
    assert osp.isfile(test_file)

    # individual file
    os.remove(test_file)
    rval = index.checkout(test_file, fprogress=self._fprogress)
    self.assertEqual(list(rval)[0], 'CHANGES')
    self._assert_fprogress([test_file])
    assert osp.exists(test_file)

    # checking out non-existing file throws
    self.failUnlessRaises(CheckoutError, index.checkout, "doesnt_exist_ever.txt.that")
    self.failUnlessRaises(CheckoutError, index.checkout, paths=["doesnt/exist"])

    # checkout file with modifications
    append_data = b"hello"
    with open(test_file, "ab") as fp:
        fp.write(append_data)
    try:
        index.checkout(test_file)
    except CheckoutError as e:
        self.assertEqual(len(e.failed_files), 1)
        self.assertEqual(e.failed_files[0], osp.basename(test_file))
        self.assertEqual(len(e.failed_files), len(e.failed_reasons))
        self.assertIsInstance(e.failed_reasons[0], str)
        self.assertEqual(len(e.valid_files), 0)
        with open(test_file, 'rb') as fd:
            s = fd.read()
        self.assertTrue(s.endswith(append_data), s)
    else:
        raise AssertionError("Exception CheckoutError not thrown")

    # if we force it it should work
    index.checkout(test_file, force=True)
    assert not open(test_file, 'rb').read().endswith(append_data)

    # checkout directory
    rmtree(osp.join(rw_repo.working_tree_dir, "lib"))
    rval = index.checkout('lib')
    assert len(list(rval)) > 1
def remote_repo_creator(self):
    remote_repo_dir = _mktemp("remote_repo_%s" % func.__name__)
    repo_dir = _mktemp("remote_clone_non_bare_repo")

    rw_remote_repo = self.rorepo.clone(remote_repo_dir, shared=True, bare=True)
    # recursive alternates info ?
    rw_repo = rw_remote_repo.clone(repo_dir, shared=True, bare=False, n=True)
    rw_repo.head.commit = working_tree_ref
    rw_repo.head.reference.checkout()

    # prepare for git-daemon
    rw_remote_repo.daemon_export = True

    # this thing is just annoying !
    with rw_remote_repo.config_writer() as crw:
        section = "daemon"
        try:
            crw.add_section(section)
        except Exception:
            pass
        crw.set(section, "receivepack", True)

    # initialize the remote - first do it as local remote and pull, then
    # we change the url to point to the daemon. The daemon should be started
    # by the user, not by us
    d_remote = Remote.create(rw_repo, "daemon_origin", remote_repo_dir)
    d_remote.fetch()
    remote_repo_url = "git://localhost:%s%s" % (GIT_DAEMON_PORT, remote_repo_dir)

    with d_remote.config_writer as cw:
        cw.set('url', remote_repo_url)

    temp_dir = osp.dirname(_mktemp())  # `osp` is os.path; the directory holding the temp repos
    gd = launch_git_daemon(temp_dir, '127.0.0.1', GIT_DAEMON_PORT)
    try:
        # yes, I know ... fortunately, this is always going to work if sleep time is just large enough
        time.sleep(0.5)
        # end

        # try to list remotes, to diagnose whether the server is up
        try:
            rw_repo.git.ls_remote(d_remote)
        except GitCommandError as e:
            # We assume in good faith that we didn't start the daemon - but make sure we kill it anyway
            # Of course we expect it to work here already, but maybe there are timing constraints
            # on some platforms ?
            try:
                gd.proc.terminate()
            except Exception as ex:
                log.debug("Ignoring %r while terminating proc after %r.", ex, e)
            log.warning('git(%s) ls-remote failed due to:%s', rw_repo.git_dir, e)
            if is_win:
                msg = textwrap.dedent("""
                MINGW yet has problems with paths, and `git-daemon.exe` must be in PATH
                (look into .\Git\mingw64\libexec\git-core\);
                CYGWIN has no daemon, but if one exists, it gets along fine (has also paths problems)
                Anyhow, alternatively try starting `git-daemon` manually:""")
            else:
                msg = "Please try starting `git-daemon` manually:"

            msg += textwrap.dedent("""
                git daemon --enable=receive-pack '%s'
            You can also run the daemon on a different port by passing --port=<port>"
            and setting the environment variable GIT_PYTHON_TEST_GIT_DAEMON_PORT to <port>
            """ % temp_dir)
            from nose import SkipTest
            raise SkipTest(msg) if is_win else AssertionError(msg)
            # END make assertion
        # END catch ls remote error

        # adjust working dir
        prev_cwd = os.getcwd()
        os.chdir(rw_repo.working_dir)
        try:
            return func(self, rw_repo, rw_remote_repo)
        except:
            log.info("Keeping repos after failure: repo_dir = %s, remote_repo_dir = %s",
                     repo_dir, remote_repo_dir)
            repo_dir = remote_repo_dir = None
            raise
        finally:
            os.chdir(prev_cwd)
    finally:
        try:
            gd.proc.kill()
        except:
            ## Either it has died (and we're here), or it won't die, again here...
            pass

        rw_repo.git.clear_cache()
        rw_remote_repo.git.clear_cache()
        rw_repo = rw_remote_repo = None
        import gc
        gc.collect()

        if repo_dir:
            rmtree(repo_dir)
        if remote_repo_dir:
            rmtree(remote_repo_dir)

        if gd is not None:
            gd.proc.wait()