def test_format_initialize_find_open(self):
    # loopback test to check the current format initializes to itself.
    if not self.branch_format.is_supported():
        # unsupported formats are not loopback testable
        # because the default open will not open them and
        # they may not be initializable.
        return
    # supported formats must be able to init and open
    t = get_transport(self.get_url())
    readonly_t = get_transport(self.get_readonly_url())
    made_branch = self.make_branch('.')
    self.failUnless(isinstance(made_branch, branch.Branch))
    # find it via bzrdir opening:
    opened_control = bzrdir.BzrDir.open(readonly_t.base)
    direct_opened_branch = opened_control.open_branch()
    self.assertEqual(direct_opened_branch.__class__, made_branch.__class__)
    self.assertEqual(opened_control, direct_opened_branch.bzrdir)
    self.failUnless(isinstance(direct_opened_branch._format,
                               self.branch_format.__class__))
    # find it via Branch.open
    opened_branch = branch.Branch.open(readonly_t.base)
    self.failUnless(isinstance(opened_branch, made_branch.__class__))
    self.assertEqual(made_branch._format.__class__,
                     opened_branch._format.__class__)
    # if it has a unique id string, can we probe for it ?
    try:
        self.branch_format.get_format_string()
    except NotImplementedError:
        return
    self.assertEqual(self.branch_format,
                     opened_control.find_branch_format())
def setUp(self, backing_server=None):
    """Setup the Chroot on backing_server.

    :param backing_server: server supplying the transport to chroot into;
        when None, a transport for the current directory is used.
    """
    if backing_server is not None:
        self.backing_transport = get_transport(backing_server.get_url())
    else:
        self.backing_transport = get_transport('.')
    ChrootServer.setUp(self)
def test_format_initialize_find_open(self):
    # loopback test to check the current format initializes to itself.
    if not self.repository_format.is_supported():
        # unsupported formats are not loopback testable
        # because the default open will not open them and
        # they may not be initializable.
        return
    # supported formats must be able to init and open
    t = get_transport(self.get_url())
    readonly_t = get_transport(self.get_readonly_url())
    made_control = self.bzrdir_format.initialize(t.base)
    made_repo = self.repository_format.initialize(made_control)
    self.assertEqual(made_control, made_repo.bzrdir)
    # find it via bzrdir opening:
    opened_control = bzrdir.BzrDir.open(readonly_t.base)
    direct_opened_repo = opened_control.open_repository()
    self.assertEqual(direct_opened_repo.__class__, made_repo.__class__)
    self.assertEqual(opened_control, direct_opened_repo.bzrdir)
    self.assertIsInstance(direct_opened_repo._format,
                          self.repository_format.__class__)
    # find it via Repository.open
    opened_repo = repository.Repository.open(readonly_t.base)
    self.failUnless(isinstance(opened_repo, made_repo.__class__))
    self.assertEqual(made_repo._format.__class__,
                     opened_repo._format.__class__)
    # if it has a unique id string, can we probe for it ?
    try:
        self.repository_format.get_format_string()
    except NotImplementedError:
        return
    self.assertEqual(
        self.repository_format,
        repository.RepositoryFormat.find_format(opened_control))
def setUp(self):
    """Build an in-memory Launchpad codehosting fixture with one writable
    and one read-only hosted branch, plus a file inside the writable one.
    """
    BzrTestCase.setUp(self)
    memory_server = self._setUpMemoryServer()
    memory_transport = get_transport(memory_server.get_url())
    backing_transport = memory_transport.clone('backing')
    self._frontend = InMemoryFrontend()
    self.factory = self._frontend.getLaunchpadObjectFactory()
    codehosting_api = self._frontend.getCodehostingEndpoint()
    self.requester = self.factory.makePerson()
    self.writable_branch = self.factory.makeAnyBranch(
        branch_type=BranchType.HOSTED, owner=self.requester).unique_name
    self.writable_file = '/%s/.bzr/hello.txt' % self.writable_branch
    self.read_only_branch = self.factory.makeAnyBranch(
        branch_type=BranchType.HOSTED).unique_name
    self.lp_server = self._setUpLaunchpadServer(
        self.requester.id, codehosting_api, backing_transport)
    self.lp_transport = get_transport(self.lp_server.get_url())
    self.lp_transport.mkdir(os.path.dirname(self.writable_file))
    self.lp_transport.put_bytes(self.writable_file, 'Hello World!')
def get_chrooted_transport(url, mkdir=False):
    """Return a chrooted transport serving `url`.

    :param url: URL for the backing transport.
    :param mkdir: if True, create the transport's base directory first.
    :return: a transport rooted at a new ChrootServer wrapping `url`.
    """
    transport = get_transport(url)
    if mkdir:
        transport.create_prefix()
    chroot_server = chroot.ChrootServer(transport)
    chroot_server.start_server()
    return get_transport(chroot_server.get_url())
def test_clone(self):
    server = ChrootServer(get_transport('memory:///foo/bar/'))
    server.setUp()
    transport = get_transport(server.get_url())
    # relpath from root and root path are the same
    relpath_cloned = transport.clone('foo')
    abspath_cloned = transport.clone('/foo')
    self.assertEqual(server, relpath_cloned.server)
    self.assertEqual(server, abspath_cloned.server)
    server.tearDown()
def test_abspath(self):
    # The abspath is always relative to the chroot_url.
    server = ChrootServer(get_transport('memory:///foo/bar/'))
    server.setUp()
    transport = get_transport(server.get_url())
    self.assertEqual(server.get_url(), transport.abspath('/'))
    subdir_transport = transport.clone('subdir')
    self.assertEqual(server.get_url(), subdir_transport.abspath('/'))
    server.tearDown()
def test_commit_nicks(self):
    """Nicknames are committed to the revision"""
    get_transport(self.get_url()).mkdir('bzr.dev')
    wt = self.make_branch_and_tree('bzr.dev')
    branch = wt.branch
    branch.nick = "My happy branch"
    wt.commit('My commit respect da nick.')
    committed = branch.repository.get_revision(branch.last_revision())
    self.assertEqual(committed.properties["branch-nick"],
                     "My happy branch")
def do_POST(self):
    """Hand the request off to a smart server instance."""
    backing = get_transport(self.server.test_case_server._home_dir)
    chroot_server = chroot.ChrootServer(backing)
    chroot_server.setUp()
    try:
        t = get_transport(chroot_server.get_url())
        self.do_POST_inner(t)
    finally:
        # Always tear the chroot server down, even if the request failed.
        chroot_server.tearDown()
def run(self, _check_transaction=False):
    """See `IBranchUpgradeJob`."""
    # Set up the new branch structure
    with server(get_rw_server(), no_replace=True):
        upgrade_branch_path = tempfile.mkdtemp()
        try:
            upgrade_transport = get_transport(upgrade_branch_path)
            upgrade_transport.mkdir('.bzr')
            source_branch_transport = get_transport(
                self.branch.getInternalBzrUrl())
            source_branch_transport.clone('.bzr').copy_tree_to_transport(
                upgrade_transport.clone('.bzr'))
            transaction.commit()
            upgrade_branch = BzrBranch.open_from_transport(
                upgrade_transport)
            # No transactions are open so the DB connection won't be
            # killed.
            with TransactionFreeOperation():
                # Perform the upgrade.
                upgrade(upgrade_branch.base)
            # Re-open the branch, since its format has changed.
            upgrade_branch = BzrBranch.open_from_transport(
                upgrade_transport)
            source_branch = BzrBranch.open_from_transport(
                source_branch_transport)
            source_branch.lock_write()
            upgrade_branch.pull(source_branch)
            upgrade_branch.fetch(source_branch)
            source_branch.unlock()
            # Move the branch in the old format to backup.bzr
            try:
                source_branch_transport.delete_tree('backup.bzr')
            except NoSuchFile:
                pass
            source_branch_transport.rename('.bzr', 'backup.bzr')
            source_branch_transport.mkdir('.bzr')
            upgrade_transport.clone('.bzr').copy_tree_to_transport(
                source_branch_transport.clone('.bzr'))
            # Re-open the source branch again.
            source_branch = BzrBranch.open_from_transport(
                source_branch_transport)
            formats = get_branch_formats(source_branch)
            self.branch.branchChanged(
                self.branch.stacked_on,
                self.branch.last_scanned_id,
                *formats)
        finally:
            # Remove the temporary upgrade area whether or not the
            # upgrade succeeded.
            shutil.rmtree(upgrade_branch_path)
def test_checkOneBranch_old_branch_missing(self):
    # checkOneBranch returns False when there is no bzr branch for the
    # database branch in old distroseries.
    db_branch = self.makeOfficialPackageBranch()
    brancher = self.makeNewSeriesAndBrancher(db_branch.distroseries)
    brancher.makeOneNewBranch(db_branch)
    url = "lp-internal:///" + db_branch.unique_name
    get_transport(url).delete_tree(".bzr")
    ok = brancher.checkOneBranch(db_branch)
    self.assertFalse(ok)
    self.assertLogMessages(
        ["^WARNING No bzr branch at old location "
         "lp-internal:///.*/.*/.*/.*$"])
def testHttpTransportStillThere(self):
    # We tweak the http:// transport in the worker. Make sure that it's
    # still available after mirroring.
    http = get_transport('http://example.com')
    source_branch = self.make_branch('source-branch')
    to_mirror = self.makePullerWorker(
        source_branch.base, self.get_url('destdir'))
    to_mirror.mirrorWithoutChecks()
    new_http = get_transport('http://example.com')
    self.assertEqual(get_transport('http://example.com').base, http.base)
    self.assertEqual(new_http.__class__, http.__class__)
def testHttpTransportStillThere(self):
    # We tweak the http:// transport in the worker. Make sure that it's
    # still available after mirroring.
    http = get_transport('http://example.com')
    source_branch = self.make_branch('source-branch')
    to_mirror = self.makePullerWorker(
        source_branch.base, self.get_url('destdir'))
    to_mirror.mirrorWithoutChecks()
    new_http = get_transport('http://example.com')
    self.assertEqual(get_transport('http://example.com').base, http.base)
    self.assertEqual(new_http.__class__, http.__class__)
def _resolve(self, url, _request_factory=ResolveLaunchpadPathRequest,
             _lp_login=None):
    """Resolve the base URL for this transport.

    :param url: lp: URL to resolve.
    :param _request_factory: factory for the XML-RPC resolution request
        (test hook).
    :param _lp_login: Launchpad login to use; looked up when None.
    :return: the first usable URL from the resolution result.
    :raises errors.InvalidURL: when no returned URL has a usable scheme.
    """
    url, path = self._update_url_scheme(url)
    if _lp_login is None:
        _lp_login = get_lp_login()
    path = path.strip('/')
    path = self._expand_user(path, url, _lp_login)
    if _lp_login is not None:
        result = self._resolve_locally(path, url, _request_factory)
        if 'launchpad' in debug.debug_flags:
            # Compare the local resolution against the XML-RPC one for
            # debugging purposes.
            local_res = result
            result = self._resolve_via_xmlrpc(path, url, _request_factory)
            trace.note(gettext(
                'resolution for {0}\n local: {1}\n remote: {2}').format(
                    url, local_res['urls'], result['urls']))
    else:
        result = self._resolve_via_xmlrpc(path, url, _request_factory)
    if 'launchpad' in debug.debug_flags:
        trace.mutter("resolve_lp_path(%r) == %r", url, result)
    _warned_login = False
    for url in result['urls']:
        scheme, netloc, path, query, fragment = urlsplit(url)
        if self._requires_launchpad_login(scheme, netloc, path, query,
                                          fragment):
            # Only accept launchpad.net bzr+ssh URLs if we know
            # the user's Launchpad login:
            if _lp_login is not None:
                break
            if _lp_login is None:
                if not _warned_login:
                    trace.warning(
                        'You have not informed bzr of your Launchpad ID, and you must do this to\n'
                        'write to Launchpad or access private data. See "bzr help launchpad-login".')
                    _warned_login = True
        else:
            # Use the URL if we can create a transport for it.
            try:
                transport.get_transport(url)
            except (errors.PathError, errors.TransportError):
                pass
            else:
                break
    else:
        raise errors.InvalidURL(path=url, extra='no supported schemes')
    return url
def _initialize_bazaar_server(self, directory, server_port): from bzrlib import urlutils from bzrlib.transport import get_transport from bzrlib.transport.chroot import ChrootServer url = urlutils.local_path_to_url(directory) url = 'readonly+' + url print url chroot_server = ChrootServer(get_transport(url)) chroot_server.setUp() t = get_transport(chroot_server.get_url()) print chroot_server.get_url() self._bazaar_server = SmartTCPServer( t, 'localhost', server_port) self._bazaar_server.start_background_thread()
def test_open_on_server(self):
    basedir = get_transport(self.setup_memory()).clone('path')
    basedir.create_prefix()
    ui = self.get_test_ui()
    serve = server.Server(ui)
    serve.start(port=0)
    try:
        source_mirror = mirrorset.initialise(basedir, 'myname', basedir, ui)
        serve.add(source_mirror)
        server_transport = get_transport(serve.addresses[0])
        opened_mirror = mirrorset.MirrorSet(server_transport, 'myname', ui)
        self.assertIsInstance(opened_mirror, mirrorset.HTTPMirrorSet)
    finally:
        serve.stop()
def test_urljoin_preserves_chroot(self):
    """Using urlutils.join(url, '..') on a chroot URL should not produce a
    URL that escapes the intended chroot.

    This is so that it is not possible to escape a chroot by doing::
        url = chroot_transport.base
        parent_url = urlutils.join(url, '..')
        new_transport = get_transport(parent_url)
    """
    server = ChrootServer(get_transport('memory:///path/'))
    server.setUp()
    transport = get_transport(server.get_url())
    self.assertRaises(InvalidURLJoin, urlutils.join, transport.base, '..')
    server.tearDown()
def test_checkOneBranch_old_branch_missing(self):
    # checkOneBranch returns False when there is no bzr branch for the
    # database branch in old distroseries.
    db_branch = self.makeOfficialPackageBranch()
    brancher = self.makeNewSeriesAndBrancher(db_branch.distroseries)
    brancher.makeOneNewBranch(db_branch)
    url = 'lp-internal:///' + db_branch.unique_name
    get_transport(url).delete_tree('.bzr')
    ok = brancher.checkOneBranch(db_branch)
    self.assertFalse(ok)
    self.assertLogMessages([
        '^WARNING No bzr branch at old location '
        'lp-internal:///.*/.*/.*/.*$'])
def _resolve(self, url, _request_factory=ResolveLaunchpadPathRequest,
             _lp_login=None):
    """Resolve the base URL for this transport.

    :param url: lp: URL to resolve.
    :param _request_factory: factory for the XML-RPC resolution request
        (test hook).
    :param _lp_login: Launchpad login to use; looked up when None.
    :return: the first usable URL from the resolution result.
    :raises errors.InvalidURL: when no returned URL has a usable scheme.
    """
    url, path = self._update_url_scheme(url)
    if _lp_login is None:
        _lp_login = get_lp_login()
    path = path.strip('/')
    path = self._expand_user(path, url, _lp_login)
    if _lp_login is not None:
        result = self._resolve_locally(path, url, _request_factory)
        if 'launchpad' in debug.debug_flags:
            # Compare the local resolution against the XML-RPC one for
            # debugging purposes.
            local_res = result
            result = self._resolve_via_xmlrpc(path, url, _request_factory)
            trace.note(gettext(
                'resolution for {0}\n local: {1}\n remote: {2}').format(
                    url, local_res['urls'], result['urls']))
    else:
        result = self._resolve_via_xmlrpc(path, url, _request_factory)
    if 'launchpad' in debug.debug_flags:
        trace.mutter("resolve_lp_path(%r) == %r", url, result)
    _warned_login = False
    for url in result['urls']:
        scheme, netloc, path, query, fragment = urlsplit(url)
        if self._requires_launchpad_login(scheme, netloc, path, query,
                                          fragment):
            # Only accept launchpad.net bzr+ssh URLs if we know
            # the user's Launchpad login:
            if _lp_login is not None:
                break
            if _lp_login is None:
                if not _warned_login:
                    trace.warning(
                        'You have not informed bzr of your Launchpad ID, and you must do this to\n'
                        'write to Launchpad or access private data. See "bzr help launchpad-login".')
                    _warned_login = True
        else:
            # Use the URL if we can create a transport for it.
            try:
                transport.get_transport(url)
            except (errors.PathError, errors.TransportError):
                pass
            else:
                break
    else:
        raise errors.InvalidURL(path=url, extra='no supported schemes')
    return url
def test_urljoin_preserves_chroot(self):
    """Using urlutils.join(url, '..') on a chroot URL should not produce a
    URL that escapes the intended chroot.

    This is so that it is not possible to escape a chroot by doing::
        url = chroot_transport.base
        parent_url = urlutils.join(url, '..')
        new_transport = get_transport(parent_url)
    """
    server = ChrootServer(get_transport('memory:///path/'))
    server.setUp()
    transport = get_transport(server.get_url())
    self.assertRaises(
        InvalidURLJoin, urlutils.join, transport.base, '..')
    server.tearDown()
def make_app(root, prefix, path_var='REQUEST_URI', readonly=True):
    """Convenience function to construct a WSGI bzr smart server.

    :param root: a local path that requests will be relative to.
    :param prefix: See RelpathSetter.
    :param path_var: See RelpathSetter.
    :param readonly: if True (the default), serve the transport wrapped
        read-only so clients cannot write through this app.
    """
    local_url = local_path_to_url(root)
    if readonly:
        base_transport = get_transport('readonly+' + local_url)
    else:
        base_transport = get_transport(local_url)
    app = SmartWSGIApp(base_transport, prefix)
    app = RelpathSetter(app, '', path_var)
    return app
def test_excess_files_on_delete_directory(self):
    # bzrlib.trace doesn't trace delete_tree, so patch in a tracing
    # implementation for the duration of the test.
    def delete_tree(self, relpath):
        self._activity.append(('delete_tree', relpath))
        return self._decorated.delete(relpath)
    self.useFixture(MonkeyPatch(
        'bzrlib.transport.trace.TransportTraceDecorator.delete_tree',
        delete_tree))
    basedir = get_transport('trace+' + self.setup_memory()).clone('path')
    basedir.create_prefix()
    subdir = basedir.clone('dir')
    subdir.create_prefix()
    subdir.put_bytes('abc', 'def')
    sourcedir = basedir.clone('../source')
    sourcedir.create_prefix()
    j1 = journals.Journal()
    # TODO: we need to set mtime to something different - in a new test,
    # but that needs VFS improvements, or real files, or mocks or something.
    j1.add('dir', 'del', journals.DirContent())
    del basedir._activity[:]
    ui = UI()
    generator = journals.ReplayGenerator(j1, sourcedir, ui)
    replay = journals.TransportReplay(j1, generator, basedir, ui)
    replay.replay()
    self.assertEqual(
        [('rmdir', 'dir'), ('delete_tree', 'dir')],
        basedir._activity)
    self.assertEqual([
        ('log', 7, 'l_mirror.journals',
         'Deleting excess files in directory dir'),
        ], ui.outputs)
def server(server, no_replace=False):
    """Context-manager generator that yields `server`, started if needed.

    :param server: the server to (possibly) start and later stop.
    :param no_replace: if True, do not start the server when a transport
        for its URL can already be obtained (i.e. something is already
        serving it).
    """
    run_server = True
    if no_replace:
        try:
            get_transport(server.get_url())
        except UnsupportedProtocol:
            pass
        else:
            # A transport already exists for this URL; reuse it.
            run_server = False
    if run_server:
        server.start_server()
    try:
        yield server
    finally:
        # Only stop what we started.
        if run_server:
            server.stop_server()
def test_reconcile_wrong_order_secondary_inventory(self):
    # a wrong order in the parents for inventories is ignored.
    t = get_transport(self.get_url()).clone('reversed-secondary-parents')
    d = bzrlib.bzrdir.BzrDir.open_from_transport(t)
    repo = d.open_repository()
    self.checkUnreconciled(d, repo.reconcile())
    self.checkUnreconciled(d, repo.reconcile(thorough=True))
def setUp(self):
    """Create a '.bzr' control area and a lockable object inside it."""
    TestCaseInTempDir.setUp(self)
    transport = get_transport('.')
    transport.mkdir('.bzr')
    self.sub_transport = transport.clone('.bzr')
    self.lockable = self.get_lockable()
    self.lockable.create_lock()
def test_upgrade_v6_to_meta_no_workingtree(self):
    # Some format6 branches do not have checkout files. Upgrading
    # such a branch to metadir must not setup a working tree.
    self.build_tree_contents(_upgrade1_template)
    upgrade('.', bzrdir.BzrDirFormat6())
    transport = get_transport('.')
    transport.delete_multi(['.bzr/pending-merges', '.bzr/inventory'])
    self.assertFalse(transport.has('.bzr/stat-cache'))
    # XXX: upgrade fails if a backup.bzr is already present
    # -- David Allouche 2006-08-11
    transport.delete_tree('backup.bzr')
    # At this point, we have a format6 branch without checkout files.
    upgrade('.', bzrdir.BzrDirMetaFormat1())
    # The upgrade should not have set up a working tree.
    control = bzrdir.BzrDir.open('.')
    self.assertFalse(control.has_workingtree())
    # We have covered the scope of this test, we may as well check that
    # upgrade has not eaten our data, even if it's a bit redundant with
    # other tests.
    self.failUnless(isinstance(control._format, bzrdir.BzrDirMetaFormat1))
    branch = control.open_branch()
    self.assertEquals(branch.revision_history(), [
        '[email protected]',
        '[email protected]',
        ])
def _show_push_branch(br_from, revision_id, location, to_file, verbose=False, overwrite=False, remember=False, stacked_on=None, create_prefix=False, use_existing_dir=False, no_tree=False): """Push a branch to a location. :param br_from: the source branch :param revision_id: the revision-id to push up to :param location: the url of the destination :param to_file: the output stream :param verbose: if True, display more output than normal :param overwrite: list of things to overwrite ("history", "tags") or boolean indicating for everything :param remember: if True, store the location as the push location for the source branch :param stacked_on: the url of the branch, if any, to stack on; if set, only the revisions not in that branch are pushed :param create_prefix: if True, create the necessary parent directories at the destination if they don't already exist :param use_existing_dir: if True, proceed even if the destination directory exists without a current .bzr directory in it """ to_transport = transport.get_transport(location) try: dir_to = controldir.ControlDir.open_from_transport(to_transport) except errors.NotBranchError: # Didn't find anything dir_to = None if dir_to is None: try: br_to = br_from.create_clone_on_transport( to_transport, revision_id=revision_id, stacked_on=stacked_on, create_prefix=create_prefix, use_existing_dir=use_existing_dir, no_tree=no_tree) except errors.AlreadyControlDirError, err: raise errors.BzrCommandError( gettext( "Target directory %s already contains a .bzr directory, " "but it is not valid.") % (location, )) except errors.FileExists, err: if not use_existing_dir: raise errors.BzrCommandError( gettext("Target directory %s" " already exists, but does not have a .bzr" " directory. Supply --use-existing-dir to push" " there anyway.") % location) # This shouldn't occur, but if it does the FileExists error will be # more informative than an UnboundLocalError for br_to. raise
def test_nicks(self):
    """Test explicit and implicit branch nicknames.

    Nicknames are implicitly the name of the branch's directory, unless an
    explicit nickname is set.  That is, an explicit nickname always
    overrides the implicit one.
    """
    t = get_transport(self.get_url())
    branch = self.make_branch('bzr.dev')
    # The nick will be 'bzr.dev', because there is no explicit nick set.
    self.assertEqual(branch.nick, 'bzr.dev')
    # Move the branch to a different directory, 'bzr.ab'.  Now that branch
    # will report its nick as 'bzr.ab'.
    t.move('bzr.dev', 'bzr.ab')
    branch = Branch.open(self.get_url('bzr.ab'))
    self.assertEqual(branch.nick, 'bzr.ab')
    # Set the branch nick explicitly.  This will ensure there's a branch
    # config file in the branch.
    branch.nick = "Aaron's branch"
    if not isinstance(branch, remote.RemoteBranch):
        self.failUnless(branch._transport.has("branch.conf"))
    # Because the nick has been set explicitly, the nick is now always
    # "Aaron's branch", regardless of directory name.
    self.assertEqual(branch.nick, "Aaron's branch")
    t.move('bzr.ab', 'integration')
    branch = Branch.open(self.get_url('integration'))
    self.assertEqual(branch.nick, "Aaron's branch")
    branch.nick = u"\u1234"
    self.assertEqual(branch.nick, u"\u1234")
def test_scans_content_root(self):
    base = self.setup_memory()
    root = base + 'path/myname'
    t = get_transport(base)
    t = t.clone('path')
    contentdir = t
    contentdir.create_prefix()
    contentdir.mkdir('dir1')
    contentdir.mkdir('dir2')
    contentdir.put_bytes('abc', '1234567890\n')
    contentdir.put_bytes('dir1/def', 'abcdef')
    ui, cmd = self.get_test_ui_and_cmd((root,))
    self.assertEqual(0, cmd.execute())
    t = t.clone('.lmirror/metadata/myname')
    # NOTE(review): the expected texts below were reflowed from a
    # whitespace-mangled source; confirm the exact line breaks against
    # the original test data.
    self.assertThat(t.get_bytes('metadata.conf'), DocTestMatches("""[metadata]
basis = 0
latest = 1
timestamp = ...
updating = False
""", ELLIPSIS))
    self.assertThat(t.get_bytes('journals/0'), DocTestMatches("""l-mirror-journal-2
"""))
    self.assertThat(t.get_bytes('journals/1'), DocTestMatches("""l-mirror-journal-2
.lmirror\x00new\x00dir\x00.lmirror/sets\x00new\x00dir\x00.lmirror/sets/myname\x00new\x00dir\x00.lmirror/sets/myname/format\x00new\x00file\x00e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e\x002\x000.000000\x00.lmirror/sets/myname/set.conf\x00new\x00file\x00061df21cf828bb333660621c3743cfc3a3b2bd23\x0023\x000.000000\x00abc\x00new\x00file\x0012039d6dd9a7e27622301e935b6eefc78846802e\x0011\x000.000000\x00dir1\x00new\x00dir\x00dir1/def\x00new\x00file\x001f8ac10f23c5b5bc1167bda84b833e5c057a77d2\x006\x000.000000\x00dir2\x00new\x00dir"""))
def createDestinationBranch(self, source_branch, destination_url):
    """Create a destination branch for 'source_branch'.

    Creates a branch at 'destination_url' that has the same format as
    'source_branch'.  Any content already at 'destination_url' will be
    deleted.  Generally the new branch will have no revisions, but they
    will be copied for import branches, because this can be done safely
    and efficiently with a vfs-level copy (see `ImportedBranchPolicy`).

    :param source_branch: The Bazaar branch that will be mirrored.
    :param destination_url: The place to make the destination branch.
        This URL must point to a writable location.
    :return: The destination branch.
    """
    dest_transport = get_transport(destination_url)
    if dest_transport.has('.'):
        dest_transport.delete_tree('.')
    if isinstance(source_branch, LoomSupport):
        # Looms suck.
        revision_id = None
    else:
        revision_id = 'null:'
    source_branch.bzrdir.clone_on_transport(
        dest_transport, revision_id=revision_id)
    return Branch.open(destination_url)
def _open_dir(self, url):
    """Simple BzrDir.open clone that only uses specific probers.

    :param url: URL to open
    :return: ControlDir instance
    """
    def redirected(transport, e, redirection_notice):
        # Check the redirect target against policy before following it.
        self.policy.checkOneURL(e.target)
        redirected_transport = transport._redirected_to(e.source, e.target)
        if redirected_transport is None:
            raise errors.NotBranchError(e.source)
        trace.note('%s is%s redirected to %s',
                   transport.base, e.permanently,
                   redirected_transport.base)
        return redirected_transport

    def find_format(transport):
        last_error = errors.NotBranchError(transport.base)
        for prober_kls in self.probers:
            prober = prober_kls()
            try:
                return transport, prober.probe_transport(transport)
            except errors.NotBranchError as e:
                last_error = e
        else:
            # No prober matched; re-raise the most recent failure.
            raise last_error

    transport = get_transport(url)
    transport, format = do_catching_redirections(
        find_format, transport, redirected)
    return format.open(transport)
def test_receive_replays_and_updates_metadata(self):
    basedir = get_transport(self.setup_memory()).clone('path')
    basedir.create_prefix()
    ui = self.get_test_ui()
    # two journals exist from this simple operation - 0 and 1, but
    # need 3, as new clones start with 0
    mirror = mirrorset.initialise(basedir, 'myname', basedir, ui)
    basedir.create_prefix()
    basedir.mkdir('dir1')
    basedir.mkdir('dir2')
    basedir.put_bytes('abc', '1234567890\n')
    basedir.put_bytes('dir1/def', 'abcdef')
    mirror.finish_change()
    mirror.start_change()
    basedir.put_bytes('dada', '123456789a\n')
    mirror.finish_change()
    clonedir = basedir.clone('../clone')
    clonedir.create_prefix()
    clone = mirrorset.initialise(clonedir, 'myname', clonedir, ui)
    clone.cancel_change()
    clone.receive(mirror)
    mirrormeta = mirror._get_metadata()
    metadata = clone._get_metadata()
    self.assertEqual('2', metadata.get('metadata', 'latest'))
    self.assertEqual(
        mirrormeta.get('metadata', 'timestamp'),
        metadata.get('metadata', 'timestamp'))
    # check we got a file from each journal
    self.assertEqual('123456789a\n', clonedir.get_bytes('dada'))
    self.assertEqual('1234567890\n', clonedir.get_bytes('abc'))
    # And the journals should be identical.
    mirrorjournal = mirror._journaldir()
    clonejournal = clone._journaldir()
    self.assertEqual(mirrorjournal.get_bytes('1'),
                     clonejournal.get_bytes('1'))
    self.assertEqual(mirrorjournal.get_bytes('2'),
                     clonejournal.get_bytes('2'))
def test_aftp_degrade(self):
    t = transport.get_transport('aftp://host/path')
    self.failUnless(t.is_active)
    parent = t.clone('..')
    self.failUnless(parent.is_active)
    self.assertEqual('aftp://host/path', t.abspath(''))
def test_checks_when_there_is_a_keyring(self):
    basedir = get_transport(self.setup_memory()).clone('path')
    basedir.create_prefix()
    ui = self.get_test_ui()
    mirror = mirrorset.initialise(basedir, 'myname', basedir, ui)
    mirror.gpg_strategy = bzrgpg.LoopbackGPGStrategy(None)
    t = basedir.clone('.lmirror/sets/myname')
    t.put_bytes('lmirror.gpg', '')
    mirror.finish_change()
    metadatadir = mirror._metadatadir()
    self.assertEqual(
        "-----BEGIN PSEUDO-SIGNED CONTENT-----\n" +
        metadatadir.get_bytes('journals/1') +
        "-----END PSEUDO-SIGNED CONTENT-----\n",
        metadatadir.get_bytes('journals/1.sig'))
    clonedir = basedir.clone('../clone')
    clonedir.create_prefix()
    clone = mirrorset.initialise(clonedir, 'myname', clonedir, ui)
    clone.cancel_change()
    clone.gpgv_strategy = gpg.TestGPGVStrategy(
        [metadatadir.get_bytes('journals/1')])
    clone.receive(mirror)
    metadata = clone._get_metadata()
    self.assertEqual('1', metadata.get('metadata', 'latest'))
    self.assertEqual(
        "-----BEGIN PSEUDO-SIGNED CONTENT-----\n" +
        metadatadir.get_bytes('journals/1') +
        "-----END PSEUDO-SIGNED CONTENT-----\n",
        clone._metadatadir().get_bytes('journals/1.sig'))
def test_new_replace_delete(self):
    # Just check all types are handled
    basedir = get_transport(self.setup_memory()).clone('path')
    basedir.create_prefix()
    basedir.put_bytes('abc', 'def')
    basedir.put_bytes('bye', 'by')
    sourcedir = basedir.clone('../source')
    sourcedir.create_prefix()
    sourcedir.put_bytes('abc', '123412341234')
    sourcedir.put_bytes('new', '12341234')
    j1 = journals.Journal()
    j1.add('abc', 'replace', (
        journals.FileContent(
            '12039d6dd9a7e27622301e935b6eefc78846802e', 3, None),
        journals.FileContent(
            '5a78babbb162531b3a16c55310a4e7228d68f2e9', 12, None)))
    j1.add('bye', 'del', journals.FileContent('d', 2, None))
    j1.add('new', 'new', journals.FileContent(
        'c129b324aee662b04eccf68babba85851346dff9', 8, None))
    ui = UI()
    stream = journals.ReplayGenerator(j1, sourcedir, ui)
    reference_stream = []
    for item in stream.stream():
        reference_stream.append((item.type, item.path, item.content))
    content = b''.join(stream.as_bytes())
    source = BytesIO(content)
    replay = journals.FromFileGenerator(source, ui)
    file_stream = []
    for item in replay.stream():
        file_stream.append((item.type, item.path, item.content))
        item.ignore_file()
    self.assertEqual(reference_stream, file_stream)
def test_filter_callback(self):
    ui = self.get_test_ui()
    now = time.time()
    four_seconds = now - 4
    basedir = get_transport(self.setup_memory()).clone('path')
    basedir.create_prefix()
    ui = self.get_test_ui()
    basedir.create_prefix()
    basedir.mkdir('dir1')
    basedir.mkdir('dir2')
    basedir.put_bytes('abc', '1234567890\n')
    basedir.put_bytes('dir1/def', 'abcdef')
    last_timestamp = 0
    # get everything
    paths = []
    results = {'abc': None, 'dir1': True, 'dir2': False, 'dir1/def': None}
    def filter(path):
        """Filter path:

        :return: True to include a path, False to exclude it, and None to
            not influence it at all.
        """
        paths.append(path)
        return results[path]
    updater = journals.DiskUpdater(
        {}, basedir, 'name', last_timestamp, ui, filter_callback=filter)
    journal = updater.finished()
    expected = {
        'dir1': ('new', journals.DirContent()),
        'abc': ('new', journals.FileContent(
            '12039d6dd9a7e27622301e935b6eefc78846802e', 11, 0)),
        'dir1/def': ('new', journals.FileContent(
            '1f8ac10f23c5b5bc1167bda84b833e5c057a77d2', 6, 0)),
        }
    self.assertEqual(expected, journal.paths)
    self.assertEqual(set(['dir2', 'dir1', 'abc', 'dir1/def']), set(paths))
def initrepo(path):
    """Create a shared, treeless repository at `path` in the default
    bzrdir format, creating the base directory if necessary.
    """
    format = bzrdir.format_registry.make_bzrdir("default")
    to_transport = transport.get_transport(path)
    to_transport.ensure_base()
    newdir = format.initialize_on_transport(to_transport)
    repo = newdir.create_repository(shared=True)
    repo.set_make_working_trees(False)  # We don't want trees.
def setUp(self):
    """Start the virtual server over the local directory and keep a
    transport pointed at it; the server is stopped automatically on
    cleanup.
    """
    TestCaseInTempDir.setUp(self)
    self.server = self.VirtualServer(
        FatLocalTransport(local_path_to_url('.')))
    self.server.start_server()
    self.addCleanup(self.server.stop_server)
    self.transport = get_transport(self.server.get_url())
def setUp(self, backing_transport_server=None,
          client_path_extra='/extra/'):
    """Set up server for testing.

    :param backing_transport_server: backing server to use.  If not
        specified, a LocalURLServer at the current working directory will
        be used.
    :param client_path_extra: a path segment starting with '/' to append
        to the root URL for this server.  For instance, a value of
        '/foo/bar/' will mean the root of the backing transport will be
        published at a URL like `bzr://127.0.0.1:nnnn/foo/bar/`, rather
        than `bzr://127.0.0.1:nnnn/`.  Default value is `extra`, so that
        tests by default will fail unless they do the necessary path
        translation.
    """
    if not client_path_extra.startswith('/'):
        raise ValueError(client_path_extra)
    from bzrlib.transport.chroot import ChrootServer
    if backing_transport_server is None:
        from bzrlib.transport.local import LocalURLServer
        backing_transport_server = LocalURLServer()
    self.chroot_server = ChrootServer(
        self.get_backing_transport(backing_transport_server))
    self.chroot_server.setUp()
    self.backing_transport = transport.get_transport(
        self.chroot_server.get_url())
    self.root_client_path = self.client_path_extra = client_path_extra
    self.start_background_thread(self.thread_name_suffix)
def test_unbinding(self):
    from bzrlib.transport import get_transport
    b_base, wt_child = self.create_branches()
    # TestCaseWithSFTPServer only allows you to connect one time
    # to the SFTP server. So we have to create a connection and
    # keep it around, so that it can be reused
    __unused_t = get_transport(self.get_url('.'))
    wt_base = b_base.bzrdir.open_workingtree()
    open('base/a', 'wb').write('new base contents\n')
    wt_base.commit('base', rev_id='r@b-2')
    open('child/b', 'wb').write('new b child contents\n')
    self.assertRaises(errors.BoundBranchOutOfDate,
                      wt_child.commit, 'child', rev_id='r@c-2')
    self.assertEqual(['r@b-1'], wt_child.branch.revision_history())
    wt_child.branch.unbind()
    wt_child.commit('child', rev_id='r@c-2')
    self.assertEqual(['r@b-1', 'r@c-2'],
                     wt_child.branch.revision_history())
    self.assertEqual(['r@b-1', 'r@b-2'], b_base.revision_history())
    sftp_b_base = Branch.open(self.get_url('base'))
    self.assertRaises(errors.DivergedBranches,
                      wt_child.branch.bind, sftp_b_base)
def _open_dir(self, url):
    """Simple BzrDir.open clone that only uses specific probers.

    :param url: URL to open
    :return: ControlDir instance
    """
    def handle_redirect(transport, e, redirection_notice):
        # Only follow redirects whose target the policy allows.
        self.policy.checkOneURL(e.target)
        new_transport = transport._redirected_to(e.source, e.target)
        if new_transport is None:
            raise errors.NotBranchError(e.source)
        trace.note('%s is%s redirected to %s',
                   transport.base, e.permanently, new_transport.base)
        return new_transport

    def probe(transport):
        # Try each configured prober in turn, remembering the most
        # recent failure so it can be re-raised if none succeed.
        failure = errors.NotBranchError(transport.base)
        for factory in self.probers:
            try:
                return transport, factory().probe_transport(transport)
            except errors.NotBranchError as e:
                failure = e
        raise failure

    base_transport = get_transport(url)
    final_transport, fmt = do_catching_redirections(
        probe, base_transport, handle_redirect)
    return fmt.open(final_transport)
def test_reconcile_wrong_order(self):
    # a wrong order in primary parents is optionally correctable
    t = get_transport(self.get_url()).clone('wrong-first-parent')
    d = bzrlib.bzrdir.BzrDir.open_from_transport(t)
    repo = d.open_repository()
    repo.lock_read()
    try:
        graph = repo.get_graph()
        parents = graph.get_parent_map(['wrong-first-parent'])
        if parents['wrong-first-parent'] == ('1', '2'):
            raise TestSkipped(
                'wrong-first-parent is not setup for testing')
    finally:
        repo.unlock()
    self.checkUnreconciled(d, repo.reconcile())
    # nothing should have been altered yet : inventories without
    # revisions are not data loss incurring for current format
    reconciler = repo.reconcile(thorough=True)
    # these show up as inconsistent parents...
    self.assertEqual(1, reconciler.inconsistent_parents)
    # ...with no garbage inventories...
    self.assertEqual(0, reconciler.garbage_inventories)
    # ...and the parent ordering should now be fixed.
    repo.lock_read()
    self.addCleanup(repo.unlock)
    graph = repo.get_graph()
    self.assertEqual({'wrong-first-parent': ('1', '2')},
                     graph.get_parent_map(['wrong-first-parent']))
def test_error_creating_existing_set(self):
    """initialise() must refuse to overwrite an existing mirror set."""
    basedir = get_transport(self.setup_memory()).clone('path')
    basedir.create_prefix()
    ui = self.get_test_ui()
    # Create the set once; only the side effect matters, so the unused
    # `mirror` binding from the original has been dropped.
    mirrorset.initialise(basedir, 'myname', basedir, ui)
    # A second initialise with the same name must fail.
    self.assertRaises(ValueError,
                      mirrorset.initialise, basedir, 'myname', basedir, ui)
def main(self):
    """Select the worker class for the source rcstype and run the import."""
    force_bzr_to_use_urllib()
    set_default_timeout_function(lambda: 60.0)
    source_details = CodeImportSourceDetails.fromArguments(self.args)
    rcstype = source_details.rcstype
    target = source_details.target_rcstype
    # Map the source system to the worker implementation.
    if rcstype == 'git':
        worker_cls = (
            GitImportWorker if target == 'bzr' else GitToGitImportWorker)
    elif rcstype == 'bzr-svn':
        worker_cls = BzrSvnImportWorker
    elif rcstype == 'bzr':
        worker_cls = BzrImportWorker
    elif rcstype == 'cvs':
        worker_cls = CSCVSImportWorker
    else:
        raise AssertionError('unknown rcstype %r' % rcstype)
    opener_policy = opener_policies[self.options.access_policy](
        rcstype, target)
    if target == 'bzr':
        # Bazaar targets additionally need the foreign tree store and the
        # default branch store.
        worker = worker_cls(
            source_details,
            get_transport(config.codeimport.foreign_tree_store),
            get_default_bazaar_branch_store(),
            self.logger, opener_policy)
    else:
        worker = worker_cls(source_details, self.logger, opener_policy)
    return worker.run()
def test_upgrade_explicit_knit(self):
    # users can force an upgrade to knit format from a metadir weave
    # branch
    url = get_transport(self.get_url('metadir_weave_branch')).base
    # check --format takes effect
    bzrdir.BzrDirFormat._set_default_format(bzrdir.BzrDirFormat5())
    (out, err) = self.run_bzr(['upgrade', '--format=knit', url])
    expected = """starting upgrade of %s
making backup of tree history
%s.bzr has been backed up to %sbackup.bzr
if conversion fails, you can move this directory back to .bzr
if it succeeds, you can remove this directory if you wish
starting repository conversion
repository converted
finished
""" % (url, url, url)
    self.assertEqualDiff(expected, out)
    self.assertEqualDiff("", err)
    converted_dir = bzrdir.BzrDir.open(
        self.get_url('metadir_weave_branch'))
    # The control dir and its repository must now be in knit format.
    self.assertTrue(
        isinstance(converted_dir._format, bzrdir.BzrDirMetaFormat1))
    self.assertTrue(
        isinstance(converted_dir.open_repository()._format,
                   RepositoryFormatKnit1))
def test_reconcile_wrong_order(self):
    # a wrong order in primary parents is optionally correctable
    transport = get_transport(self.get_url()).clone('wrong-first-parent')
    control = bzrlib.bzrdir.BzrDir.open_from_transport(transport)
    repo = control.open_repository()
    repo.lock_read()
    try:
        parent_map = repo.get_graph().get_parent_map(['wrong-first-parent'])
        if parent_map['wrong-first-parent'] == ('1', '2'):
            raise TestSkipped('wrong-first-parent is not setup for testing')
    finally:
        repo.unlock()
    self.checkUnreconciled(control, repo.reconcile())
    # nothing should have been altered yet : inventories without
    # revisions are not data loss incurring for current format
    reconciler = repo.reconcile(thorough=True)
    # exactly one inconsistent-parent record is reported...
    self.assertEqual(1, reconciler.inconsistent_parents)
    # ...no garbage inventories...
    self.assertEqual(0, reconciler.garbage_inventories)
    # ...and the parent order should have been corrected.
    repo.lock_read()
    self.addCleanup(repo.unlock)
    fixed_map = repo.get_graph().get_parent_map(['wrong-first-parent'])
    self.assertEqual({'wrong-first-parent': ('1', '2')}, fixed_map)
def test_orders_new_replace_delete(self):
    # new-replace-delete is a sane default order.
    # 'trace+' wraps the transport so operations are recorded in
    # basedir._activity for the assertion at the end.
    basedir = get_transport('trace+' + self.setup_memory()).clone('path')
    basedir.create_prefix()
    # Seed the mirror with one file to replace and one to delete.
    basedir.put_bytes('abc', 'def')
    basedir.put_bytes('bye', 'by')
    # The source holds the new content for 'abc' and a brand new file.
    sourcedir = basedir.clone('../source')
    sourcedir.create_prefix()
    sourcedir.put_bytes('abc', '123412341234')
    sourcedir.put_bytes('new', '12341234')
    # Build a journal describing one replace, one delete, one new file.
    # FileContent args appear to be (sha1-ish id, length, unused) —
    # TODO confirm against the journals module.
    j1 = journals.Journal()
    j1.add('abc', 'replace', (
        journals.FileContent(
            '12039d6dd9a7e27622301e935b6eefc78846802e', 3, None),
        journals.FileContent(
            '5a78babbb162531b3a16c55310a4e7228d68f2e9', 12, None)))
    j1.add('bye', 'del', journals.FileContent('d', 2, None))
    j1.add('new', 'new', journals.FileContent(
        'c129b324aee662b04eccf68babba85851346dff9', 8, None))
    # Discard the activity generated by the setup writes above so the
    # assertion only sees the replay's operations.
    del basedir._activity[:]
    ui = UI()
    generator = journals.ReplayGenerator(j1, sourcedir, ui)
    replay = journals.TransportReplay(j1, generator, basedir, ui)
    replay.replay()
    # Expected order: new files first (fetch then rename into place),
    # then replacements, then deletions last.
    self.assertEqual([
        ('get', 'new'),
        ('rename', 'new.lmirrortemp', 'new'),
        ('get', 'abc'),
        ('get', 'abc'),
        ('delete', 'abc'),
        ('rename', 'abc.lmirrortemp', 'abc'),
        ('delete', 'bye')],
        basedir._activity)
def run(self, user_id, port=None, branch_directory=None,
        codehosting_endpoint_url=None, inet=False, protocol=None):
    """Serve the codehosting VFS for one user over a smart server.

    :param user_id: id of the user whose branches are served (converted
        to int before use).
    :param port: optional port specification passed to
        self.get_host_and_port.
    :param branch_directory: root directory of hosted branches, handed to
        get_lp_server.
    :param codehosting_endpoint_url: endpoint URL for get_lp_server —
        presumably the internal codehosting XML-RPC service; verify
        against get_lp_server's contract.
    :param inet: forwarded to the protocol factory — NOTE(review): looks
        like it selects stdin/stdout serving; confirm.
    :param protocol: transport server factory; defaults to the registered
        default from transport_server_registry.
    """
    from lp.codehosting.bzrutils import install_oops_handler
    from lp.codehosting.vfs import get_lp_server, hooks
    install_oops_handler(user_id)
    # Cap the process address space at 4GB so a runaway operation cannot
    # exhaust the host's memory.
    four_gig = int(4e9)
    resource.setrlimit(resource.RLIMIT_AS, (four_gig, four_gig))
    seen_new_branch = hooks.SetProcTitleHook()
    if protocol is None:
        protocol = transport_server_registry.get()
    lp_server = get_lp_server(
        int(user_id), codehosting_endpoint_url, branch_directory,
        seen_new_branch.seen)
    lp_server.start_server()
    try:
        old_lockdir_timeout = lockdir._DEFAULT_TIMEOUT_SECONDS
        lp_transport = get_transport(lp_server.get_url())
        host, port = self.get_host_and_port(port)
        # Disable lock waiting while serving, and restore the previous
        # timeout afterwards no matter how the protocol exits.
        lockdir._DEFAULT_TIMEOUT_SECONDS = 0
        try:
            protocol(lp_transport, host, port, inet)
        finally:
            lockdir._DEFAULT_TIMEOUT_SECONDS = old_lockdir_timeout
    finally:
        lp_server.stop_server()