def _reconcile_revision_history(self):
    """Make the branch's cached revno match its real left-hand history.

    Walks the left-hand ancestry back from the branch tip, counting
    revisions (stopping quietly at a ghost parent), and rewrites the
    branch's last-revision info if the stored revno disagrees with the
    real count.  Sets self.fixed_history to True when a fix was applied.
    """
    last_revno, last_revision_id = self.branch.last_revision_info()
    real_history = []
    graph = self.branch.repository.get_graph()
    try:
        for revid in graph.iter_lefthand_ancestry(
                last_revision_id, (_mod_revision.NULL_REVISION,)):
            real_history.append(revid)
    except errors.RevisionNotPresent:
        pass  # Hit a ghost left hand parent
    # The ancestry was collected tip-first; oldest-first is what revno
    # counting (and any history rewrite) expects.
    real_history.reverse()
    if last_revno != len(real_history):
        self.fixed_history = True
        # Technically for Branch5 formats, it is more efficient to use
        # set_revision_history, as this will regenerate it again.
        # Not really worth a whole BranchReconciler class just for this,
        # though.
        ui.ui_factory.note(gettext('Fixing last revision info {0} '\
                                   ' => {1}').format(
                                   last_revno, len(real_history)))
        self.branch.set_last_revision_info(len(real_history),
                                           last_revision_id)
    else:
        self.fixed_history = False
        ui.ui_factory.note(gettext('revision_history ok.'))
def _emit_progress(self):
    """Refresh the progress bar with the current stage name.

    Includes the per-stage entry count when one is being tracked.
    """
    entries = self.pb_entries_count
    if entries is None:
        text = gettext("%s - Stage") % (self.pb_stage_name, )
    else:
        text = gettext("{0} [{1}] - Stage").format(self.pb_stage_name,
                                                   entries)
    self.pb.update(text, self.pb_stage_count, self.pb_stage_total)
def _convert_items(items, format, clean_up, dry_run, label=None):
    """Convert a sequence of control directories to the given format.

    :param items: the control directories to upgrade
    :param format: the format to convert to or None for the best default
    :param clean_up: if True, the backup.bzr directory is removed if the
      upgrade succeeded for a given repo/branch/tree
    :param dry_run: show what would happen but don't actually do any upgrades
    :param label: the label for these items or None to calculate one
    :return: items successfully upgraded, exceptions
    """
    succeeded = []
    exceptions = []
    child_pb = ui.ui_factory.nested_progress_bar()
    child_pb.update(gettext('Upgrading bzrdirs'), 0, len(items))
    for i, control_dir in enumerate(items):
        # Do the conversion
        location = control_dir.root_transport.base
        bzr_object, bzr_label = _get_object_and_label(control_dir)
        type_label = label or bzr_label
        child_pb.update(gettext("Upgrading %s") % (type_label), i+1,
                        len(items))
        ui.ui_factory.note(gettext('Upgrading {0} {1} ...').format(
            type_label,
            urlutils.unescape_for_display(location, 'utf-8'),))
        try:
            if not dry_run:
                # cv is not used further in this view; presumably Convert()
                # performs the upgrade as a side effect of construction.
                cv = Convert(control_dir=control_dir, format=format)
        except errors.UpToDateFormat, ex:
            # Already in the target format counts as success.
            ui.ui_factory.note(str(ex))
            succeeded.append(control_dir)
            continue
        except Exception, ex:
            trace.warning('conversion error: %s' % ex)
            exceptions.append(ex)
            continue
        # NOTE(review): the success-path bookkeeping (appending to
        # succeeded, clean_up handling, child_pb.finished(), and the
        # return) appears truncated in this view - confirm against the
        # full file.
def __fetch(self): """Primary worker function. This initialises all the needed variables, and then fetches the requested revisions, finally clearing the progress bar. """ # Roughly this is what we're aiming for fetch to become: # # missing = self.sink.insert_stream(self.source.get_stream(search)) # if missing: # missing = self.sink.insert_stream(self.source.get_items(missing)) # assert not missing self.count_total = 0 self.file_ids_names = {} pb = ui.ui_factory.nested_progress_bar() pb.show_pct = pb.show_count = False try: pb.update(gettext("Finding revisions"), 0, 2) search_result = self._revids_to_fetch() mutter('fetching: %s', search_result) if search_result.is_empty(): return pb.update(gettext("Fetching revisions"), 1, 2) self._fetch_everything_for_search(search_result) finally: pb.finished()
def _reconcile_revision_history(self):
    """Reconcile the stored last-revision revno with the real ancestry.

    Counts the actual left-hand history (ghost parents terminate the
    walk silently) and, if the recorded revno is wrong, rewrites the
    branch's last-revision info.  Records the outcome in
    self.fixed_history.
    """
    last_revno, last_revision_id = self.branch.last_revision_info()
    real_history = []
    graph = self.branch.repository.get_graph()
    try:
        for revid in graph.iter_lefthand_ancestry(
                last_revision_id, (_mod_revision.NULL_REVISION, )):
            real_history.append(revid)
    except errors.RevisionNotPresent:
        pass  # Hit a ghost left hand parent
    # Collected newest-first; flip to oldest-first before counting.
    real_history.reverse()
    if last_revno != len(real_history):
        self.fixed_history = True
        # Technically for Branch5 formats, it is more efficient to use
        # set_revision_history, as this will regenerate it again.
        # Not really worth a whole BranchReconciler class just for this,
        # though.
        ui.ui_factory.note(gettext('Fixing last revision info {0} '\
                                   ' => {1}').format(
                                   last_revno, len(real_history)))
        self.branch.set_last_revision_info(len(real_history),
                                           last_revision_id)
    else:
        self.fixed_history = False
        ui.ui_factory.note(gettext('revision_history ok.'))
def run(self):
    """Perform the unshelving operation."""
    self.tree.lock_tree_write()
    # Cleanups are executed in reverse order in the finally block, so the
    # tree unlock always happens last.
    cleanups = [self.tree.unlock]
    try:
        if self.read_shelf:
            trace.note(gettext('Using changes with id "%d".') % self.shelf_id)
            unshelver = self.manager.get_unshelver(self.shelf_id)
            cleanups.append(unshelver.finalize)
            if unshelver.message is not None:
                trace.note(gettext('Message: %s') % unshelver.message)
            change_reporter = delta._ChangeReporter()
            merger = unshelver.make_merger(None)
            merger.change_reporter = change_reporter
            # Three mutually-exclusive modes: apply, preview diff, or
            # just list the changes.
            if self.apply_changes:
                merger.do_merge()
            elif self.show_diff:
                self.write_diff(merger)
            else:
                self.show_changes(merger)
        if self.delete_shelf:
            self.manager.delete_shelf(self.shelf_id)
            trace.note(gettext('Deleted changes with id "%d".') % self.shelf_id)
    finally:
        for cleanup in reversed(cleanups):
            cleanup()
def _show_push_branch(br_from, revision_id, location, to_file, verbose=False,
                      overwrite=False, remember=False, stacked_on=None,
                      create_prefix=False, use_existing_dir=False,
                      no_tree=False):
    """Push a branch to a location.

    :param br_from: the source branch
    :param revision_id: the revision-id to push up to
    :param location: the url of the destination
    :param to_file: the output stream
    :param verbose: if True, display more output than normal
    :param overwrite: list of things to overwrite ("history", "tags")
        or boolean indicating for everything
    :param remember: if True, store the location as the push location for
        the source branch
    :param stacked_on: the url of the branch, if any, to stack on;
        if set, only the revisions not in that branch are pushed
    :param create_prefix: if True, create the necessary parent directories
        at the destination if they don't already exist
    :param use_existing_dir: if True, proceed even if the destination
        directory exists without a current .bzr directory in it
    """
    to_transport = transport.get_transport(location)
    try:
        dir_to = controldir.ControlDir.open_from_transport(to_transport)
    except errors.NotBranchError:
        # Didn't find anything
        dir_to = None

    if dir_to is None:
        # No control directory at the destination: clone a fresh branch.
        try:
            br_to = br_from.create_clone_on_transport(
                to_transport, revision_id=revision_id, stacked_on=stacked_on,
                create_prefix=create_prefix,
                use_existing_dir=use_existing_dir, no_tree=no_tree)
        except errors.AlreadyControlDirError, err:
            raise errors.BzrCommandError(
                gettext(
                    "Target directory %s already contains a .bzr directory, "
                    "but it is not valid.") % (location, ))
        except errors.FileExists, err:
            if not use_existing_dir:
                raise errors.BzrCommandError(
                    gettext("Target directory %s"
                            " already exists, but does not have a .bzr"
                            " directory. Supply --use-existing-dir to push"
                            " there anyway.") % location)
            # This shouldn't occur, but if it does the FileExists error will be
            # more informative than an UnboundLocalError for br_to.
            raise
        # NOTE(review): the rest of this function (reporting / the dir_to
        # is not None branch) appears truncated in this view.
def run(self, name=None, no_check=False, verbose=False):
    """Show the stored Launchpad user ID, or set a new one.

    This is totally separate from any launchpadlib login system.
    """
    from bzrlib.plugins.launchpad import account
    check_account = not no_check
    if name is None:
        # Display mode: print the configured ID or complain that none is set.
        username = account.get_lp_login()
        if not username:
            self.outf.write(gettext('No Launchpad user ID configured.\n'))
            return 1
        if check_account:
            account.check_lp_login(username)
            if verbose:
                self.outf.write(
                    gettext(
                        "Launchpad user ID exists and has SSH keys.\n"))
        self.outf.write(username + '\n')
        return
    # Set mode: normalise, optionally verify on Launchpad, then store.
    name = name.lower()
    if check_account:
        account.check_lp_login(name)
        if verbose:
            self.outf.write(
                gettext(
                    "Launchpad user ID exists and has SSH keys.\n"))
    account.set_lp_login(name)
    if verbose:
        self.outf.write(
            gettext("Launchpad user ID set to '%s'.\n") % (name, ))
def run(self):
    """Perform the unshelving operation."""
    self.tree.lock_tree_write()
    # LIFO stack of undo callables; popping in the finally block runs
    # them newest-first, so the tree unlock fires last.
    undo_stack = [self.tree.unlock]
    try:
        if self.read_shelf:
            trace.note(
                gettext('Using changes with id "%d".') % self.shelf_id)
            unshelver = self.manager.get_unshelver(self.shelf_id)
            undo_stack.append(unshelver.finalize)
            message = unshelver.message
            if message is not None:
                trace.note(gettext('Message: %s') % message)
            merger = unshelver.make_merger(None)
            merger.change_reporter = delta._ChangeReporter()
            if self.apply_changes:
                merger.do_merge()
            elif self.show_diff:
                self.write_diff(merger)
            else:
                self.show_changes(merger)
        if self.delete_shelf:
            self.manager.delete_shelf(self.shelf_id)
            trace.note(
                gettext('Deleted changes with id "%d".') % self.shelf_id)
    finally:
        while undo_stack:
            undo_stack.pop()()
def run(self, name=None, no_check=False, verbose=False):
    """Show or set the Launchpad user ID."""
    # This is totally separate from any launchpadlib login system.
    from bzrlib.plugins.launchpad import account
    check_account = not no_check

    if name is None:
        # No name supplied: report the currently configured ID.
        username = account.get_lp_login()
        if username:
            if check_account:
                account.check_lp_login(username)
                if verbose:
                    self.outf.write(gettext(
                        "Launchpad user ID exists and has SSH keys.\n"))
            self.outf.write(username + '\n')
        else:
            self.outf.write(gettext('No Launchpad user ID configured.\n'))
            return 1
    else:
        # Name supplied: optionally verify it on Launchpad, then store it.
        name = name.lower()
        if check_account:
            account.check_lp_login(name)
            if verbose:
                self.outf.write(gettext(
                    "Launchpad user ID exists and has SSH keys.\n"))
        account.set_lp_login(name)
        if verbose:
            self.outf.write(gettext("Launchpad user ID set to '%s'.\n") %
                            (name,))
def delete_items(deletables, dry_run=False):
    """Delete files in the deletables iterable"""
    def onerror(function, path, excinfo):
        """Show warning for errors seen by rmtree.
        """
        # Handle only permission error while removing files.
        # Other errors are re-raised.
        if function is not os.remove or excinfo[1].errno != errno.EACCES:
            raise
        ui.ui_factory.show_warning(gettext('unable to remove %s') % path)
    has_deleted = False
    for path, subp in deletables:
        # Print the header only once, and only if there is something to do.
        if not has_deleted:
            note(gettext("deleting paths:"))
            has_deleted = True
        if not dry_run:
            if isdir(path):
                # Directory trees: permission errors are reported via onerror.
                shutil.rmtree(path, onerror=onerror)
            else:
                try:
                    os.unlink(path)
                    note(' ' + subp)
                except OSError, e:
                    # We handle only permission error here
                    if e.errno != errno.EACCES:
                        raise e
                    ui.ui_factory.show_warning(gettext(
                        'unable to remove "{0}": {1}.').format(
                        path, e.strerror))
        else:
            # Dry run: just list what would be removed.
            note(' ' + subp)
def _load_indexes(self):
    """Load the inventory and revision indexes used by reconciliation."""
    repo = self.repo
    progress = self.pb
    self.transaction = repo.get_transaction()
    progress.update(gettext('Reading indexes'), 0, 2)
    self.inventory = repo.inventories
    progress.update(gettext('Reading indexes'), 1, 2)
    # Surface revision-parent inconsistencies before touching revisions.
    repo._check_for_inconsistent_revision_parents()
    self.revisions = repo.revisions
    progress.update(gettext('Reading indexes'), 2, 2)
def _find_proposals(self, revno, b, pb):
    """Return merged Launchpad proposals that landed as revno of branch b."""
    from bzrlib.plugins.launchpad import (lp_api, lp_registration)
    lp = lp_api.login(lp_registration.LaunchpadService())
    pb.update(gettext('Finding Launchpad branch'))
    launchpad_branch = lp_api.LaunchpadBranch.from_bzr(
        lp, b, create_missing=False)
    pb.update(gettext('Finding proposals'))
    proposals = launchpad_branch.lp.getMergeProposals(
        status=['Merged'], merged_revnos=[revno])
    return list(proposals)
def report(self, to_file):
    """Write a human-readable description of the result."""
    if self.branch_push_result is not None:
        # An existing branch was pushed to; delegate the reporting.
        self.branch_push_result.report(to_file)
        return
    # A brand-new branch was created.
    if self.stacked_on is not None:
        note(gettext('Created new stacked branch referring to %s.') %
             self.stacked_on)
    else:
        note(gettext('Created new branch.'))
def report(self, to_file):
    """Write a human-readable description of the result."""
    if self.branch_push_result is None:
        # A new branch was created rather than pushed into an existing one.
        if self.stacked_on is not None:
            note(
                gettext('Created new stacked branch referring to %s.') %
                self.stacked_on)
        else:
            note(gettext('Created new branch.'))
    else:
        self.branch_push_result.report(to_file)
def run(self, public_url=None, project='', product=None, branch_name='',
        branch_title='', branch_description='', author='', link_bug=None,
        dry_run=False):
    """Register a branch on Launchpad, optionally linking a bug."""
    from bzrlib.plugins.launchpad.lp_registration import (
        BranchRegistrationRequest, BranchBugLinkRequest,
        DryRunLaunchpadService, LaunchpadService)
    if public_url is None:
        # Fall back to the public location of the branch in '.'.
        try:
            b = _mod_branch.Branch.open_containing('.')[0]
        except NotBranchError:
            raise BzrCommandError(gettext(
                'register-branch requires a public '
                'branch url - see bzr help register-branch.'))
        public_url = b.get_public_branch()
        if public_url is None:
            raise NoPublicBranch(b)
    if product is not None:
        # --product is the deprecated spelling of --project.
        project = product
        trace.note(gettext(
            '--product is deprecated; please use --project.'))

    rego = BranchRegistrationRequest(branch_url=public_url,
                                     branch_name=branch_name,
                                     branch_title=branch_title,
                                     branch_description=branch_description,
                                     product_name=project,
                                     author_email=author,
                                     )
    linko = BranchBugLinkRequest(branch_url=public_url, bug_id=link_bug)
    if not dry_run:
        service = LaunchpadService()
        # This gives back the xmlrpc url that can be used for future
        # operations on the branch.  It's not so useful to print to the
        # user since they can't do anything with it from a web browser; it
        # might be nice for the server to tell us about an html url as
        # well.
    else:
        # Run on service entirely in memory
        service = DryRunLaunchpadService()
    service.gather_user_credentials()
    rego.submit(service)
    if link_bug:
        linko.submit(service)
    self.outf.write('Branch registered.\n')
def _filter_iter_changes(self, iter_changes):
    """Process iter_changes.

    This method reports on the changes in iter_changes to the user, and
    converts 'missing' entries in the iter_changes iterator to 'deleted'
    entries. 'missing' entries have their new path reset to None and
    their new versioned flag cleared, so they are recorded as deletions.

    :param iter_changes: An iter_changes to process.
    :return: A generator of changes.
    """
    reporter = self.reporter
    report_changes = reporter.is_verbose()
    deleted_ids = []
    for change in iter_changes:
        if report_changes:
            old_path = change[1][0]
            new_path = change[1][1]
            versioned = change[3][1]
        kind = change[6][1]
        versioned = change[3][1]
        if kind is None and versioned:
            # 'missing' path
            if report_changes:
                reporter.missing(new_path)
            deleted_ids.append(change[0])
            # Reset the new path (None) and new versioned flag (False)
            change = (change[0], (change[1][0], None), change[2],
                      (change[3][0], False)) + change[4:]
            new_path = change[1][1]
            versioned = False
        elif kind == 'tree-reference':
            if self.recursive == 'down':
                self._commit_nested_tree(change[0], change[1][1])
        if change[3][0] or change[3][1]:
            # Versioned on at least one side: include it in the commit.
            yield change
            if report_changes:
                if new_path is None:
                    reporter.deleted(old_path)
                elif old_path is None:
                    reporter.snapshot_change(gettext('added'), new_path)
                elif old_path != new_path:
                    reporter.renamed(gettext('renamed'),
                                     old_path, new_path)
                else:
                    if (new_path or self.work_tree.branch.repository.
                            _format.rich_root_data):
                        # Don't report on changes to '' in non rich root
                        # repositories.
                        reporter.snapshot_change(gettext('modified'),
                                                 new_path)
            self._next_progress_entry()
    # Unversion IDs that were found to be deleted
    self.deleted_ids = deleted_ids
def create_now(cls, launchpad, bzr_branch):
    """Create a Bazaar branch on Launchpad for the supplied branch."""
    def not_registered(what):
        # Both failure modes share the same message template.
        return errors.BzrError(
            gettext('%s is not registered on Launchpad') % what)

    url = cls.tweak_url(bzr_branch.get_push_location(), launchpad)
    if not cls.plausible_launchpad_url(url):
        raise not_registered(bzr_branch.base)
    # Push the branch up, then look it up on Launchpad by its URL.
    bzr_branch.create_clone_on_transport(transport.get_transport(url))
    lp_branch = launchpad.branches.getByUrl(url=url)
    if lp_branch is None:
        raise not_registered(url)
    return lp_branch
def create_now(cls, launchpad, bzr_branch):
    """Create a Bazaar branch on Launchpad for the supplied branch.

    :param launchpad: the Launchpad service object used for lookups.
    :param bzr_branch: the local branch whose push location is used.
    :return: the Launchpad branch object found at the push URL.
    :raises errors.BzrError: if the push location is not a plausible
        Launchpad URL, or the pushed branch cannot be found on Launchpad.
    """
    url = cls.tweak_url(bzr_branch.get_push_location(), launchpad)
    if not cls.plausible_launchpad_url(url):
        raise errors.BzrError(
            gettext('%s is not registered on Launchpad') %
            bzr_branch.base)
    # Push the branch to its Launchpad location, then resolve it by URL.
    bzr_branch.create_clone_on_transport(transport.get_transport(url))
    lp_branch = launchpad.branches.getByUrl(url=url)
    if lp_branch is None:
        raise errors.BzrError(
            gettext('%s is not registered on Launchpad') % url)
    return lp_branch
def run(self, public_url=None, project='', product=None, branch_name='',
        branch_title='', branch_description='', author='', link_bug=None,
        dry_run=False):
    """Register a branch with Launchpad and optionally link it to a bug."""
    from bzrlib.plugins.launchpad.lp_registration import (
        BranchRegistrationRequest, BranchBugLinkRequest,
        DryRunLaunchpadService, LaunchpadService)
    if public_url is None:
        # Derive the URL from the branch containing the current directory.
        try:
            b = _mod_branch.Branch.open_containing('.')[0]
        except NotBranchError:
            raise BzrCommandError(
                gettext('register-branch requires a public '
                        'branch url - see bzr help register-branch.'))
        public_url = b.get_public_branch()
        if public_url is None:
            raise NoPublicBranch(b)
    if product is not None:
        # Keep accepting the old option name, with a deprecation note.
        project = product
        trace.note(
            gettext('--product is deprecated; please use --project.'))

    rego = BranchRegistrationRequest(
        branch_url=public_url,
        branch_name=branch_name,
        branch_title=branch_title,
        branch_description=branch_description,
        product_name=project,
        author_email=author,
        )
    linko = BranchBugLinkRequest(branch_url=public_url, bug_id=link_bug)
    if not dry_run:
        service = LaunchpadService()
        # This gives back the xmlrpc url that can be used for future
        # operations on the branch.  It's not so useful to print to the
        # user since they can't do anything with it from a web browser; it
        # might be nice for the server to tell us about an html url as
        # well.
    else:
        # Run on service entirely in memory
        service = DryRunLaunchpadService()
    service.gather_user_credentials()
    rego.submit(service)
    if link_bug:
        linko.submit(service)
    self.outf.write('Branch registered.\n')
def _filter_iter_changes(self, iter_changes):
    """Process iter_changes.

    This method reports on the changes in iter_changes to the user, and
    converts 'missing' entries in the iter_changes iterator to 'deleted'
    entries. 'missing' entries have their new path set to None and their
    new versioned flag cleared so they are committed as deletions.

    :param iter_changes: An iter_changes to process.
    :return: A generator of changes.
    """
    reporter = self.reporter
    report_changes = reporter.is_verbose()
    deleted_ids = []
    for change in iter_changes:
        if report_changes:
            old_path = change[1][0]
            new_path = change[1][1]
            versioned = change[3][1]
        kind = change[6][1]
        versioned = change[3][1]
        if kind is None and versioned:
            # 'missing' path
            if report_changes:
                reporter.missing(new_path)
            deleted_ids.append(change[0])
            # Reset the new path (None) and new versioned flag (False)
            change = (change[0], (change[1][0], None), change[2],
                      (change[3][0], False)) + change[4:]
            new_path = change[1][1]
            versioned = False
        elif kind == 'tree-reference':
            if self.recursive == 'down':
                self._commit_nested_tree(change[0], change[1][1])
        if change[3][0] or change[3][1]:
            # Versioned before or after: this change is part of the commit.
            yield change
            if report_changes:
                if new_path is None:
                    reporter.deleted(old_path)
                elif old_path is None:
                    reporter.snapshot_change(gettext('added'), new_path)
                elif old_path != new_path:
                    reporter.renamed(gettext('renamed'),
                                     old_path, new_path)
                else:
                    if (new_path or
                            self.work_tree.branch.repository._format.rich_root_data):
                        # Don't report on changes to '' in non rich root
                        # repositories.
                        reporter.snapshot_change(gettext('modified'),
                                                 new_path)
            self._next_progress_entry()
    # Unversion IDs that were found to be deleted
    self.deleted_ids = deleted_ids
def _wait_for_clients_to_disconnect(self):
    """Block until all active client connections have finished.

    Logs a periodic reminder (every _LOG_WAITING_TIMEOUT seconds) while
    connections remain.
    """
    self._poll_active_connections()
    if not self._active_connections:
        return
    trace.note(gettext('Waiting for %d client(s) to finish') %
               (len(self._active_connections),))
    next_log_at = self._timer() + self._LOG_WAITING_TIMEOUT
    while self._active_connections:
        now = self._timer()
        if now >= next_log_at:
            trace.note(gettext('Still waiting for %d client(s) to finish')
                       % (len(self._active_connections),))
            next_log_at = now + self._LOG_WAITING_TIMEOUT
        self._poll_active_connections(self._SHUTDOWN_POLL_TIMEOUT)
class ApplyReporter(ShelfReporter):
    """Reporter used when applying (un-shelving) shelved changes.

    Prompts are phrased from the perspective of applying the shelf back,
    so each message reads as the inverse of the corresponding shelve
    prompt.
    """

    # Prompt templates keyed by change type; interpolated with a dict of
    # values by ShelfReporter.prompt_change.
    vocab = {'add file': gettext('Delete file "%(path)s"?'),
             'binary': gettext('Apply binary changes?'),
             'change kind': gettext('Change "%(path)s" from %(this)s'
                                    ' to %(other)s?'),
             'delete file': gettext('Add file "%(path)s"?'),
             'final': gettext('Apply %d change(s)?'),
             'hunk': gettext('Apply change?'),
             'modify target': gettext('Change target of'
                                      ' "%(path)s" from "%(this)s" to "%(other)s"?'),
             'rename': gettext('Rename "%(this)s" => "%(other)s"?'),
             }

    # Show diffs inverted: applying a shelf undoes what shelving recorded.
    invert_diff = True

    def changes_destroyed(self):
        # Applying changes never destroys them, so suppress the base-class
        # notification.
        pass
class ShelfReporter(object):
    """Report progress and build prompts for the interactive shelve UI."""

    # Prompt templates keyed by change type.  Every template is
    # interpolated with a dict (see prompt_change), so all placeholders
    # must use the %(name)s mapping form.
    vocab = {'add file': gettext('Shelve adding file "%(path)s"?'),
             'binary': gettext('Shelve binary changes?'),
             # BUG FIX: this template used a bare "%s"; since prompt_change
             # formats with a dict, the bare %s rendered the whole mapping
             # instead of the path.  Use %(path)s like every sibling entry
             # (and like ApplyReporter's counterpart).
             'change kind': gettext('Shelve changing "%(path)s" from %(other)s'
                                    ' to %(this)s?'),
             'delete file': gettext('Shelve removing file "%(path)s"?'),
             'final': gettext('Shelve %d change(s)?'),
             'hunk': gettext('Shelve?'),
             'modify target': gettext('Shelve changing target of'
                                      ' "%(path)s" from "%(other)s" to "%(this)s"?'),
             'rename': gettext('Shelve renaming "%(other)s" =>'
                               ' "%(this)s"?')
             }

    # Shelving shows the diff the normal way round (cf. ApplyReporter).
    invert_diff = False

    def __init__(self):
        self.delta_reporter = delta._ChangeReporter()

    def no_changes(self):
        """Report that no changes were selected to apply."""
        trace.warning('No changes to shelve.')

    def shelved_id(self, shelf_id):
        """Report the id changes were shelved to."""
        trace.note(gettext('Changes shelved with id "%d".') % shelf_id)

    def changes_destroyed(self):
        """Report that changes were made without shelving."""
        trace.note(gettext('Selected changes destroyed.'))

    def selected_changes(self, transform):
        """Report the changes that were selected."""
        trace.note(gettext("Selected changes:"))
        changes = transform.iter_changes()
        delta.report_changes(changes, self.delta_reporter)

    def prompt_change(self, change):
        """Determine the prompt for a change to apply.

        :param change: a change tuple whose first element names the change
            type (a vocab key) and whose remaining elements supply the
            values interpolated into the template.
        :return: the formatted prompt string.
        """
        if change[0] == 'rename':
            vals = {'this': change[3], 'other': change[2]}
        elif change[0] == 'change kind':
            vals = {'path': change[4], 'other': change[2],
                    'this': change[3]}
        elif change[0] == 'modify target':
            vals = {'path': change[2], 'other': change[3],
                    'this': change[4]}
        else:
            vals = {'path': change[3]}
        prompt = self.vocab[change[0]] % vals
        return prompt
def gssapi_login(self, user):
    """Attempt GSSAPI (Kerberos) authentication with the FTP server.

    Returns the final server response on success; falls through (and
    mutters) if the server does not accept AUTH GSSAPI.
    """
    # Try GSSAPI login first

    # Used FTP response codes:
    # 235 [ADAT=base64data] - indicates that the security data exchange
    #     completed successfully.
    # 334 [ADAT=base64data] - indicates that the requested security
    #     mechanism is ok, and includes security data to be used by the
    #     client to construct the next command.
    # 335 [ADAT=base64data] - indicates that the security data is
    #     acceptable, and more is required to complete the security
    #     data exchange.
    resp = self.sendcmd('AUTH GSSAPI')
    if resp.startswith('334 '):
        rc, self.vc = kerberos.authGSSClientInit("ftp@%s" % self.host)
        if kerberos.authGSSClientStep(self.vc, "") != 1:
            # Keep exchanging ADAT tokens until the handshake completes.
            while resp[:4] in ('334 ', '335 '):
                authdata = kerberos.authGSSClientResponse(self.vc)
                resp = self.sendcmd('ADAT ' + authdata)
                if resp[:9] in ('235 ADAT=', '335 ADAT='):
                    rc = kerberos.authGSSClientStep(self.vc, resp[9:])
                    # 235 must complete the step (rc 1); 335 must need
                    # more data (rc 0).  Anything else is a protocol error.
                    if not ((resp.startswith('235 ') and rc == 1) or
                            (resp.startswith('335 ') and rc == 0)):
                        raise ftplib.error_reply, resp
        note(
            gettext("Authenticated as %s") %
            kerberos.authGSSClientUserName(self.vc))
        # Monkey patch ftplib
        self.putcmd = self.mic_putcmd
        self.getline = self.mic_getline
        self.sendcmd('USER ' + user)
        return resp
    mutter("Unable to use GSSAPI authentication: %s", resp)
def _wait_for_clients_to_disconnect(self):
    """Wait for all active client connections to finish.

    While waiting, logs a reminder every _LOG_WAITING_TIMEOUT seconds.
    """
    self._poll_active_connections()
    if not self._active_connections:
        return
    trace.note(
        gettext('Waiting for %d client(s) to finish') %
        (len(self._active_connections), ))
    t_next_log = self._timer() + self._LOG_WAITING_TIMEOUT
    while self._active_connections:
        now = self._timer()
        if now >= t_next_log:
            trace.note(
                gettext('Still waiting for %d client(s) to finish') %
                (len(self._active_connections), ))
            t_next_log = now + self._LOG_WAITING_TIMEOUT
        # Poll with a timeout so we wake up periodically to log.
        self._poll_active_connections(self._SHUTDOWN_POLL_TIMEOUT)
def upgrade(url, format=None, clean_up=False, dry_run=False):
    """Upgrade locations to format.

    This routine wraps the smart_upgrade() routine with a nicer UI.
    In particular, it ensures all URLs can be opened before starting
    and reports a summary at the end if more than one upgrade was attempted.
    This routine is useful for command line tools. Other bzrlib clients
    probably ought to use smart_upgrade() instead.

    :param url: a URL of the locations to upgrade.
    :param format: the format to convert to or None for the best default
    :param clean_up: if True, the backup.bzr directory is removed if the
      upgrade succeeded for a given repo/branch/tree
    :param dry_run: show what would happen but don't actually do any upgrades
    :return: the list of exceptions encountered
    """
    control_dirs = [ControlDir.open_unsupported(url)]
    attempted, succeeded, exceptions = smart_upgrade(control_dirs,
        format, clean_up=clean_up, dry_run=dry_run)
    if len(attempted) > 1:
        # Only print a summary when more than one upgrade was attempted.
        attempted_count = len(attempted)
        succeeded_count = len(succeeded)
        failed_count = attempted_count - succeeded_count
        ui.ui_factory.note(
            gettext('\nSUMMARY: {0} upgrades attempted, {1} succeeded,'\
                    ' {2} failed').format(
                    attempted_count, succeeded_count, failed_count))
    return exceptions
def merge_bundle(reader, tree, check_clean, merge_type, reprocess,
                 show_base, change_reporter=None):
    """Merge a revision bundle into the current tree.

    :param reader: bundle reader supplying the revisions to merge
    :param tree: working tree to merge into
    :param check_clean: if True, refuse to merge when the tree has
        uncommitted changes
    :param merge_type: merge algorithm class to use
    :param reprocess: passed through to the merger
    :param show_base: passed through to the merger
    :param change_reporter: optional reporter for merge changes
    :return: the number of conflicts (0 when the merge was clean or there
        was nothing to do)
    :raises errors.UncommittedChanges: if check_clean is set and the tree
        has uncommitted changes
    """
    pb = ui.ui_factory.nested_progress_bar()
    try:
        pp = ProgressPhase("Merge phase", 6, pb)
        pp.next_phase()
        install_bundle(tree.branch.repository, reader)
        merger = Merger(tree.branch, this_tree=tree,
                        change_reporter=change_reporter)
        merger.pp = pp
        merger.pp.next_phase()
        if check_clean and tree.has_changes():
            # BUG FIX: previously raised UncommittedChanges(self), but
            # 'self' is undefined in this module-level function (NameError
            # at the exact moment the error should be reported).  The tree
            # is the object with the uncommitted changes.
            raise errors.UncommittedChanges(tree)
        merger.other_rev_id = reader.target
        merger.other_tree = merger.revision_tree(reader.target)
        merger.other_basis = reader.target
        merger.pp.next_phase()
        merger.find_base()
        if merger.base_rev_id == merger.other_rev_id:
            # Everything in the bundle is already merged.
            note(gettext("Nothing to do."))
            return 0
        merger.merge_type = merge_type
        merger.show_base = show_base
        merger.reprocess = reprocess
        conflicts = merger.do_merge()
        merger.set_pending()
    finally:
        pb.clear()
    return conflicts
def gssapi_login(self, user):
    """Try to authenticate to the FTP server via GSSAPI (Kerberos).

    On success, monkey-patches the connection for MIC-protected commands
    and returns the last server response; otherwise mutters and returns
    None so callers can fall back to other authentication.
    """
    # Try GSSAPI login first

    # Used FTP response codes:
    # 235 [ADAT=base64data] - indicates that the security data exchange
    #     completed successfully.
    # 334 [ADAT=base64data] - indicates that the requested security
    #     mechanism is ok, and includes security data to be used by the
    #     client to construct the next command.
    # 335 [ADAT=base64data] - indicates that the security data is
    #     acceptable, and more is required to complete the security
    #     data exchange.
    resp = self.sendcmd('AUTH GSSAPI')
    if resp.startswith('334 '):
        rc, self.vc = kerberos.authGSSClientInit("ftp@%s" % self.host)
        if kerberos.authGSSClientStep(self.vc, "") != 1:
            # Continue the token exchange until the server stops asking.
            while resp[:4] in ('334 ', '335 '):
                authdata = kerberos.authGSSClientResponse(self.vc)
                resp = self.sendcmd('ADAT ' + authdata)
                if resp[:9] in ('235 ADAT=', '335 ADAT='):
                    rc = kerberos.authGSSClientStep(self.vc, resp[9:])
                    # A 235 reply must finish the handshake (rc == 1) and
                    # a 335 reply must require more data (rc == 0).
                    if not ((resp.startswith('235 ') and rc == 1) or
                            (resp.startswith('335 ') and rc == 0)):
                        raise ftplib.error_reply, resp
        note(gettext("Authenticated as %s") %
             kerberos.authGSSClientUserName(self.vc))
        # Monkey patch ftplib
        self.putcmd = self.mic_putcmd
        self.getline = self.mic_getline
        self.sendcmd('USER ' + user)
        return resp
    mutter("Unable to use GSSAPI authentication: %s", resp)
def _send_0_9(branch, revision_id, submit_branch, public_branch, no_patch,
              no_bundle, message, base_revision_id,
              local_target_branch=None):
    """Build a format-0.9 merge directive for the given revision."""
    if no_bundle:
        # Without a bundle we can still carry a plain diff, or nothing.
        patch_type = None if no_patch else 'diff'
    elif no_patch:
        # A 0.9 bundle always embeds a patch, so this combination is
        # impossible to express.
        raise errors.BzrCommandError(gettext('Format 0.9 does not'
                                             ' permit bundle with no patch'))
    else:
        patch_type = 'bundle'
    from bzrlib import merge_directive
    return merge_directive.MergeDirective.from_objects(
        branch.repository, revision_id, time.time(),
        osutils.local_time_offset(), submit_branch,
        public_branch=public_branch, patch_type=patch_type,
        message=message, local_target_branch=local_target_branch)
def _check_one_rev(self, rev_id, rev):
    """Cross-check one revision.

    :param rev_id: A revision id to check.
    :param rev: A revision or None to indicate a missing revision.
    """
    # NOTE(review): despite the docstring, a None rev would raise
    # AttributeError on the access below - presumably callers never
    # actually pass None; confirm before relying on it.
    if rev.revision_id != rev_id:
        # Index and revision disagree; trust the revision's own id.
        self._report_items.append(gettext(
            'Mismatched internal revid {{{0}}} and index revid {{{1}}}').format(
            rev.revision_id, rev_id))
        rev_id = rev.revision_id
    # Check this revision tree etc, and count as seen when we encounter a
    # reference to it.
    self.planned_revisions.add(rev_id)
    # It is not a ghost
    self.ghosts.discard(rev_id)
    # Count all parents as ghosts if we haven't seen them yet.
    for parent in rev.parent_ids:
        if not parent in self.planned_revisions:
            self.ghosts.add(parent)

    self.ancestors[rev_id] = tuple(rev.parent_ids) or (NULL_REVISION,)
    self.add_pending_item(rev_id, ('inventories', rev_id), 'inventory',
                          rev.inventory_sha1)
    self.checked_rev_cnt += 1
def _show_push_branch(br_from, revision_id, location, to_file, verbose=False,
                      overwrite=False, remember=False, stacked_on=None,
                      create_prefix=False, use_existing_dir=False,
                      no_tree=False):
    """Push a branch to a location.

    :param br_from: the source branch
    :param revision_id: the revision-id to push up to
    :param location: the url of the destination
    :param to_file: the output stream
    :param verbose: if True, display more output than normal
    :param overwrite: list of things to overwrite ("history", "tags")
        or boolean indicating for everything
    :param remember: if True, store the location as the push location for
        the source branch
    :param stacked_on: the url of the branch, if any, to stack on;
        if set, only the revisions not in that branch are pushed
    :param create_prefix: if True, create the necessary parent directories
        at the destination if they don't already exist
    :param use_existing_dir: if True, proceed even if the destination
        directory exists without a current .bzr directory in it
    """
    to_transport = transport.get_transport(location)
    try:
        dir_to = controldir.ControlDir.open_from_transport(to_transport)
    except errors.NotBranchError:
        # Didn't find anything
        dir_to = None

    if dir_to is None:
        # Nothing at the destination yet: create a fresh clone there.
        try:
            br_to = br_from.create_clone_on_transport(to_transport,
                revision_id=revision_id, stacked_on=stacked_on,
                create_prefix=create_prefix,
                use_existing_dir=use_existing_dir, no_tree=no_tree)
        except errors.AlreadyControlDirError, err:
            raise errors.BzrCommandError(gettext(
                "Target directory %s already contains a .bzr directory, "
                "but it is not valid.") % (location,))
        except errors.FileExists, err:
            if not use_existing_dir:
                raise errors.BzrCommandError(gettext("Target directory %s"
                     " already exists, but does not have a .bzr"
                     " directory. Supply --use-existing-dir to push"
                     " there anyway.") % location)
            # This shouldn't occur, but if it does the FileExists error will be
            # more informative than an UnboundLocalError for br_to.
            raise
        # NOTE(review): the remainder of this function (the dir_to is not
        # None branch and result reporting) appears truncated in this view.
def _match_argform(cmd, takes_args, args): argdict = {} # step through args and takes_args, allowing appropriate 0-many matches for ap in takes_args: argname = ap[:-1] if ap[-1] == '?': if args: argdict[argname] = args.pop(0) elif ap[-1] == '*': # all remaining arguments if args: argdict[argname + '_list'] = args[:] args = [] else: argdict[argname + '_list'] = None elif ap[-1] == '+': if not args: raise errors.BzrCommandError(gettext( "command {0!r} needs one or more {1}").format( cmd, argname.upper())) else: argdict[argname + '_list'] = args[:] args = [] elif ap[-1] == '$': # all but one if len(args) < 2: raise errors.BzrCommandError( gettext("command {0!r} needs one or more {1}").format( cmd, argname.upper())) argdict[argname + '_list'] = args[:-1] args[:-1] = [] else: # just a plain arg argname = ap if not args: raise errors.BzrCommandError( gettext("command {0!r} requires argument {1}").format( cmd, argname.upper())) else: argdict[argname] = args.pop(0) if args: raise errors.BzrCommandError( gettext( "extra argument to command {0}: {1}").format( cmd, args[0]) ) return argdict
def onerror(function, path, excinfo): """Show warning for errors seen by rmtree. """ # Handle only permission error while removing files. # Other errors are re-raised. if function is not os.remove or excinfo[1].errno != errno.EACCES: raise ui.ui_factory.show_warning(gettext('unable to remove %s') % path)
def _note_lock(self, lock_type):
    """Record a lock acquisition, noting repeated same-type locks.

    When the 'relock' debug flag is active and the object is locked
    again with the same type as last time, emit a trace note.

    :param lock_type: 'r' for a read lock, anything else is reported
        as a write lock.
    """
    if 'relock' in debug.debug_flags and self._prev_lock == lock_type:
        if lock_type == 'r':
            type_name = 'read'
        else:
            type_name = 'write'
        # BUG FIX: trace.note interpolates %-style, so passing the
        # str.format placeholders plus bare args left '{0!r}'/'{1}'
        # uninterpolated (and the unconsumed args tripped the logging
        # error handler).  Format explicitly before handing it to note.
        trace.note(gettext('{0!r} was {1} locked again').format(
            self, type_name))
    self._prev_lock = lock_type
def completed(self, revno, rev_id): self._note(gettext('Committed revision %d.'), revno) # self._note goes to the console too; so while we want to log the # rev_id, we can't trivially only log it. (See bug 526425). Long # term we should rearrange the reporting structure, but for now # we just mutter seperately. We mutter the revid and revno together # so that concurrent bzr invocations won't lead to confusion. mutter('Committed revid %s as revno %d.', rev_id, revno)
def _find_merged_revno(self, revision, b, pb):
    """Return the mainline revno at which *revision* landed on branch b.

    :param revision: a one-element revision spec list, or None to use
        the branch tip.
    :param b: the branch to search.
    :param pb: progress bar updated as the search proceeds.
    :return: a mainline revision number.
    :raises InvalidRevisionSpec: if the revision was never merged to
        the mainline of b.
    """
    if revision is None:
        return b.revno()
    pb.update(gettext('Finding revision-id'))
    spec = revision[0]
    rev_id = spec.as_revision_id(b)
    if self._is_revno_spec(spec):
        # A revno spec is necessarily on the mainline already.
        mainline_rev = rev_id
    else:
        graph = b.repository.get_graph()
        pb.update(gettext('Finding merge'))
        mainline_rev = graph.find_lefthand_merger(
            rev_id, b.last_revision())
        if mainline_rev is None:
            raise InvalidRevisionSpec(spec.user_spec, b)
    pb.update(gettext('Finding revno'))
    return b.revision_id_to_revno(mainline_rev)
def _find_proposals(self, revision_id, pb):
    """List Launchpad merge proposals whose merge landed revision_id.

    :param revision_id: the merged revision to look up.
    :param pb: progress bar updated while querying Launchpad.
    :return: list of merge proposal objects from the Launchpad API.
    """
    from bzrlib.plugins.launchpad import (lp_api, lp_registration)
    # "devel" because branches.getMergeProposals is not part of 1.0 API.
    launchpad = lp_api.login(lp_registration.LaunchpadService(),
                             version='devel')
    pb.update(gettext('Finding proposals'))
    proposals = launchpad.branches.getMergeProposals(
        merged_revision=revision_id)
    return list(proposals)
def _find_proposals(self, revision_id, pb):
    """Query Launchpad for merge proposals that merged revision_id.

    :param revision_id: revision id to match against merged proposals.
    :param pb: progress bar updated before the remote query.
    :return: list of merge proposal objects.
    """
    from bzrlib.plugins.launchpad import (lp_api, lp_registration)
    # "devel" because branches.getMergeProposals is not part of 1.0 API.
    service = lp_registration.LaunchpadService()
    launchpad = lp_api.login(service, version='devel')
    pb.update(gettext('Finding proposals'))
    found = launchpad.branches.getMergeProposals(
        merged_revision=revision_id)
    return list(found)
def _gc_inventory(self):
    """Remove inventories that are not referenced from the revision store.

    If garbage inventories are found, the whole inventory store is
    regenerated from the revision graph: the old store is backed up,
    every referenced inventory is streamed into a temporary store with
    corrected parents, and the new store is then activated.
    """
    self.pb.update(gettext('Checking unused inventories'), 0, 1)
    self._check_garbage_inventories()
    self.pb.update(gettext('Checking unused inventories'), 1, 3)
    if not self.garbage_inventories:
        ui.ui_factory.note(gettext('Inventory ok.'))
        return
    self.pb.update(gettext('Backing up inventory'), 0, 0)
    self.repo._backup_inventory()
    ui.ui_factory.note(gettext('Backup Inventory created'))
    # asking for '' should never return a non-empty weave
    new_inventories = self.repo._temp_inventories()
    # we have topological order of revisions and non ghost parents ready.
    graph = self.revisions.get_parent_map(self.revisions.keys())
    revision_keys = topo_sort(graph)
    # (the old `revision_ids` binding derived from revision_keys was
    # never used and has been dropped.)
    self._setup_steps(len(revision_keys))
    stream = self._change_inv_parents(
        self.inventory.get_record_stream(revision_keys, 'unordered', True),
        graph.__getitem__,
        set(revision_keys))
    new_inventories.insert_record_stream(stream)
    # if this worked, the set of new_inventories keys should equal
    # the set of revision keys we streamed in.
    if set(new_inventories.keys()) != set(revision_keys):
        raise AssertionError()
    self.pb.update(gettext('Writing weave'))
    self.repo._activate_new_inventory()
    self.inventory = None
    ui.ui_factory.note(gettext('Inventory regenerated.'))
def _gc_inventory(self):
    """Remove inventories that are not referenced from the revision store.

    When garbage inventories exist, regenerates the inventory store:
    backs up the current one, rebuilds every referenced inventory with
    corrected parents in a temporary store, verifies the result, and
    activates the new store.
    """
    self.pb.update(gettext('Checking unused inventories'), 0, 1)
    self._check_garbage_inventories()
    self.pb.update(gettext('Checking unused inventories'), 1, 3)
    if not self.garbage_inventories:
        ui.ui_factory.note(gettext('Inventory ok.'))
        return
    self.pb.update(gettext('Backing up inventory'), 0, 0)
    self.repo._backup_inventory()
    ui.ui_factory.note(gettext('Backup Inventory created'))
    # asking for '' should never return a non-empty weave
    new_inventories = self.repo._temp_inventories()
    # we have topological order of revisions and non ghost parents ready.
    graph = self.revisions.get_parent_map(self.revisions.keys())
    revision_keys = topo_sort(graph)
    # (an unused `revision_ids` list comprehension was removed here.)
    self._setup_steps(len(revision_keys))
    stream = self._change_inv_parents(
        self.inventory.get_record_stream(revision_keys, 'unordered', True),
        graph.__getitem__,
        set(revision_keys))
    new_inventories.insert_record_stream(stream)
    # if this worked, the new inventory keys should exactly match the
    # revision keys that were streamed in.
    if set(new_inventories.keys()) != set(revision_keys):
        raise AssertionError()
    self.pb.update(gettext('Writing weave'))
    self.repo._activate_new_inventory()
    self.inventory = None
    ui.ui_factory.note(gettext('Inventory regenerated.'))
def run(self, revision=None):
    """Find merge proposals that merged a revision and open them.

    :param revision: optional revision spec list; defaults to the
        branch tip via _find_merged_revno.
    :raises BzrCommandError: if no proposal is found.
    """
    from bzrlib import ui
    from bzrlib.plugins.launchpad import lp_api
    import webbrowser
    b = _mod_branch.Branch.open_containing('.')[0]
    pb = ui.ui_factory.nested_progress_bar()
    b.lock_read()
    try:
        revno = self._find_merged_revno(revision, b, pb)
        # _find_proposals takes (revision_id, pb) — the old call passed
        # (revno, b, pb), which neither matches its arity nor passes a
        # revision id.  Convert the mainline revno to a revision id
        # first.  NOTE(review): assumes Branch.get_rev_id is the right
        # conversion here — confirm against the _find_proposals caller
        # contract.
        merged = self._find_proposals(b.get_rev_id(revno), pb)
        if len(merged) == 0:
            raise BzrCommandError(gettext('No review found.'))
        trace.note(gettext('%d proposals(s) found.') % len(merged))
        for mp in merged:
            webbrowser.open(lp_api.canonical_url(mp))
    finally:
        b.unlock()
        pb.finished()
def serve(self, thread_name_suffix=''):
    """Accept connections on the server socket until told to stop.

    Each accepted connection is handed to serve_conn; the loop exits
    when _should_terminate is set or on KeyboardInterrupt.  Start/stop
    hooks are run around the loop, and SIGHUP triggers a graceful stop.

    :param thread_name_suffix: suffix appended to the name of each
        connection-handler thread.
    """
    # Note: There is a temptation to do
    #   signals.register_on_hangup(id(self), self._stop_gracefully)
    # However, that creates a temporary object which is a bound
    # method. signals._on_sighup is a WeakKeyDictionary so it
    # immediately gets garbage collected, because nothing else
    # references it. Instead, we need to keep a real reference to the
    # bound method for the lifetime of the serve() function.
    stop_gracefully = self._stop_gracefully
    signals.register_on_hangup(id(self), stop_gracefully)
    self._should_terminate = False
    # for hooks we are letting code know that a server has started (and
    # later stopped).
    self.run_server_started_hooks()
    self._started.set()
    try:
        try:
            while not self._should_terminate:
                try:
                    conn, client_addr = self._server_socket.accept()
                except self._socket_timeout:
                    # just check if we're asked to stop
                    pass
                except self._socket_error, e:
                    # if the socket is closed by stop_background_thread
                    # we might get a EBADF here, or if we get a signal we
                    # can get EINTR, any other socket errors should get
                    # logged.
                    if e.args[0] not in (errno.EBADF, errno.EINTR):
                        trace.warning(
                            gettext("listening socket error: %s") % (e, ))
                else:
                    # accept succeeded; re-check the stop flag before
                    # dispatching, since it may have been set meanwhile.
                    if self._should_terminate:
                        conn.close()
                        break
                    self.serve_conn(conn, thread_name_suffix)
                # Cleanout any threads that have finished processing.
                self._poll_active_connections()
        except KeyboardInterrupt:
            # dont log when CTRL-C'd.
            raise
        except Exception, e:
            trace.report_exception(sys.exc_info(), sys.stderr)
            raise
    finally:
        try:
            # ensure the server socket is closed.
            self._server_socket.close()
        except self._socket_error:
            # ignore errors on close
            pass
        self._stopped.set()
        signals.unregister_on_hangup(id(self))
        self.run_server_stopped_hooks()
    if self._gracefully_stopping:
        # A graceful stop waits for in-flight clients before declaring
        # the server fully stopped.
        self._wait_for_clients_to_disconnect()
        self._fully_stopped.set()
def serve(self, thread_name_suffix=''):
    """Run the accept loop for this server until termination.

    Accepted connections are dispatched via serve_conn; the loop stops
    when _should_terminate is set or on KeyboardInterrupt.  Server
    started/stopped hooks run around the loop, and SIGHUP requests a
    graceful stop.

    :param thread_name_suffix: suffix for connection-handler thread
        names.
    """
    # Note: There is a temptation to do
    #   signals.register_on_hangup(id(self), self._stop_gracefully)
    # However, that creates a temporary object which is a bound
    # method. signals._on_sighup is a WeakKeyDictionary so it
    # immediately gets garbage collected, because nothing else
    # references it. Instead, we need to keep a real reference to the
    # bound method for the lifetime of the serve() function.
    stop_gracefully = self._stop_gracefully
    signals.register_on_hangup(id(self), stop_gracefully)
    self._should_terminate = False
    # for hooks we are letting code know that a server has started (and
    # later stopped).
    self.run_server_started_hooks()
    self._started.set()
    try:
        try:
            while not self._should_terminate:
                try:
                    conn, client_addr = self._server_socket.accept()
                except self._socket_timeout:
                    # just check if we're asked to stop
                    pass
                except self._socket_error, e:
                    # if the socket is closed by stop_background_thread
                    # we might get a EBADF here, or if we get a signal we
                    # can get EINTR, any other socket errors should get
                    # logged.
                    if e.args[0] not in (errno.EBADF, errno.EINTR):
                        trace.warning(gettext("listening socket error: %s")
                                      % (e,))
                else:
                    # accept succeeded; the stop flag may have been set
                    # while we were blocked, so re-check before serving.
                    if self._should_terminate:
                        conn.close()
                        break
                    self.serve_conn(conn, thread_name_suffix)
                # Cleanout any threads that have finished processing.
                self._poll_active_connections()
        except KeyboardInterrupt:
            # dont log when CTRL-C'd.
            raise
        except Exception, e:
            trace.report_exception(sys.exc_info(), sys.stderr)
            raise
    finally:
        try:
            # ensure the server socket is closed.
            self._server_socket.close()
        except self._socket_error:
            # ignore errors on close
            pass
        self._stopped.set()
        signals.unregister_on_hangup(id(self))
        self.run_server_stopped_hooks()
    if self._gracefully_stopping:
        # Wait for remaining clients before signalling a full stop.
        self._wait_for_clients_to_disconnect()
        self._fully_stopped.set()
def _reconcile_branch(self):
    """Reconcile the branch in self.bzrdir, if one exists.

    Sets self.fixed_branch_history to None when there is no branch to
    check, otherwise to whether the branch history needed fixing.
    """
    try:
        self.branch = self.bzrdir.open_branch()
    except errors.NotBranchError:
        # Nothing to check here
        self.fixed_branch_history = None
        return
    ui.ui_factory.note(gettext('Reconciling branch %s') % self.branch.base)
    # thorough=True: the branch reconciler does a full history check.
    branch_reconciler = self.branch.reconcile(thorough=True)
    self.fixed_branch_history = branch_reconciler.fixed_history
def run(self, location=None, dry_run=False):
    """Open the Launchpad web page for a branch location in a browser.

    :param location: branch location; defaults to the current
        directory when None.
    :param dry_run: if True, report the URL but do not open a browser.
    """
    from bzrlib.plugins.launchpad.lp_registration import (LaunchpadService)
    web_url = self._get_web_url(LaunchpadService(),
                                u'.' if location is None else location)
    trace.note(gettext('Opening %s in web browser') % web_url)
    if dry_run:
        return
    import webbrowser   # this import should not be lazy
    # otherwise bzr.exe lacks this module
    webbrowser.open(web_url)
def update_lp(self):
    """Update the Launchpad copy of this branch.

    Does nothing when updating is disabled.  If Launchpad has already
    scanned the current tip, no push happens; if the scanned revision
    is not an ancestor of the local tip, the branches have diverged
    and DivergedBranches is raised.  Otherwise the local branch is
    pushed to self.push_bzr.
    """
    if not self._check_update:
        return
    self.bzr.lock_read()
    try:
        if self.lp.last_scanned_id is not None:
            if self.bzr.last_revision() == self.lp.last_scanned_id:
                # Launchpad already has our tip; nothing to push.
                trace.note(gettext('%s is already up-to-date.') %
                           self.lp.bzr_identity)
                return
            graph = self.bzr.repository.get_graph()
            if not graph.is_ancestor(self.lp.last_scanned_id,
                                     self.bzr.last_revision()):
                raise errors.DivergedBranches(self.bzr, self.push_bzr)
            trace.note(gettext('Pushing to %s') % self.lp.bzr_identity)
        # NOTE(review): when last_scanned_id is None the push happens
        # without any divergence check or progress note — presumably
        # a never-scanned branch is always safe to push; confirm.
        self.bzr.push(self.push_bzr)
    finally:
        self.bzr.unlock()
def get_cmd_object(cmd_name, plugins_override=True):
    """Look up and return the command object named cmd_name.

    :param cmd_name: name of the command to look up.
    :param plugins_override: if true, plugin commands can override
        builtins.
    :raises errors.BzrCommandError: if no such command exists.
    """
    try:
        cmd = _get_cmd_object(cmd_name, plugins_override)
    except KeyError:
        # Translate the internal lookup failure into a user-facing error.
        raise errors.BzrCommandError(gettext('unknown command "%s"')
                                     % cmd_name)
    return cmd
def apply_lsprofiled(filename, the_callable, *args, **kwargs):
    """Run the_callable under the lsprof profiler and report the stats.

    :param filename: file to save profile data to, or None to print
        the sorted stats to the console instead.
    :param the_callable: callable to profile; invoked via
        exception_to_return_code so exceptions become return codes.
    :return: the return code from the profiled call.
    """
    from bzrlib.lsprof import profile
    ret, stats = profile(exception_to_return_code, the_callable,
                         *args, **kwargs)
    stats.sort()
    if filename is not None:
        stats.save(filename)
        trace.note(gettext('Profile data written to "%s".'), filename)
    else:
        stats.pprint()
    return ret