def get_or_create_bucket(name):
    if not get_dryrun():
        conn = boto.connect_s3()
        bucket = conn.create_bucket(name)
        return bucket
    else:
        print('boto.connect_s3().create_bucket(%s)' % repr(name))
def truncate():
    """
    Compacts all deployment records into a single initial deployment.
    """
    reset()
    if not common.get_dryrun():
        fabric.api.execute(thumbprint, hosts=env.hosts)
def thumbprint(self, data):
    assert isinstance(data, dict)
    if not common.get_dryrun():
        fout = open_file(self.get_thumbprint_filename(env.host_string), 'w')
        yaml.dump(data, fout, default_flow_style=False, indent=4)
        fout.flush()
def delete(name=None, group=None, release=None, except_release=None, dryrun=1, verbose=1):
    """
    Permanently erase one or more VM instances from existence.
    """
    verbose = int(verbose)
    if env.vm_type == EC2:
        conn = get_ec2_connection()
        instances = list_instances(
            name=name,
            group=group,
            release=release,
            except_release=except_release,
        )
        for instance_name, instance_data in instances.items():
            public_dns_name = instance_data['public_dns_name']
            print('\nDeleting %s (%s)...' % (instance_name, instance_data['id']))
            if not get_dryrun():
                conn.terminate_instances(instance_ids=[instance_data['id']])

            # Clear host key on localhost.
            known_hosts = os.path.expanduser('~/.ssh/known_hosts')
            cmd = 'ssh-keygen -f "%s" -R %s' % (known_hosts, public_dns_name)
            local_or_dryrun(cmd)
    else:
        raise NotImplementedError
def stop(service=''):
    """
    Stop a service.

    ::

        import burlap

        # Stop service if it is running
        if burlap.service.is_running('foo'):
            burlap.service.stop('foo')

    """
    ran = False
    service = service.strip().lower()
    for _service in env.services:
        _service = _service.strip().upper()
        if service and _service.lower() != service:
            continue
        funcs = common.service_stoppers.get(_service)
        if funcs:
            print('Stopping service %s...' % (_service,))
            for func in funcs:
                func()
            ran = True
    if not ran and not get_dryrun() and service:
        _run_service(service, 'stop')
def runs_once(meth):
    """
    A wrapper around Fabric's runs_once() to support our dryrun feature.
    """
    from burlap.common import get_dryrun, runs_once_methods
    if get_dryrun():
        pass
    else:
        runs_once_methods.append(meth)
        _runs_once(meth)
    return meth
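# Illustrative usage only; `collectstatic` is a hypothetical task name and is not
# part of the wrapper above. Decorating a Fabric task with this wrapper keeps
# Fabric's run-once behavior during a real run, while skipping the wrapping during
# a dryrun so the task can be previewed repeatedly.
#
# @runs_once
# def collectstatic():
#     run_or_dryrun('./manage.py collectstatic --noinput')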
def restart(service=''):
    """
    Restart a service.

    ::

        import burlap

        # Start service, or restart it if it is already running
        if burlap.service.is_running('foo'):
            burlap.service.restart('foo')
        else:
            burlap.service.start('foo')

    """
    service = service.strip().lower()
    _ran = False
    for _service in env.services:
        _service = _service.strip().upper()
        if service and _service.lower() != service:
            continue
        srv = common.services.get(_service)
        if srv:
            srv.restart()
            _ran = True
            continue
        funcs = common.service_restarters.get(_service)
        if funcs:
            print('Restarting service %s...' % (_service,))
            for func in funcs:
                if not get_dryrun():
                    func()
            _ran = True
    if not get_dryrun() and not _ran and service:
        _run_service(service, 'restart')
def deploy():
    """
    Applies routine, typically application-level changes to the service.
    """
    for service in env.services:
        service = service.strip().upper()
        funcs = common.service_deployers.get(service)
        if funcs:
            print('Deploying service %s...' % (service,))
            for func in funcs:
                if not get_dryrun():
                    func()
def configure():
    """
    Applies one-time settings changes to the host, usually to initialize the service.
    """
    print('env.services:', env.services)
    for service in list(env.services):
        service = service.strip().upper()
        funcs = common.service_configurators.get(service, [])
        if funcs:
            print('!' * 80)
            print('Configuring service %s...' % (service,))
            for func in funcs:
                print('Function:', func)
                if not get_dryrun():
                    func()
def get_or_create_distribution(s3_bucket):
    if not get_dryrun():
        conn = boto.connect_cloudfront()
        origin_dns = '%s.s3.amazonaws.com' % s3_bucket.name
        origin = boto.cloudfront.origin.S3Origin(origin_dns)
        #origin = boto.cloudfront.origin.S3Origin(s3_bucket.get_website_endpoint())
        distro = None
        dists = conn.get_all_distributions()
        for d in dists:
            if origin_dns == d.get_distribution().config.origin.dns_name:
                distro = d
                break
        if not distro:
            distro = conn.create_distribution(origin=origin, enabled=True)
        return distro
    else:
        print('boto.connect_cloudfront().create_distribution(%s)' % repr(s3_bucket))
def execute(self, i=None, j=None):
    i = i or self.index
    steps_ran = []
    for step_i, step in enumerate(self.steps):
        if step_i < i:
            continue

        # Run command.
        t0 = datetime.datetime.utcnow().isoformat()
        step.execute()
        t1 = datetime.datetime.utcnow().isoformat()
        steps_ran.append(step)

        # Record success.
        if not common.get_dryrun():
            self.index = step_i + 1
            self.add_history(self.index, t0, t1)

        if j is not None and step_i >= j:
            break
    return steps_ran
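# Illustrative sketch only; the `plan` instance and step indices are hypothetical.
# execute() resumes from the recorded index (or an explicit i), runs each remaining
# step, advances self.index and records history only when not in dryrun mode, and
# stops after step j when j is given.
#
# steps_ran = plan.execute(i=0, j=4)   # run steps 0 through 4 of this plan
# print('%i steps completed' % len(steps_ran))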
def auto(fake=0, preview=0, check_outstanding=1, components=None, explain=0):
    """
    Generates a plan based on the components that have changed since the last deployment.

    The overall steps run for each host:

        1. create plan
        2. run plan
        3. create thumbprint

    fake := If true, generates the plan and records the run as successful,
        but does not apply any changes to the hosts.

    components := list of names of components found in the services list
    """
    explain = int(explain)

    only_components = components or []
    if isinstance(only_components, basestring):
        only_components = [_.strip().upper() for _ in only_components.split(',') if _.strip()]
    if only_components:
        print('Limiting deployment to components: %s' % only_components)

    def get_deploy_funcs(components):
        for component in components:
            if only_components and component not in only_components:
                continue
            funcs = common.manifest_deployers.get(component, [])
            for func_name in funcs:

                #TODO:remove this after burlap.* naming prefix bug fixed
                if func_name.startswith('burlap.'):
                    print('skipping %s' % func_name)
                    continue

                takes_diff = common.manifest_deployers_takes_diff.get(func_name, False)
                #print(func_name, takes_diff)

                if preview:
                    #print(success((' '*4)+func_name))
                    #continue
                    yield func_name, None
                else:
                    func = common.resolve_deployer(func_name)
                    last, current = component_thumbprints[component]
                    if not fake:
                        if takes_diff:
                            yield func_name, functools.partial(func, last=last, current=current)
                        else:
                            yield func_name, functools.partial(func)

    verbose = common.get_verbose()
    fake = int(fake)
    preview = int(preview)
    check_outstanding = int(check_outstanding)

    all_services = set(_.strip().upper() for _ in env.services)
    if verbose:
        print('&'*80)
        print('services:', env.services)

    last_plan = get_last_completed_plan()
    outstanding = has_outstanding_plans()
    if verbose:
        print('outstanding plans:', outstanding)

    if check_outstanding and outstanding:
        print(fail((
            'There are outstanding plans pending execution! '
            'Run `fab %s deploy.status` for details.\n'
            'To ignore these, re-run with :check_outstanding=0.'
        ) % env.ROLE))
        sys.exit(1)

    if verbose:
        print('iter_thumbprint_differences')
    diffs = list(iter_thumbprint_differences(only_components=only_components))
    if diffs:
        if verbose:
            print('Differences detected!')

        # Create plan.
        components = set()
        component_thumbprints = {}
        for component, (last, current) in diffs:
            if component not in all_services:
                print('ignoring component:', component)
                continue
            #if only_components and component not in only_components:
                #continue
            component_thumbprints[component] = last, current
            components.add(component)

        component_dependences = {}
        if verbose:
            print('all_services:', all_services)
            print('manifest_deployers_befores:', common.manifest_deployers_befores.keys())
            print('*'*80)
            print('all components:', components)

        all_components = set(common.all_satchels)
        if only_components and not all_components.issuperset(only_components):
            unknown_components = set(only_components).difference(all_components)
            raise Exception('Unknown components: %s' % ', '.join(sorted(unknown_components)))

        for _c in components:
            if verbose:
                print('checking:', _c)
            deps = set(common.manifest_deployers_befores.get(_c, []))
            if verbose:
                print('deps0:', deps)
            deps = deps.intersection(components)
            if verbose:
                print('deps1:', deps)
            component_dependences[_c] = deps

        if verbose:
            print('dependencies:')
            for _c in component_dependences:
                print(_c, component_dependences[_c])

        components = list(common.topological_sort(component_dependences.items()))
        #print('components:', components)
        #raw_input('enter')
        plan_funcs = list(get_deploy_funcs(components))

        if components and plan_funcs:
            print('These components have changed:\n')
            for component in sorted(components):
                print((' '*4)+component)
            print('\nDeployment plan:\n')
            for func_name, _ in plan_funcs:
                print(success((' '*4)+func_name))
        else:
            print('Nothing to do!')
            return False

        # Execute plan.
        if preview:
            print('\nTo execute this plan on all hosts run:\n\n    fab %s deploy.run' % env.ROLE)
            return components, plan_funcs
        else:
            #raw_input('enter')
            with open('/tmp/burlap.progress', 'w') as fout:
                print('%s Beginning plan execution!' % (datetime.datetime.now(),), file=fout)
                fout.flush()
                for func_name, plan_func in plan_funcs:
                    print('%s Executing step %s...' % (datetime.datetime.now(), func_name))
                    print('%s Executing step %s...' % (datetime.datetime.now(), func_name), file=fout)
                    fout.flush()
                    #raw_input('enter'); continue
                    if callable(plan_func):
                        plan_func()
                    print('%s Done!' % (datetime.datetime.now(),), file=fout)
                    fout.flush()
                print('%s Plan execution complete!' % (datetime.datetime.now(),), file=fout)
                fout.flush()
            #raw_input('final')

            # Create thumbprint.
            if not common.get_dryrun():
                plan = Plan.get_or_create_next(last_plan=last_plan)
                plan.record_thumbprint(only_components=only_components)
def load(db_dump_fn='', prep_only=0, force_upload=0, from_local=0):
    """
    Restores a database snapshot onto the target database server.

    If prep_only=1, commands for preparing the load will be generated,
    but not the command to finally load the snapshot.
    """
    verbose = common.get_verbose()
    from burlap.dj import set_db
    from burlap.common import get_dryrun

    if not db_dump_fn:
        db_dump_fn = get_default_db_fn()

    env.db_dump_fn = render_fn(db_dump_fn).strip()

    set_db(site=env.SITE, role=env.ROLE)

    from_local = int(from_local)
    prep_only = int(prep_only)

    # Copy snapshot file to target.
    missing_local_dump_error = (
        "Database dump file %(db_dump_fn)s does not exist."
    ) % env
    if env.is_local:
        env.db_remote_dump_fn = db_dump_fn
    else:
        env.db_remote_dump_fn = '/tmp/' + os.path.split(env.db_dump_fn)[-1]

    if not prep_only:
        if int(force_upload) or (not get_dryrun() and not env.is_local
                and not files.exists(env.db_remote_dump_fn)):
            assert os.path.isfile(env.db_dump_fn), missing_local_dump_error
            if verbose:
                print('Uploading database snapshot...')
            put_or_dryrun(local_path=env.db_dump_fn, remote_path=env.db_remote_dump_fn)

    if env.is_local and not prep_only and not get_dryrun():
        assert os.path.isfile(env.db_dump_fn), missing_local_dump_error

    if env.db_load_command:
        cmd = env.db_load_command % env
        run_or_dryrun(cmd)

    elif 'postgres' in env.db_engine or 'postgis' in env.db_engine:

        set_root_login()

        with settings(warn_only=True):
            cmd = 'dropdb --user=%(db_postgresql_postgres_user)s %(db_name)s' % env
            run_or_dryrun(cmd)

        cmd = 'psql --user=%(db_postgresql_postgres_user)s -c "CREATE DATABASE %(db_name)s;"' % env
        run_or_dryrun(cmd)

        with settings(warn_only=True):

            if 'postgis' in env.db_engine:
                cmd = ('psql --user=%(db_postgresql_postgres_user)s --no-password '
                    '--dbname=%(db_name)s --command="CREATE EXTENSION postgis;"') % env
                run_or_dryrun(cmd)
                cmd = ('psql --user=%(db_postgresql_postgres_user)s --no-password '
                    '--dbname=%(db_name)s --command="CREATE EXTENSION postgis_topology;"') % env
                run_or_dryrun(cmd)

            cmd = 'psql --user=%(db_postgresql_postgres_user)s -c "DROP OWNED BY %(db_user)s CASCADE;"' % env
            run_or_dryrun(cmd)

        cmd = ('psql --user=%(db_postgresql_postgres_user)s -c "DROP USER IF EXISTS %(db_user)s; '
            'CREATE USER %(db_user)s WITH PASSWORD \'%(db_password)s\'; '
            'GRANT ALL PRIVILEGES ON DATABASE %(db_name)s to %(db_user)s;"') % env
        run_or_dryrun(cmd)

        for createlang in env.db_postgresql_createlangs:
            env.db_createlang = createlang
            cmd = 'createlang -U %(db_postgresql_postgres_user)s %(db_createlang)s %(db_name)s || true' % env
            run_or_dryrun(cmd)

        if not prep_only:
            #cmd = 'gunzip -c %(db_remote_dump_fn)s | pg_restore --jobs=8 -U %(db_postgresql_postgres_user)s --create --dbname=%(db_name)s' % env #TODO:deprecated
            #cmd = 'gunzip -c %(db_remote_dump_fn)s | pg_restore -U %(db_postgresql_postgres_user)s --create --dbname=%(db_name)s' % env #TODO:deprecated
            if env.db_postgresql_custom_load_cmd:
                cmd = env.db_postgresql_custom_load_cmd % env
            else:
                cmd = ('pg_restore --jobs=8 -U %(db_postgresql_postgres_user)s --create '
                    '--dbname=%(db_name)s %(db_remote_dump_fn)s') % env
            run_or_dryrun(cmd)

    elif 'mysql' in env.db_engine:

        set_root_login()

        # Drop the database if it's there.
        #cmd = ("mysql -v -h %(db_host)s -u %(db_user)s -p'%(db_password)s' "
        cmd = ("mysql -v -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' "
            "--execute='DROP DATABASE IF EXISTS %(db_name)s'") % env
        run_or_dryrun(cmd)

        # Now, create the database.
        #cmd = ("mysqladmin -h %(db_host)s -u %(db_user)s -p'%(db_password)s' "
        cmd = ("mysqladmin -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' "
            "create %(db_name)s") % env
        run_or_dryrun(cmd)

        # Create user.
        with settings(warn_only=True):
            cmd = ("mysql -v -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' "
                "--execute=\"CREATE USER '%(db_user)s'@'%%' IDENTIFIED BY '%(db_password)s'; "
                "GRANT ALL PRIVILEGES ON *.* TO '%(db_user)s'@'%%' WITH GRANT OPTION; "
                "FLUSH PRIVILEGES;\"") % env
            run_or_dryrun(cmd)
            # DROP USER '<username>'@'%';
            # CREATE USER '<username>'@'%' IDENTIFIED BY '<password>';
            # GRANT ALL PRIVILEGES ON *.* TO '<username>'@'%' WITH GRANT OPTION;
            # FLUSH PRIVILEGES;

        # Set collation.
        #cmd = ("mysql -v -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' "
        #    "--execute='ALTER DATABASE %(db_name)s CHARACTER SET %(db_mysql_character_set)s COLLATE %(db_mysql_collate)s;'") % env
        set_collation_mysql()

        # Raise max packet limitation.
        #run_or_dryrun(
        #    ('mysql -v -h %(db_host)s -D %(db_name)s -u %(db_root_user)s '
        #     '-p"%(db_root_password)s" --execute="SET global '
        #     'net_buffer_length=%(db_mysql_net_buffer_length)s; SET global '
        #     'max_allowed_packet=%(db_mysql_max_allowed_packet)s;"') % env)
        set_max_mysql_packet_size(do_set=0)

        # Run any server-specific commands (e.g. to setup permissions) before
        # we load the data.
        for command in env.db_mysql_preload_commands:
            run_or_dryrun(command % env)

        # Restore the database content from the dump file.
        env.db_dump_fn = db_dump_fn
        cmd = ('gunzip < %(db_remote_dump_fn)s | mysql -u %(db_root_user)s '
            '--password=%(db_root_password)s --host=%(db_host)s '
            '-D %(db_name)s') % env
        run_or_dryrun(cmd)

        set_collation_mysql()

    else:
        raise NotImplementedError
def setUp(self):
    from burlap import deploy, manifest
    from burlap.deploy import deploy as deploy_satchel

    # Always print the current test name before the test.
    #_, columns = map(int, os.popen('stty size', 'r').read().split()) # TODO:fix? broke in Ubuntu16+Python3
    columns = 80
    kwargs = dict(
        bar='#' * columns,
        name=self._testMethodName,
    )
    print(self.test_name_format.format(**kwargs), file=self.test_name_fout)

    # Save fabric state.
    self._env = env.copy()
    #print('before env clear:')
    #pprint(env, indent=4)

    # Reset fabric state.
    #self.clear_env()
    #self.update_env(default_env)
    print('setUp: initializing env...')
    init_env()
    #deploy_init_env()

    if not env.host_string:
        env.host_string = 'localhost'
        env.hosts = [env.host_string]

    # Save cwd.
    self._cwd = os.getcwd()
    print('cwd:', self._cwd)

    # Save burlap state.
    print('setUp: Saving burlap state...')
    self._burlap_state = get_state()
    self._dryrun = get_dryrun()
    self._verbose = get_verbose()

    # Clear runs_once on legacy runs_once methods.
    print('setUp: Clearing runs_once methods...')
    modules = [deploy, deploy_satchel, manifest]
    for module in modules:
        print('setUp: Checking module:', module)
        for name in dir(module):
            print('setUp: Checking name:', name)
            #func = getattr(module, name)
            #if not callable(func):
            if not is_callable(module, name):
                continue
            func = getattr(module, name)
            print('clearing:', func)
            clear_runs_once(func)

    # Clear runs_once on our custom runs_once methods.
    print('setUp: Clearing custom runs_once methods...')
    from burlap.common import runs_once_methods
    for meth in runs_once_methods:
        clear_runs_once(meth)

    # Ensure all satchels re-push all their local variables back into the global env.
    print('setUp: Clearing satchels...')
    for satchel in all_satchels.values():
        satchel.register()
        satchel.clear_caches()

    # Set satchel variables that should be customized just for unittests.
    # For example, so we can run unittests locally, we want to change the default burlap paths
    # so they don't conflict with the defaults, in case we're using burlap to deploy locally.
    deploy_satchel.env.lockfile_path = '/tmp/burlap_unittests/deploy.lock'
    deploy_satchel.env.data_dir = '/tmp/burlap_unittests'

    # Since these tests are automated, if we ever get a prompt, we should immediately fail,
    # because no action should ever be user-interactive.
    env.abort_on_prompts = True
    env.always_use_pty = False

    print('setUp: Purging deployments...')
    #delete_plan_data_dir()
    deploy_satchel.purge()
    #clear_fs_cache()

    super(TestCase, self).setUp()
def update_tickets_from_git(last=None, current=None):
    """
    Run during a deployment.
    Looks at all commits between now and the last deployment.
    Finds all ticket numbers and updates their status in Jira.
    """
    from jira import JIRA, JIRAError
    from burlap.deploy import get_last_current_diffs
    from burlap.git import gittracker

    get_current_commit = gittracker.get_current_commit
    GITTRACKER = gittracker.name.upper()

    dryrun = common.get_dryrun()
    verbose = common.get_verbose()

    # Ensure this is only run once per role.
    if env.host_string != env.hosts[-1]:
        return

    if not env.jira_update_from_git:
        return

    if not env.jira_ticket_pattern:
        return

    if not env.jira_basic_auth_username or not env.jira_basic_auth_password:
        return

    # During a deployment, we should be given these, but for testing,
    # lookup the diffs dynamically.
    if not last or not current:
        last, current = get_last_current_diffs(GITTRACKER)

    if verbose:
        print('-'*80)
        print('last.keys:', last.keys())
        print('-'*80)
        print('current.keys:', current.keys())

    try:
        last_commit = last['GITTRACKER']['current_commit']
    except KeyError:
        return
    current_commit = current['GITTRACKER']['current_commit']

    # Find all tickets deployed between last deployment and now.
    tickets = get_tickets_between_commits(current_commit, last_commit)
    if verbose:
        print('tickets:', tickets)

    # Update all tickets in Jira.
    jira = JIRA({
        'server': env.jira_server
    }, basic_auth=(env.jira_basic_auth_username, env.jira_basic_auth_password))
    for ticket in tickets:

        # Mention that this deployment updated the ticket.
        comment = env.jira_ticket_update_message_template % dict(role=env.ROLE.lower())
        print('Commenting on ticket %s: %s' % (ticket, comment))
        if not dryrun:
            jira.add_comment(ticket, comment)

        # Update ticket status.
        recheck = False
        while 1:
            print('Looking up jira ticket %s...' % ticket)
            issue = jira.issue(ticket)
            print('Ticket %s retrieved.' % ticket)
            transition_to_id = dict((t['name'], t['id']) for t in jira.transitions(issue))
            print('%i allowable transitions found: %s'
                % (len(transition_to_id), ', '.join(transition_to_id.keys())))
            next_transition_name = env.jira_deploy_workflow.get(issue.fields.status.name.title())
            next_transition_id = transition_to_id.get(next_transition_name)
            if next_transition_name:
                new_fields = {}
                #print('jira_assignee_by_status:', env.jira_assignee_by_status, issue.fields.status.name.title())
                new_assignee = env.jira_assignee_by_status.get(
                    #issue.fields.status.name.title(),
                    next_transition_name,
                    issue.fields.assignee.name,
                )
                if new_assignee == 'reporter':
                    new_assignee = issue.fields.reporter.name
                #print('new_assignee:', new_assignee)
                print('Updating ticket %s to status %s and assigning it to %s.'
                    % (ticket, next_transition_name, new_assignee))
                if not dryrun:
                    try:
                        jira.transition_issue(issue, next_transition_id)
                        recheck = True
                    except AttributeError as e:
                        print('Unable to transition ticket %s to %s: %s'
                            % (ticket, next_transition_name, e), file=sys.stderr)

                    # Note, assignment should happen after the transition, since the assignment
                    # may otherwise remove transitions that we need.
                    try:
                        if new_assignee:
                            print('Assigning ticket %s to %s.' % (ticket, new_assignee))
                            jira.assign_issue(issue, new_assignee)
                        else:
                            print('No new assignee found.')
                    except JIRAError as e:
                        print('Unable to reassign ticket %s to %s: %s'
                            % (ticket, new_assignee, e), file=sys.stderr)
            else:
                recheck = False
                print('No transitions found for ticket %s currently in status "%s".'
                    % (ticket, issue.fields.status.name))
            if not recheck:
                break