def unlock_targets(job_config):
    """Release the machines a finished job had locked, honoring job config.

    Looks up the job via the results serializer, then collects every target
    that is still locked *and* whose lock description matches this job's
    archive path; nodes locked under a different description are skipped
    with a warning (another job owns them now).  Depending on the job
    status and the 'unlock_on_failure' / 'nuke-on-error' config flags, the
    collected nodes are unlocked, nuked, or left alone.
    """
    serializer = report.ResultsSerializer(teuth_config.archive_base)
    job_info = serializer.job_info(job_config['name'], job_config['job_id'])
    statuses = query.get_statuses(job_info['targets'].keys())
    # only unlock/nuke targets if locked and description matches
    locked = []
    for machine_status in statuses:
        if not machine_status['locked']:
            continue
        node = shortname(machine_status['name'])
        desc = machine_status['description']
        if desc == job_info['archive_path']:
            locked.append(node)
        else:
            # Description mismatch: the node was re-locked by some other job.
            log.warning(
                "Was going to unlock %s but it was locked by another job: %s",
                node, desc)
    if not locked:
        return
    job_status = get_status(job_info)
    passed = job_status == 'pass'
    nuke_on_error = job_config.get('nuke-on-error', False)
    unlock_on_failure = job_config.get('unlock_on_failure', False)
    # Unlock on success, or on failure when the job opted in to
    # unlock_on_failure without also requesting a nuke.
    if passed or (unlock_on_failure and not nuke_on_error):
        log.info('Unlocking machines...')
        fake_ctx = create_fake_context(job_config)
        for machine in locked:
            teuthology.lock.ops.unlock_one(
                fake_ctx, machine, job_info['owner'],
                job_info['archive_path'])
    # On failure with nuke-on-error set, nuke instead of a plain unlock.
    # NOTE(review): the second argument to nuke() presumably means
    # "unlock after nuking" — confirm against nuke()'s signature.
    if not passed and nuke_on_error:
        log.info('Nuking machines...')
        fake_ctx = create_fake_context(job_config)
        nuke(fake_ctx, True)
def main(args):
    """CLI entry point for reimaging a set of already-locked nodes.

    Validates that every requested node is (a) locked, (b) owned by the
    requesting user (``--owner`` or the invoking user), and (c) of a
    machine type we know how to reimage; exits with status 1 if any
    check fails.  All nodes are then reimaged in parallel.

    :param args: docopt-style dict of CLI options
                 (``--verbose``, ``--os-type``, ``--os-version``,
                 ``--owner``, ``<nodes>``).
    """
    if args['--verbose']:
        teuthology.log.setLevel(logging.DEBUG)
    # reimage() only reads os_type/os_version off the context object,
    # so a bare Namespace is enough here.
    ctx = argparse.Namespace()
    ctx.os_type = args['--os-type']
    ctx.os_version = args['--os-version']
    nodes = args['<nodes>']
    reimage_types = get_reimage_types()
    statuses = query.get_statuses(nodes)
    owner = args['--owner'] or get_user()
    # Refuse to touch nodes that are not locked at all ...
    unlocked = [shortname(s['name']) for s in statuses if not s['locked']]
    if unlocked:
        log.error("Some of the nodes are not locked: %s", unlocked)
        exit(1)
    # ... or locked by someone other than the requesting owner ...
    improper = [
        shortname(s['name']) for s in statuses if s['locked_by'] != owner
    ]
    if improper:
        log.error("Some of the nodes are not owned by '%s': %s",
                  owner, improper)
        exit(1)
    # ... or of a machine type we cannot reimage.
    irreimageable = [
        shortname(s['name']) for s in statuses
        if s['machine_type'] not in reimage_types
    ]
    if irreimageable:
        # Fixed message grammar: "theirs machine type" -> "their machine type"
        log.error(
            "Following nodes cannot be reimaged because their machine type "
            "is not reimageable: %s", irreimageable)
        exit(1)

    def reimage_node(ctx, machine_name, machine_type):
        # Flag the node in the lock DB around the reimage so its state is
        # visible while it is being re-provisioned, then clear the flag.
        # NOTE(review): the boolean arg to update_nodes() presumably marks
        # "reimaging in progress" — confirm against ops.update_nodes.
        ops.update_nodes([machine_name], True)
        reimage(ctx, machine_name, machine_type)
        ops.update_nodes([machine_name])
        log.debug("Node '%s' reimaging is complete", machine_name)

    # Reimage every node concurrently; parallel() joins on exit.
    with parallel() as p:
        for node in statuses:
            log.debug("Start node '%s' reimaging", node['name'])
            p.spawn(reimage_node, ctx, shortname(node['name']),
                    node['machine_type'])
def unlock_targets(job_config):
    """Release the machines a finished job had locked.

    Fetches the job's target statuses and gathers the short names of
    every node that is still locked.  Depending on the job status and
    the 'unlock_on_failure' / 'nuke-on-error' config flags, those nodes
    are either unlocked, nuked, or left untouched.
    """
    serializer = report.ResultsSerializer(teuth_config.archive_base)
    job_info = serializer.job_info(job_config['name'], job_config['job_id'])
    machine_status = query.get_statuses(job_info['targets'].keys())
    # only unlock/nuke targets if locked in the first place
    locked = []
    for status in machine_status:
        if status['locked']:
            locked.append(shortname(status['name']))
    if not locked:
        return
    job_status = get_status(job_info)
    nuke_on_error = job_config.get('nuke-on-error', False)
    unlock_on_failure = job_config.get('unlock_on_failure', False)
    # Unlock on success, or on failure when unlock_on_failure is set
    # without also requesting a nuke.
    should_unlock = job_status == 'pass' or (
        unlock_on_failure and not nuke_on_error)
    if should_unlock:
        log.info('Unlocking machines...')
        fake_ctx = create_fake_context(job_config)
        for machine in locked:
            teuthology.lock.ops.unlock_one(
                fake_ctx, machine, job_info['owner'],
                job_info['archive_path'])
    # Failed job with nuke-on-error: nuke the nodes instead.
    # NOTE(review): the second argument to nuke() presumably means
    # "unlock after nuking" — confirm against nuke()'s signature.
    if job_status != 'pass' and nuke_on_error:
        log.info('Nuking machines...')
        fake_ctx = create_fake_context(job_config)
        nuke(fake_ctx, True)
def main(ctx):
    """Entry point for the teuthology-lock CLI.

    Dispatches on the mutually exclusive operation flags carried on *ctx*:
    --list / --list-targets / --brief, --summary, --lock, --unlock,
    lock-many (num_to_lock), and --update.

    :param ctx: argparse.Namespace of parsed CLI options.
    :returns: 0 on success, 1 on any failure.
    """
    if ctx.verbose:
        teuthology.log.setLevel(logging.DEBUG)
    set_config_attr(ctx)
    ret = 0
    user = ctx.owner
    machines = [
        misc.canonicalize_hostname(m, user=False) for m in ctx.machines
    ]
    machines_to_update = []
    # A --targets YAML file may supply additional machines to operate on.
    if ctx.targets:
        try:
            with open(ctx.targets) as f:
                g = yaml.safe_load_all(f)
                for new in g:
                    if 'targets' in new:
                        for t in new['targets'].iterkeys():
                            machines.append(t)
        except IOError as e:
            raise argparse.ArgumentTypeError(str(e))
    # Argument sanity checks: which flags may be combined with which.
    if ctx.f:
        assert ctx.lock or ctx.unlock, \
            '-f is only supported by --lock and --unlock'
    if machines:
        assert ctx.lock or ctx.unlock or ctx.list or ctx.list_targets \
            or ctx.update or ctx.brief, \
            'machines cannot be specified with that operation'
    else:
        if ctx.lock:
            log.error("--lock requires specific machines passed as arguments")
        else:
            # This condition might never be hit, but it's not clear.
            assert ctx.num_to_lock or ctx.list or ctx.list_targets or \
                ctx.summary or ctx.brief, \
                'machines must be specified for that operation'
    if ctx.all:
        assert ctx.list or ctx.list_targets or ctx.brief, \
            '--all can only be used with --list, --list-targets, and --brief'
        assert ctx.owner is None, \
            '--all and --owner are mutually exclusive'
        assert not machines, \
            '--all and listing specific machines are incompatible'
    if ctx.num_to_lock:
        assert ctx.machine_type, \
            'must specify machine type to lock'
    if ctx.brief or ctx.list or ctx.list_targets:
        assert ctx.desc is None, '--desc does nothing with --list/--brief'
        # we may need to update host keys for vms. Don't do it for
        # every vm; however, update any vms included in the list given
        # to the CLI (machines), or any owned by the specified owner or
        # invoking user if no machines are specified.
        vmachines = []
        statuses = query.get_statuses(machines)
        owner = ctx.owner or misc.get_user()
        for machine in statuses:
            if query.is_vm(status=machine) and machine['locked'] and \
                    (machines or machine['locked_by'] == owner):
                vmachines.append(machine['name'])
        if vmachines:
            log.info("updating host keys for %s", ' '.join(sorted(vmachines)))
            # Best-effort: errors are swallowed (_raise=False).
            keys.do_update_keys(vmachines, _raise=False)
            # get statuses again to refresh any updated keys
            statuses = query.get_statuses(machines)
        if statuses:
            # Winnow the status list down by every filter the user gave.
            statuses = util.winnow(statuses, ctx.machine_type, 'machine_type')
            if not machines and ctx.owner is None and not ctx.all:
                ctx.owner = misc.get_user()
            statuses = util.winnow(statuses, ctx.owner, 'locked_by')
            statuses = util.winnow(statuses, ctx.status, 'up',
                                   lambda s: s['up'] == (ctx.status == 'up'))
            statuses = util.winnow(
                statuses, ctx.locked, 'locked',
                lambda s: s['locked'] == (ctx.locked == 'true'))
            statuses = util.winnow(statuses, ctx.desc, 'description')
            statuses = util.winnow(statuses, ctx.desc_pattern, 'description',
                                   lambda s: s['description'] and
                                   ctx.desc_pattern in s['description'])
            if ctx.json_query:
                statuses = util.json_matching_statuses(ctx.json_query,
                                                       statuses)
            statuses = util.winnow(statuses, ctx.os_type, 'os_type')
            statuses = util.winnow(statuses, ctx.os_version, 'os_version')
            # When listing, only show the vm_host's name, not every detail
            for s in statuses:
                if not query.is_vm(status=s):
                    continue
                # with an OpenStack API, there is no host for a VM
                if s['vm_host'] is None:
                    continue
                vm_host_name = s.get('vm_host', dict())['name']
                if vm_host_name:
                    s['vm_host'] = vm_host_name
            if ctx.list:
                print(json.dumps(statuses, indent=4))
            elif ctx.brief:
                for s in sorted(statuses, key=lambda s: s.get('name')):
                    locked = "un" if s['locked'] == 0 else " "
                    mo = re.match(r'\w+@(\w+?)\..*', s['name'])
                    host = mo.group(1) if mo else s['name']
                    print('{host} {locked}locked {owner} "{desc}"'.format(
                        locked=locked, host=host, owner=s['locked_by'],
                        desc=s['description']))
            else:
                # --list-targets: emit a 'targets:' YAML fragment.
                frag = {'targets': {}}
                for f in statuses:
                    frag['targets'][f['name']] = f['ssh_pub_key']
                print(yaml.safe_dump(frag, default_flow_style=False))
        else:
            log.error('error retrieving lock statuses')
            ret = 1
    elif ctx.summary:
        do_summary(ctx)
        return 0
    elif ctx.lock:
        if not util.vps_version_or_type_valid(ctx.machine_type,
                                              ctx.os_type,
                                              ctx.os_version):
            log.error('Invalid os-type or version detected -- lock failed')
            return 1
        reimage_types = teuthology.provision.fog.get_types()
        reimage_machines = list()
        updatekeys_machines = list()
        for machine in machines:
            resp = ops.lock_one(machine, user, ctx.desc)
            if resp.ok:
                machine_status = resp.json()
                machine_type = machine_status['machine_type']
            if not resp.ok:
                ret = 1
                if not ctx.f:
                    return ret
            elif not query.is_vm(machine, machine_status):
                if machine_type in reimage_types:
                    # Reimage in parallel just below here
                    reimage_machines.append(machine)
                # Update keys last
                # BUG FIX: was `updatekeys_machines = list()`, which rebound
                # the accumulator to an empty list every iteration, so the
                # key-update loop below never ran.  Append instead.
                updatekeys_machines.append(machine)
            else:
                machines_to_update.append(machine)
                teuthology.provision.create_if_vm(
                    ctx,
                    misc.canonicalize_hostname(machine),
                )
        with teuthology.parallel.parallel() as p:
            for machine in reimage_machines:
                p.spawn(teuthology.provision.reimage, ctx, machine)
        for machine in updatekeys_machines:
            keys.do_update_keys([machine])
    elif ctx.unlock:
        if ctx.owner is None and user is None:
            user = misc.get_user()
        # If none of them are vpm, do them all in one shot
        if not filter(query.is_vm, machines):
            res = ops.unlock_many(machines, user)
            return 0 if res else 1
        for machine in machines:
            if not ops.unlock_one(ctx, machine, user):
                ret = 1
                if not ctx.f:
                    return ret
            else:
                machines_to_update.append(machine)
    elif ctx.num_to_lock:
        result = ops.lock_many(ctx, ctx.num_to_lock, ctx.machine_type, user,
                               ctx.desc, ctx.os_type, ctx.os_version,
                               ctx.arch)
        if not result:
            ret = 1
        else:
            machines_to_update = result.keys()
            if ctx.machine_type == 'vps':
                shortnames = ' '.join([
                    misc.decanonicalize_hostname(name)
                    for name in result.keys()
                ])
                if len(result) < ctx.num_to_lock:
                    # Partial lock: roll back what we did get.
                    log.error("Locking failed.")
                    for machine in result:
                        ops.unlock_one(ctx, machine, user)
                    ret = 1
                else:
                    log.info("Successfully Locked:\n%s\n" % shortnames)
                    log.info("Unable to display keys at this time (virtual " +
                             "machines are booting).")
                    log.info(
                        "Please run teuthology-lock --list-targets %s once " +
                        "these machines come up.", shortnames)
            else:
                print(yaml.safe_dump(dict(targets=result),
                                     default_flow_style=False))
    elif ctx.update:
        assert ctx.desc is not None or ctx.status is not None, \
            'you must specify description or status to update'
        assert ctx.owner is None, 'only description and status may be updated'
        machines_to_update = machines
    # Apply any description/status change to every affected machine
    # (newly locked, unlocked one-by-one, or explicitly given with --update).
    if ctx.desc is not None or ctx.status is not None:
        for machine in machines_to_update:
            ops.update_lock(machine, ctx.desc, ctx.status)
    return ret
def main(ctx):
    """Entry point for the teuthology-lock CLI.

    Dispatches on the mutually exclusive operation flags carried on *ctx*:
    --list / --list-targets / --brief, --summary, --lock, --unlock,
    lock-many (num_to_lock), and --update.

    :param ctx: argparse.Namespace of parsed CLI options.
    :returns: 0 on success, 1 on any failure.
    """
    if ctx.verbose:
        teuthology.log.setLevel(logging.DEBUG)
    set_config_attr(ctx)
    ret = 0
    user = ctx.owner
    machines = [
        misc.canonicalize_hostname(m, user=False) for m in ctx.machines
    ]
    machines_to_update = []
    # A --targets YAML file may supply additional machines to operate on.
    if ctx.targets:
        try:
            # FIX: use open() instead of the deprecated file() builtin
            # (removed in Python 3; identical behavior here).
            with open(ctx.targets) as f:
                g = yaml.safe_load_all(f)
                for new in g:
                    if 'targets' in new:
                        for t in new['targets'].iterkeys():
                            machines.append(t)
        except IOError as e:
            raise argparse.ArgumentTypeError(str(e))
    # Argument sanity checks: which flags may be combined with which.
    if ctx.f:
        assert ctx.lock or ctx.unlock, \
            '-f is only supported by --lock and --unlock'
    if machines:
        assert ctx.lock or ctx.unlock or ctx.list or ctx.list_targets \
            or ctx.update or ctx.brief, \
            'machines cannot be specified with that operation'
    else:
        if ctx.lock:
            log.error("--lock requires specific machines passed as arguments")
        else:
            # This condition might never be hit, but it's not clear.
            assert ctx.num_to_lock or ctx.list or ctx.list_targets or \
                ctx.summary or ctx.brief, \
                'machines must be specified for that operation'
    if ctx.all:
        assert ctx.list or ctx.list_targets or ctx.brief, \
            '--all can only be used with --list, --list-targets, and --brief'
        assert ctx.owner is None, \
            '--all and --owner are mutually exclusive'
        assert not machines, \
            '--all and listing specific machines are incompatible'
    if ctx.num_to_lock:
        assert ctx.machine_type, \
            'must specify machine type to lock'
    if ctx.brief or ctx.list or ctx.list_targets:
        assert ctx.desc is None, '--desc does nothing with --list/--brief'
        # we may need to update host keys for vms. Don't do it for
        # every vm; however, update any vms included in the list given
        # to the CLI (machines), or any owned by the specified owner or
        # invoking user if no machines are specified.
        vmachines = []
        statuses = query.get_statuses(machines)
        owner = ctx.owner or misc.get_user()
        for machine in statuses:
            if query.is_vm(status=machine) and machine['locked'] and \
                    (machines or machine['locked_by'] == owner):
                vmachines.append(machine['name'])
        if vmachines:
            log.info("updating host keys for %s", ' '.join(sorted(vmachines)))
            # Best-effort: errors are swallowed (_raise=False).
            keys.do_update_keys(vmachines, _raise=False)
            # get statuses again to refresh any updated keys
            statuses = query.get_statuses(machines)
        if statuses:
            # Winnow the status list down by every filter the user gave.
            statuses = util.winnow(statuses, ctx.machine_type, 'machine_type')
            if not machines and ctx.owner is None and not ctx.all:
                ctx.owner = misc.get_user()
            statuses = util.winnow(statuses, ctx.owner, 'locked_by')
            statuses = util.winnow(statuses, ctx.status, 'up',
                                   lambda s: s['up'] == (ctx.status == 'up'))
            statuses = util.winnow(
                statuses, ctx.locked, 'locked',
                lambda s: s['locked'] == (ctx.locked == 'true'))
            statuses = util.winnow(statuses, ctx.desc, 'description')
            statuses = util.winnow(statuses, ctx.desc_pattern, 'description',
                                   lambda s: s['description'] and
                                   ctx.desc_pattern in s['description'])
            if ctx.json_query:
                statuses = util.json_matching_statuses(ctx.json_query,
                                                       statuses)
            statuses = util.winnow(statuses, ctx.os_type, 'os_type')
            statuses = util.winnow(statuses, ctx.os_version, 'os_version')
            # When listing, only show the vm_host's name, not every detail
            for s in statuses:
                if not query.is_vm(status=s):
                    continue
                # with an OpenStack API, there is no host for a VM
                if s['vm_host'] is None:
                    continue
                vm_host_name = s.get('vm_host', dict())['name']
                if vm_host_name:
                    s['vm_host'] = vm_host_name
            if ctx.list:
                print(json.dumps(statuses, indent=4))
            elif ctx.brief:
                for s in sorted(statuses, key=lambda s: s.get('name')):
                    locked = "un" if s['locked'] == 0 else " "
                    mo = re.match(r'\w+@(\w+?)\..*', s['name'])
                    host = mo.group(1) if mo else s['name']
                    print('{host} {locked}locked {owner} "{desc}"'.format(
                        locked=locked, host=host, owner=s['locked_by'],
                        desc=s['description']))
            else:
                # --list-targets: emit a 'targets:' YAML fragment.
                frag = {'targets': {}}
                for f in statuses:
                    frag['targets'][f['name']] = f['ssh_pub_key']
                print(yaml.safe_dump(frag, default_flow_style=False))
        else:
            log.error('error retrieving lock statuses')
            ret = 1
    elif ctx.summary:
        do_summary(ctx)
        return 0
    elif ctx.lock:
        if not util.vps_version_or_type_valid(
                ctx.machine_type, ctx.os_type, ctx.os_version):
            log.error('Invalid os-type or version detected -- lock failed')
            return 1
        reimage_types = teuthology.provision.fog.get_types()
        reimage_machines = list()
        updatekeys_machines = list()
        for machine in machines:
            resp = ops.lock_one(machine, user, ctx.desc)
            if resp.ok:
                machine_status = resp.json()
                machine_type = machine_status['machine_type']
            if not resp.ok:
                ret = 1
                if not ctx.f:
                    return ret
            elif not query.is_vm(machine, machine_status):
                if machine_type in reimage_types:
                    # Reimage in parallel just below here
                    reimage_machines.append(machine)
                # Update keys last
                # BUG FIX: was `updatekeys_machines = list()`, which rebound
                # the accumulator to an empty list every iteration, so the
                # key-update loop below never ran.  Append instead.
                updatekeys_machines.append(machine)
            else:
                machines_to_update.append(machine)
                teuthology.provision.create_if_vm(
                    ctx,
                    misc.canonicalize_hostname(machine),
                )
        with teuthology.parallel.parallel() as p:
            for machine in reimage_machines:
                p.spawn(teuthology.provision.reimage, ctx, machine)
        for machine in updatekeys_machines:
            keys.do_update_keys([machine])
    elif ctx.unlock:
        if ctx.owner is None and user is None:
            user = misc.get_user()
        # If none of them are vpm, do them all in one shot
        if not filter(query.is_vm, machines):
            res = ops.unlock_many(machines, user)
            return 0 if res else 1
        for machine in machines:
            if not ops.unlock_one(ctx, machine, user):
                ret = 1
                if not ctx.f:
                    return ret
            else:
                machines_to_update.append(machine)
    elif ctx.num_to_lock:
        result = ops.lock_many(ctx, ctx.num_to_lock, ctx.machine_type, user,
                               ctx.desc, ctx.os_type, ctx.os_version,
                               ctx.arch)
        if not result:
            ret = 1
        else:
            machines_to_update = result.keys()
            if ctx.machine_type == 'vps':
                shortnames = ' '.join(
                    [misc.decanonicalize_hostname(name)
                     for name in result.keys()]
                )
                if len(result) < ctx.num_to_lock:
                    # Partial lock: roll back what we did get.
                    log.error("Locking failed.")
                    for machine in result:
                        ops.unlock_one(ctx, machine, user)
                    ret = 1
                else:
                    log.info("Successfully Locked:\n%s\n" % shortnames)
                    log.info(
                        "Unable to display keys at this time (virtual " +
                        "machines are booting).")
                    log.info(
                        "Please run teuthology-lock --list-targets %s once " +
                        "these machines come up.", shortnames)
            else:
                print(yaml.safe_dump(
                    dict(targets=result), default_flow_style=False))
    elif ctx.update:
        assert ctx.desc is not None or ctx.status is not None, \
            'you must specify description or status to update'
        assert ctx.owner is None, 'only description and status may be updated'
        machines_to_update = machines
    # Apply any description/status change to every affected machine
    # (newly locked, unlocked one-by-one, or explicitly given with --update).
    if ctx.desc is not None or ctx.status is not None:
        for machine in machines_to_update:
            ops.update_lock(machine, ctx.desc, ctx.status)
    return ret