def main(self): """ Entry point implementing the teuthology-openstack command. """ self.setup_logs() set_config_attr(self.args) for keyfile in [self.args.key_filename, os.environ['HOME'] + '/.ssh/id_rsa', os.environ['HOME'] + '/.ssh/id_dsa', os.environ['HOME'] + '/.ssh/id_ecdsa']: if (keyfile and os.path.isfile(keyfile)): self.key_filename = keyfile break self.verify_openstack() self.setup() exit_code = 0 if self.args.suite: if self.args.wait: self.reminders() exit_code = self.run_suite() self.reminders() if self.args.teardown: if self.args.suite and not self.args.wait: log.error("it does not make sense to teardown a cluster" " right after a suite is scheduled") else: self.teardown() return exit_code
def main(self): """ Entry point implementing the teuthology-openstack command. """ self.setup_logs() set_config_attr(self.args) for keyfile in [ self.args.key_filename, os.environ['HOME'] + '/.ssh/id_rsa', os.environ['HOME'] + '/.ssh/id_dsa', os.environ['HOME'] + '/.ssh/id_ecdsa' ]: if (keyfile and os.path.isfile(keyfile)): self.key_filename = keyfile break self.verify_openstack() self.setup() exit_code = 0 if self.args.suite: if self.args.wait: self.reminders() exit_code = self.run_suite() self.reminders() if self.args.teardown: if self.args.suite and not self.args.wait: log.error("it does not make sense to teardown a cluster" " right after a suite is scheduled") else: self.teardown() return exit_code
def main(self): """ Entry point implementing the teuthology-openstack command. """ self.setup_logs() set_config_attr(self.args) self.key_filename = self.args.key_filename self.verify_openstack() ip = self.setup() if self.args.suite: self.run_suite() if self.args.key_filename: identity = '-i ' + self.args.key_filename + ' ' else: identity = '' if self.args.upload: upload = 'upload to : ' + self.args.archive_upload else: upload = '' log.info(""" pulpito web interface: http://{ip}:8081/ ssh access : ssh {identity}{username}@{ip} # logs in /usr/share/nginx/html {upload}""".format(ip=ip, username=self.username, identity=identity, upload=upload)) if self.args.teardown: self.teardown()
def setup_class(self):
    if 'OS_AUTH_URL' not in os.environ:
        pytest.skip('no OS_AUTH_URL environment variable')
    teuthology.log.setLevel(logging.DEBUG)
    set_config_attr(argparse.Namespace())
    ip = TeuthologyOpenStack.create_floating_ip()
    if ip:
        ip_id = TeuthologyOpenStack.get_floating_ip_id(ip)
        OpenStack().run("ip floating delete " + ip_id)
        self.can_create_floating_ips = True
    else:
        self.can_create_floating_ips = False
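# setup_class() above is a capability probe: it tries to create a floating
# IP, deletes it again if that worked, and records the outcome so individual
# tests can skip instead of fail. A hedged sketch of how a test might consume
# that flag (the class and test names are illustrative, not from the source):
import pytest


class TestOpenStackFloatingIP(object):
    can_create_floating_ips = False  # set by setup_class in the real suite

    def test_requires_floating_ip(self):
        if not self.can_create_floating_ips:
            pytest.skip('tenant cannot create floating ips')
        # ... exercise code paths that need a floating IP here ...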
def main(self): """ Entry point implementing the teuthology-openstack command. """ self.setup_logs() set_config_attr(self.args) self.key_filename = self.args.key_filename self.verify_openstack() self.setup() if self.args.suite: if self.args.wait: self.reminders() self.run_suite() self.reminders() if self.args.teardown: if self.args.suite and not self.args.wait: log.error("it does not make sense to teardown a cluster" " right after a suite is scheduled") else: self.teardown()
def main(ctx):
    loglevel = logging.INFO
    if ctx.verbose:
        loglevel = logging.DEBUG
    log.setLevel(loglevel)

    log_file_path = os.path.join(ctx.log_dir, 'worker.{tube}.{pid}'.format(
        pid=os.getpid(), tube=ctx.tube,))
    setup_log_file(log_file_path)

    install_except_hook()

    load_config(ctx=ctx)

    set_config_attr(ctx)

    connection = beanstalk.connect()
    beanstalk.watch_tube(connection, ctx.tube)
    result_proc = None

    if teuth_config.teuthology_path is None:
        fetch_teuthology('master')
    fetch_qa_suite('master')

    keep_running = True
    while keep_running:
        # Check to see if we have a teuthology-results process hanging
        # around and if so, read its return code so that it can exit.
        if result_proc is not None and result_proc.poll() is not None:
            log.debug("teuthology-results exited with code: %s",
                      result_proc.returncode)
            result_proc = None

        if sentinel(restart_file_path):
            restart()
        elif sentinel(stop_file_path):
            stop()

        load_config()

        job = connection.reserve(timeout=60)
        if job is None:
            continue

        # bury the job so it won't be re-run if it fails
        job.bury()
        job_id = job.jid
        log.info('Reserved job %d', job_id)
        log.info('Config is: %s', job.body)
        job_config = yaml.safe_load(job.body)
        job_config['job_id'] = str(job_id)

        if job_config.get('stop_worker'):
            keep_running = False

        try:
            job_config, teuth_bin_path = prep_job(
                job_config,
                log_file_path,
                ctx.archive_dir,
            )
            run_job(
                job_config,
                teuth_bin_path,
                ctx.archive_dir,
                ctx.verbose,
            )
        except SkipJob:
            continue

        # This try/except block is to keep the worker from dying when
        # beanstalkc throws a SocketError
        try:
            job.delete()
        except Exception:
            log.exception("Saw exception while trying to delete job")
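# Between jobs, the worker loop above polls sentinel(restart_file_path) and
# sentinel(stop_file_path) so an operator can request a restart or stop by
# touching a file. A rough sketch of one plausible sentinel implementation,
# assuming it compares the file's mtime against the worker's start time
# (an approximation for illustration, not the actual teuthology code):
import os
import time

START_TIME = time.time()


def sentinel(path):
    """Return True if `path` was touched after this process started."""
    if not os.path.exists(path):
        return False
    return os.path.getmtime(path) > START_TIME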
def setup_class(self):
    teuthology.log.setLevel(logging.DEBUG)
    set_config_attr(argparse.Namespace())
    self.teardown_class()
def main(ctx):
    if ctx.verbose:
        teuthology.log.setLevel(logging.DEBUG)

    set_config_attr(ctx)

    ret = 0
    user = ctx.owner
    machines = [misc.canonicalize_hostname(m, user=False)
                for m in ctx.machines]
    machines_to_update = []

    if ctx.targets:
        try:
            with open(ctx.targets) as f:
                g = yaml.safe_load_all(f)
                for new in g:
                    if 'targets' in new:
                        for t in new['targets'].iterkeys():
                            machines.append(t)
        except IOError as e:
            raise argparse.ArgumentTypeError(str(e))

    if ctx.f:
        assert ctx.lock or ctx.unlock, \
            '-f is only supported by --lock and --unlock'
    if machines:
        assert ctx.lock or ctx.unlock or ctx.list or ctx.list_targets \
            or ctx.update or ctx.brief, \
            'machines cannot be specified with that operation'
    else:
        if ctx.lock:
            log.error("--lock requires specific machines passed as arguments")
        else:
            # This condition might never be hit, but it's not clear.
            assert ctx.num_to_lock or ctx.list or ctx.list_targets or \
                ctx.summary or ctx.brief, \
                'machines must be specified for that operation'
    if ctx.all:
        assert ctx.list or ctx.list_targets or ctx.brief, \
            '--all can only be used with --list, --list-targets, and --brief'
        assert ctx.owner is None, \
            '--all and --owner are mutually exclusive'
        assert not machines, \
            '--all and listing specific machines are incompatible'
    if ctx.num_to_lock:
        assert ctx.machine_type, \
            'must specify machine type to lock'

    if ctx.brief or ctx.list or ctx.list_targets:
        assert ctx.desc is None, '--desc does nothing with --list/--brief'

        # we may need to update host keys for vms.  Don't do it for
        # every vm; however, update any vms included in the list given
        # to the CLI (machines), or any owned by the specified owner or
        # invoking user if no machines are specified.
        vmachines = []
        statuses = query.get_statuses(machines)
        owner = ctx.owner or misc.get_user()
        for machine in statuses:
            if query.is_vm(status=machine) and machine['locked'] and \
                    (machines or machine['locked_by'] == owner):
                vmachines.append(machine['name'])
        if vmachines:
            log.info("updating host keys for %s", ' '.join(sorted(vmachines)))
            keys.do_update_keys(vmachines, _raise=False)
            # get statuses again to refresh any updated keys
            statuses = query.get_statuses(machines)
        if statuses:
            statuses = util.winnow(statuses, ctx.machine_type, 'machine_type')
            if not machines and ctx.owner is None and not ctx.all:
                ctx.owner = misc.get_user()
            statuses = util.winnow(statuses, ctx.owner, 'locked_by')
            statuses = util.winnow(statuses, ctx.status, 'up',
                                   lambda s: s['up'] == (ctx.status == 'up'))
            statuses = util.winnow(statuses, ctx.locked, 'locked',
                                   lambda s: s['locked'] == (ctx.locked == 'true'))
            statuses = util.winnow(statuses, ctx.desc, 'description')
            statuses = util.winnow(statuses, ctx.desc_pattern, 'description',
                                   lambda s: s['description'] and
                                   ctx.desc_pattern in s['description'])
            if ctx.json_query:
                statuses = util.json_matching_statuses(ctx.json_query,
                                                       statuses)
            statuses = util.winnow(statuses, ctx.os_type, 'os_type')
            statuses = util.winnow(statuses, ctx.os_version, 'os_version')

            # When listing, only show the vm_host's name, not every detail
            for s in statuses:
                if not query.is_vm(status=s):
                    continue
                # with an OpenStack API, there is no host for a VM
                if s['vm_host'] is None:
                    continue
                vm_host_name = s.get('vm_host', dict())['name']
                if vm_host_name:
                    s['vm_host'] = vm_host_name
            if ctx.list:
                print json.dumps(statuses, indent=4)

            elif ctx.brief:
                for s in sorted(statuses, key=lambda s: s.get('name')):
                    locked = "un" if s['locked'] == 0 else "  "
                    mo = re.match('\w+@(\w+?)\..*', s['name'])
                    host = mo.group(1) if mo else s['name']
                    print '{host} {locked}locked {owner} "{desc}"'.format(
                        locked=locked, host=host, owner=s['locked_by'],
                        desc=s['description'])

            else:
                frag = {'targets': {}}
                for f in statuses:
                    frag['targets'][f['name']] = f['ssh_pub_key']
                print yaml.safe_dump(frag, default_flow_style=False)
        else:
            log.error('error retrieving lock statuses')
            ret = 1

    elif ctx.summary:
        do_summary(ctx)
        return 0

    elif ctx.lock:
        if not util.vps_version_or_type_valid(
                ctx.machine_type, ctx.os_type, ctx.os_version):
            log.error('Invalid os-type or version detected -- lock failed')
            return 1
        reimage_types = teuthology.provision.fog.get_types()
        reimage_machines = list()
        updatekeys_machines = list()
        for machine in machines:
            resp = ops.lock_one(machine, user, ctx.desc)
            if resp.ok:
                machine_status = resp.json()
                machine_type = machine_status['machine_type']
            if not resp.ok:
                ret = 1
                if not ctx.f:
                    return ret
            elif not query.is_vm(machine, machine_status):
                if machine_type in reimage_types:
                    # Reimage in parallel just below here
                    reimage_machines.append(machine)
                # Update keys last
                updatekeys_machines.append(machine)
            else:
                machines_to_update.append(machine)
                teuthology.provision.create_if_vm(
                    ctx,
                    misc.canonicalize_hostname(machine),
                )
        with teuthology.parallel.parallel() as p:
            for machine in reimage_machines:
                p.spawn(teuthology.provision.reimage, ctx, machine)
        for machine in updatekeys_machines:
            keys.do_update_keys([machine])

    elif ctx.unlock:
        if ctx.owner is None and user is None:
            user = misc.get_user()
        # If none of them are vpm, do them all in one shot
        if not filter(query.is_vm, machines):
            res = ops.unlock_many(machines, user)
            return 0 if res else 1
        for machine in machines:
            if not ops.unlock_one(ctx, machine, user):
                ret = 1
                if not ctx.f:
                    return ret
            else:
                machines_to_update.append(machine)

    elif ctx.num_to_lock:
        result = ops.lock_many(ctx, ctx.num_to_lock, ctx.machine_type, user,
                               ctx.desc, ctx.os_type, ctx.os_version,
                               ctx.arch)
        if not result:
            ret = 1
        else:
            machines_to_update = result.keys()
            if ctx.machine_type == 'vps':
                shortnames = ' '.join(
                    [misc.decanonicalize_hostname(name)
                     for name in result.keys()]
                )
                if len(result) < ctx.num_to_lock:
                    log.error("Locking failed.")
                    for machine in result:
                        ops.unlock_one(ctx, machine, user)
                    ret = 1
                else:
                    log.info("Successfully Locked:\n%s\n" % shortnames)
                    log.info("Unable to display keys at this time (virtual "
                             "machines are booting).")
                    log.info("Please run teuthology-lock --list-targets %s "
                             "once these machines come up.", shortnames)
            else:
                print yaml.safe_dump(
                    dict(targets=result), default_flow_style=False)

    elif ctx.update:
        assert ctx.desc is not None or ctx.status is not None, \
            'you must specify description or status to update'
        assert ctx.owner is None, 'only description and status may be updated'
        machines_to_update = machines

        if ctx.desc is not None or ctx.status is not None:
            for machine in machines_to_update:
                ops.update_lock(machine, ctx.desc, ctx.status)

    return ret