def check_token(client, token):
    """Poll status until it reports a token containing *token*.

    Returns the token text found in status.  Raises JujuAssertionError
    if no matching token appears within 300 seconds.
    """
    for _ in until_timeout(300):
        found = get_token_from_status(client)
        if found and token in found:
            return found
    raise JujuAssertionError(
        'Token is not {}: {}'.format(token, found))
def wait_until_removed(client, agent_id):
    """Wait for an agent to be removed from the environment.

    Returns as soon as *agent_id* is no longer present; raises
    AssertionError if it is still present after 240 seconds.
    """
    for _ in until_timeout(240):
        # Success path: the agent has disappeared from status.
        if not has_agent(client, agent_id):
            return
    raise AssertionError('Machine not destroyed: {}.'.format(agent_id))
def action_busy(client, applications):
    """Generate controller load: scale applications, add models, poll status.

    Adds one unit to each application, creates 20 fresh models (numbered
    via the module-level ``total_new_models`` counter), then pings status
    for two minutes.  Returns the list of newly created model clients.
    """
    global total_new_models
    busy_start = datetime.utcnow()
    # Scale out each application by one unit and wait for it to settle.
    for application in applications:
        client.juju('add-unit', (application, '-n', '1'))
        client.wait_for_started(timeout=1200)
        client.wait_for_workloads(timeout=1200)
    created_models = []
    for _ in range(20):
        total_new_models += 1
        model = client.add_model('model{}'.format(total_new_models))
        model.wait_for_started()
        log.info('Added model number {}'.format(total_new_models))
        created_models.append(model)
    # Keep the controller busy with status pings for two minutes.
    for _ in until_timeout(MINUTE*2):
        log.info('Checking status ping.')
        client.show_status()
        log.info('Sleeping . . .')
        time.sleep(MINUTE/2)
    busy_end = datetime.utcnow()
    log.info('Create action took: {}'.format(
        _convert_seconds_to_readable(
            int((busy_end - busy_start).total_seconds()))))
    return created_models
def assess_upgrade(old_client, juju_path):
    """Upgrade every client derived from *old_client* to the new binary.

    MAAS deployments get a longer agent-version timeout because agent
    turnover there is slower than on the other providers.
    """
    all_clients = _get_clients_to_upgrade(old_client, juju_path)
    # all clients have the same provider type, work this out once.
    timeout = 1200 if all_clients[0].env.provider == 'maas' else 600
    for client in all_clients:
        logging.info('Upgrading {}'.format(client.env.environment))
        upgrade_juju(client)
        client.wait_for_version(client.get_matching_agent_version(), timeout)
        logging.info('Agents upgraded in {}'.format(client.env.environment))
        client.show_status()
        logging.info('Waiting for model {}'.format(client.env.environment))
        # While the agents are upgraded, the controller/model may still be
        # upgrading.  We are only certain that the upgrade is complete
        # when we can list models.
        for _ in until_timeout(600):
            try:
                client.list_models()
            except subprocess.CalledProcessError:
                pass
            else:
                break
        # The upgrade will trigger the charm hooks.  We want the charms to
        # return to active state to know they accepted the upgrade.
        client.wait_for_workloads()
        logging.info('Upgraded model {}'.format(client.env.environment))
def wait_for_success(sms, request, pause=3, verbose=False):
    """Poll an operation until its status is SUCCEEDED (10 minute cap).

    :param sms: service-management client with get_operation_status().
    :param request: request object carrying the request_id to poll.
    :param pause: seconds to sleep between polls.
    :param verbose: when True, print a progress dot per poll.
    """
    for _ in until_timeout(600):
        if verbose:
            print('.', end='')
            sys.stdout.flush()
        sleep(pause)
        operation = sms.get_operation_status(request.request_id)
        if operation.status == SUCCEEDED:
            break
def _terminate_instance(self, machine_id):
    """Stop *machine_id*, wait until it reports 'stopped', then delete it.

    Raises Exception if the machine has not stopped within 30 seconds.
    """
    log.info('Stopping instance {}'.format(machine_id))
    self.client.stop_machine(machine_id)
    stopped = False
    for _ in until_timeout(30):
        details = self.client._list_machines(machine_id)
        if details['state'] == 'stopped':
            stopped = True
            break
        sleep(3)
    if not stopped:
        raise Exception('Instance did not stop: {}'.format(machine_id))
    log.info('Terminating instance {}'.format(machine_id))
    self.client.delete_machine(machine_id)
def _delete_running_machine(self, machine_id):
    """Stop *machine_id*, poll until it is 'stopped', then delete it.

    NOTE(review): unlike _terminate_instance, this proceeds to delete
    even if the 120s poll window elapses without seeing 'stopped'.
    """
    self.stop_machine(machine_id)
    for _ in until_timeout(120):
        if self.verbose:
            print(".", end="")
            sys.stdout.flush()
        sleep(self.pause)
        details = self._list_machines(machine_id)
        if details['state'] == 'stopped':
            break
    if self.verbose:
        print("stopped")
    self.delete_machine(machine_id)
def check_security_groups(cls, client, env_groups):
    """Verify that no security group from *env_groups* survived teardown.

    Substrates that cannot enumerate security groups are skipped.
    Raises Exception naming any group still present after 30 seconds.
    """
    with make_substrate_manager(
            client, ['iter_instance_security_groups']) as substrate:
        if substrate is None:
            # This substrate cannot report security groups; nothing to do.
            return
        for _ in until_timeout(30):
            remain_groups = dict(substrate.iter_security_groups())
            leftovers = set(remain_groups).intersection(env_groups)
            if not leftovers:
                break
    group_text = ', '.join(sorted(remain_groups[l] for l in leftovers))
    if group_text:
        raise Exception(
            'Security group(s) not cleaned up: {}.'.format(group_text))
def _wait_for_model_check(client, model_check, timeout):
    """Wrapper to have a client wait for a model_check callable to succeed.

    :param client: ModelClient object to act on and pass into model_check
    :param model_check: Callable that takes a ModelClient object.  When
      the callable reaches a success state it returns True.
    :raises ModelCheckFailed: if model_check never returns True within
      `timeout` seconds.
    """
    with client.check_timeouts(), client.ignore_soft_deadline():
        for _ in until_timeout(timeout):
            if model_check(client):
                return
            sleep(1)
    raise ModelCheckFailed()
def assert_logs_appear_in_client_model(client, expected_logs, timeout):
    """Assert that `expected_logs` appear in client logs within timeout.

    :param client: ModelClient object to query logs of.
    :param expected_logs: string containing log contents to check for.
    :param timeout: int seconds to wait for before raising
        JujuAssertionError.
    """
    for _ in until_timeout(timeout):
        log_output = client.get_juju_output(
            'debug-log', '--no-tail', '--replay', '-l', 'DEBUG')
        if expected_logs in log_output:
            log.info('SUCCESS: logs migrated.')
            return
        sleep(1)
    raise JujuAssertionError(
        'Logs failed to be migrated after {}'.format(timeout))
def wait_for_http(url, timeout=600):
    """Return an open response for *url* once it answers HTTP 200.

    :param url: URL to poll.
    :param timeout: seconds to keep retrying.
    :raises JujuAssertionError: if the URL never returns 200 in time.
    """
    ctx = _get_ssl_ctx()
    for _ in until_timeout(timeout):
        try:
            if ctx is None:
                response = urllib2.urlopen(url)
            else:
                response = urllib2.urlopen(url, context=ctx)
            if response.getcode() == 200:
                return response
        except (urllib2.URLError, urllib2.HTTPError):
            # Not up yet; fall through and retry after a short pause.
            pass
        sleep(.1)
    raise JujuAssertionError('{} is not reachable'.format(url))
def assert_logs_appear_in_client_model(client, expected_logs, timeout):
    """Assert that `expected_logs` appear in client logs within timeout.

    :param client: ModelClient object to query logs of.
    :param expected_logs: string containing log contents to check for.
    :param timeout: int seconds to wait for before raising
        JujuAssertionError.
    """
    for _ in until_timeout(timeout):
        current = client.get_juju_output(
            'debug-log', '--no-tail', '--replay', '-l', 'DEBUG')
        if expected_logs not in current:
            sleep(1)
            continue
        log.info('SUCCESS: logs migrated.')
        return
    raise JujuAssertionError(
        'Logs failed to be migrated after {}'.format(timeout))
def wait_for_state_server_to_shutdown(host, client, instance_id, timeout=60):
    """Wait for the API port to close; on openstack also for nova removal.

    :raises Exception: if the nova instance is still listed after 300s.
    """
    print_now("Waiting for port to close on %s" % host)
    wait_for_port(host, 17070, closed=True, timeout=timeout)
    print_now("Closed.")
    try:
        provider_type = client.env.provider
    except NoProvider:
        provider_type = None
    if provider_type != 'openstack':
        return
    # Openstack may list the instance for a while after shutdown.
    for _ in until_timeout(300):
        if not has_nova_instance(client.env, instance_id):
            print_now('{} was removed from nova list'.format(instance_id))
            break
    else:
        raise Exception(
            '{} was not deleted:'.format(instance_id))
def wait_for_chaos(self, state='complete', timeout=300):
    """Block until every chaos-monkey unit reaches *state*.

    :param state: 'complete' (all lock files removed) or 'start'
        (all lock files present).
    :param timeout: seconds to wait before giving up.
    :raises Exception: on an unknown *state* value, or when the desired
        state is not reached within *timeout* seconds.
    """
    if not ('complete' in state or 'start' in state):
        raise Exception('Unexpected state value: {}'.format(state))
    for ignored in until_timeout(timeout):
        locks = defaultdict(list)
        for unit_name, unit in self.iter_chaos_monkey_units():
            locks[self.get_unit_status(unit_name)].append(unit_name)
        # Compare key sets, not dict.keys() against a list: on Python 3
        # dict.keys() is a view that never equals a list, which made the
        # original comparisons unconditionally False there.
        if state == 'complete' and set(locks.keys()) == {'done'}:
            logging.debug(
                'All lock files removed, chaos complete: {}'.format(locks))
            break
        if state == 'start' and set(locks.keys()) == {'running'}:
            logging.debug(
                'All lock files found, chaos started: {}'.format(locks))
            break
    else:
        raise Exception('Chaos operations did not {}.'.format(state))
def wait_for_migrating(client, timeout=60):
    """Block until provided model client has a migration status.

    :raises JujuAssertionError: If the status doesn't show migration
        within the `timeout` period.
    """
    model_name = client.env.environment
    with client.check_timeouts():
        with client.ignore_soft_deadline():
            for _ in until_timeout(timeout):
                model_details = client.show_model(model_name)
                migration_status = model_details[model_name]['status'].get(
                    'migration')
                if migration_status is not None:
                    return
                sleep(1)
            # Trailing space added after 'after': the original adjacent
            # string literals rendered e.g. "after60 seconds".
            raise JujuAssertionError(
                'Model \'{}\' failed to start migration after '
                '{} seconds'.format(model_name, timeout))
def perfscale_longrun_perf(client, pprof_collector, args):
    """Run create/busy/cleanup cycles for args.run_length hours.

    Returns a DeployDetails summarising how many cycles completed and
    the overall timing window.
    """
    test_length = args.run_length * (60 * MINUTE)
    longrun_start = datetime.utcnow()
    run_count = 0
    for _ in until_timeout(test_length):
        new_client = action_create(client)
        created_models = action_busy(new_client, ['dummy-sink'])
        action_cleanup(new_client, created_models)
        action_rest(Rest.short/2)
        run_count += 1
    timing_data = TimingData(longrun_start, datetime.utcnow())
    return DeployDetails(
        'Longrun for {} Hours.'.format(test_length/60/60),
        {'Total action runs': run_count},
        timing_data)
def start_libvirt_domain(uri, domain):
    """Call virsh to start the domain.

    @Parms URI: The address of the libvirt service.
    @Parm domain: The name of the domain.
    """
    command = ['virsh', '-c', uri, 'start', domain]
    try:
        subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        # An already-running domain is success for our purposes.
        if 'already active' not in err.output:
            raise Exception('%s failed:\n %s' % (command, err.output))
        return '%s is already running; nothing to do.' % domain
    sleep(30)
    for _ in until_timeout(120):
        if verify_libvirt_domain(uri, domain, LIBVIRT_DOMAIN_RUNNING):
            return "%s is now running" % domain
        sleep(2)
    raise Exception('libvirt domain %s did not start.' % domain)
def stop_libvirt_domain(uri, domain):
    """Call virsh to shutdown the domain.

    @Parms URI: The address of the libvirt service.
    @Parm domain: The name of the domain.
    """
    command = ['virsh', '-c', uri, 'shutdown', domain]
    try:
        subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        # A stopped domain is success for our purposes.
        if 'domain is not running' not in err.output:
            raise Exception('%s failed:\n %s' % (command, err.output))
        return ('%s is not running; nothing to do.' % domain)
    sleep(30)
    for _ in until_timeout(120):
        if verify_libvirt_domain(uri, domain, LIBVIRT_DOMAIN_SHUT_OFF):
            return "%s is now shut off" % domain
        sleep(2)
    raise Exception('libvirt domain %s is not shut off.' % domain)
def wait_for_migrating(client, timeout=60):
    """Block until provided model client has a migration status.

    :raises JujuAssertionError: If the status doesn't show migration
        within the `timeout` period.
    """
    model_name = client.env.environment
    with client.check_timeouts():
        with client.ignore_soft_deadline():
            for _ in until_timeout(timeout):
                model_details = client.show_model(model_name)
                migration_status = model_details[model_name]['status'].get(
                    'migration')
                if migration_status is not None:
                    return
                sleep(1)
            # Trailing space added after 'after': the original adjacent
            # string literals rendered e.g. "after60 seconds".
            raise JujuAssertionError(
                'Model \'{}\' failed to start migration after '
                '{} seconds'.format(
                    model_name, timeout))
def upload_by_build_number(self, build_number=None, pause_time=120,
                           timeout=600):
    """
    Upload build_number's test result.
    :param build_number: Build to upload; defaults to $BUILD_NUMBER.
    :param pause_time: Pause time in seconds between polling.
    :param timeout: Timeout in seconds.
    :return: None
    """
    build_number = build_number or os.getenv('BUILD_NUMBER')
    if not build_number:
        raise ValueError('Build number is not set')
    self.jenkins_build.set_build_number(build_number)
    completed = False
    for _ in until_timeout(timeout):
        if self.jenkins_build.is_build_completed():
            completed = True
            break
        sleep(pause_time)
    if not completed:
        raise Exception("Build fails to complete: {}".format(build_number))
    self.upload()
def wait_until_removed(client, to_remove, timeout=300):
    """Wait until none of the machines are listed in status.

    This is implemented as a context manager so that it is
    coroutine-friendly.  The start of the timeout begins at the with
    statement, but the actual waiting (if any) is done when exiting the
    with block.

    Cloud performance differs.  The caller must pass a timeout that
    matches the expected performance of the cloud.  Most clouds need
    300s to remove a machine, but azure will need much more.
    """
    # Start the clock before yielding, so the caller's own work inside
    # the with block counts against the timeout.
    ticker = until_timeout(timeout)
    yield
    pending = set(to_remove)
    for _ in ticker:
        status = client.get_status()
        still_listed = [
            machine for machine, info
            in status.iter_machines(containers=True)
            if machine in pending]
        if not still_listed:
            break
    else:
        raise Exception('Timed out waiting for removal')
def run_command(duration, timeout_signal, command):
    """Run a subprocess.  If a timeout elapses, send specified signal.

    :param duration: Timeout in seconds.
    :param timeout_signal: Signal to send to the subprocess on timeout.
    :param command: Subprocess to run (Popen args).
    :return: exit status of the subprocess, 124 if the subprocess was
        signalled.
    """
    if sys.platform == 'win32':
        # support CTRL_BREAK
        creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
    else:
        creationflags = 0
    proc = subprocess.Popen(command, creationflags=creationflags)
    # The leading None guarantees at least one poll even if duration
    # has already elapsed.
    for _ in chain([None], until_timeout(duration)):
        status = proc.poll()
        if status is not None:
            return status
        time.sleep(0.1)
    proc.send_signal(timeout_signal)
    proc.wait()
    return 124
def test_control_heterogeneous(bs_manager, other, upload_tools):
    """Test if one binary can control an environment set up by the other."""
    initial = bs_manager.client
    released = bs_manager.tear_down_client
    with run_context(bs_manager, other, upload_tools):
        token = prepare_dummy_env(initial)
        initial.wait_for_started()
        if sys.platform != "win32":
            # Currently, juju ssh is not working on Windows.
            check_token(initial, token)
            check_series(other)
            other.juju('run', ('--all', 'uname -a'))
        other.get_config('dummy-source')
        other.get_model_config()
        # Exercise relation removal and unexpose with the other binary.
        other.juju('remove-relation', ('dummy-source', 'dummy-sink'))
        status = other.get_status()
        other.juju('unexpose', ('dummy-sink',))
        status = other.get_status()
        if status.get_applications()['dummy-sink']['exposed']:
            raise AssertionError('dummy-sink is still exposed')
        status = other.get_status()
        # Deploy a second sink charm, relate and expose it.
        charm_path = local_charm_path(
            charm='dummy-sink', juju_ver=other.version)
        juju_with_fallback(other, released, 'deploy', (charm_path, 'sink2'))
        other.wait_for_started()
        other.juju('add-relation', ('dummy-source', 'sink2'))
        status = other.get_status()
        other.juju('expose', ('sink2',))
        status = other.get_status()
        if 'sink2' not in status.get_applications():
            raise AssertionError('Sink2 missing')
        # Remove sink2 and poll status until it disappears.
        other.remove_service('sink2')
        for ignored in until_timeout(30):
            status = other.get_status()
            if 'sink2' not in status.get_applications():
                break
        else:
            raise AssertionError('Sink2 not destroyed')
        # Re-establish and verify the original relation and exposure.
        other.juju('add-relation', ('dummy-source', 'dummy-sink'))
        status = other.get_status()
        relations = status.get_applications()['dummy-sink']['relations']
        if not relations['source'] == ['dummy-source']:
            raise AssertionError('source is not dummy-source.')
        other.juju('expose', ('dummy-sink',))
        status = other.get_status()
        if not status.get_applications()['dummy-sink']['exposed']:
            raise AssertionError('dummy-sink is not exposed')
        # Unit add/remove round trip.
        other.juju('add-unit', ('dummy-sink',))
        if not has_agent(other, 'dummy-sink/1'):
            raise AssertionError('dummy-sink/1 was not added.')
        other.juju('remove-unit', ('dummy-sink/1',))
        status = other.get_status()
        if has_agent(other, 'dummy-sink/1'):
            raise AssertionError('dummy-sink/1 was not removed.')
        # Container machine add/remove: first the container, then the
        # host machine that held it.
        container_type = other.preferred_container()
        other.juju('add-machine', (container_type,))
        status = other.get_status()
        container_machine, = set(
            k for k, v in status.agent_items()
            if k.endswith('/{}/0'.format(container_type)))
        container_holder = container_machine.split('/')[0]
        other.remove_machine(container_machine)
        wait_until_removed(other, container_machine)
        other.remove_machine(container_holder)
        wait_until_removed(other, container_holder)
def check_token(client, token):
    """Return the status token once it contains *token*.

    Raises JujuAssertionError if no match appears within 300 seconds.
    """
    for _ in until_timeout(300):
        found = get_token_from_status(client)
        if not found:
            continue
        if token in found:
            return found
    raise JujuAssertionError('Token is not {}: {}'.format(token, found))