def assert_wait_servers(step, serv_count):
    """Block until `serv_count` servers of the current role are running."""
    target_role = world.get_role()
    serv_count = int(serv_count)
    wait_timeout = 15 * 60 * serv_count  # 15 minutes per expected server
    LOG.info('Wait %s servers, timeout %s seconds' % (serv_count, wait_timeout))
    wait_until(world.wait_servers_running,
               args=(target_role, serv_count),
               timeout=wait_timeout,
               error_text='Not see %s servers running' % serv_count)
def check_options_in_nginx_upstream(step, option, serv_as):
    """Verify that `option` appears in the nginx proxies.include on the server."""
    server = getattr(world, serv_as)
    cloud_node = world.cloud.get_node(server)
    LOG.info('Verify %s in proxies config' % option)
    wait_until(check_config_for_option,
               args=[cloud_node, 'proxies.include', option],
               timeout=180,
               error_text="Parameter '%s' not found in proxies.include" % option)
def farm(request: FixtureRequest) -> Farm:
    # Pytest fixture: yield a test farm (freshly created, or reused from the
    # configured farm id) and clean it up when the session ends.
    if CONF.main.farm_id is None:
        LOG.info('Farm ID not set, create a new farm for test')
        test_farm = Farm.create(f'tmprev-{datetime.now().strftime("%d%m%H%M%f")}',
                                'Revizor farm for tests\n'
                                f'RV_BRANCH={CONF.feature.branch}\n'
                                f'RV_PLATFORM={CONF.feature.platform.name}\n'
                                f'RV_DIST={CONF.feature.dist.dist}\n')
        CONF.main.farm_id = test_farm.id
    else:
        LOG.info(f'Farm ID is set in config, use it: {CONF.main.farm_id}')
        test_farm = Farm.get(CONF.main.farm_id)
        # Reused farms are cleared of leftover state before the test.
        lib_farm.clear(test_farm)
    LOG.info(f'Returning test farm: {test_farm.id}')
    try:
        yield test_farm
    finally:
        failed_count = request.session.testsfailed
        LOG.info('Failed tests: %s' % failed_count)
        # Stop the farm only when stop_farm is enabled AND either the whole
        # run passed or a TE id is configured.
        if (failed_count == 0 and CONF.feature.stop_farm) or (CONF.feature.stop_farm and CONF.scalr.te_id):
            LOG.info('Clear and stop farm...')
            test_farm.terminate()
            IMPL.farm.clear_roles(test_farm.id)
            # Only farms created by this fixture (tmprev-*) are destroyed.
            if test_farm.name.startswith('tmprev'):
                LOG.info('Delete working temporary farm')
                try:
                    LOG.info('Wait all servers in farm terminated before delete')
                    wait_until(lib_server.farm_servers_state,
                               args=(test_farm, 'terminated'),
                               timeout=1800,
                               error_text='Servers in farm not terminated too long')
                    test_farm.destroy()
                except Exception as e:
                    # Best effort: failure to delete is logged, not raised.
                    LOG.warning(f'Farm cannot be deleted: {str(e)}')
        LOG.info('Farm finalize complete')
def not_in_config(step, serv_as, serv_config):
    """Ensure the server's private IP disappears from another server's config."""
    time.sleep(180)  # allow time for the config to be rewritten
    server = getattr(world, serv_as)
    config_holder = getattr(world, serv_config)
    config_node = world.cloud.get_node(config_holder)
    wait_until(check_not_in_config,
               args=(config_node, server),
               timeout=1200,
               error_text='%s with IP %s in config' % (serv_as, server.private_ip))
def check_options_in_nginx_upstream(step, option, serv_as):
    # Verify that an upstream entry built from `option` is present in the
    # nginx app-servers.include on the server aliased by `serv_as`.
    option = option.split()
    # First token may be "host:port"; backend port defaults to 80.
    host, backend_port = option[0].split(':') if ':' in option[0] else (option[0], 80)
    server = getattr(world, serv_as)
    node = world.cloud.get_node(server)
    LOG.info('Verify %s in upstream config' % option)
    if len(option) == 1:
        # Single token: look for it verbatim in the upstream config.
        wait_until(check_config_for_option,
                   args=[node, 'app-servers.include', option[0]],
                   timeout=180,
                   error_text="Options '%s' not in upstream config: %s" % (
                       option, node.run('cat /etc/nginx/app-servers.include').std_out))
    elif len(option) > 1:
        # Host may be an alias for a server object stored on `world`.
        serv = getattr(world, host, None)
        hostname = serv.private_ip if serv else host
        if option[1] == 'default':
            upstream_url = "%s:%s;" % (hostname, backend_port)
        else:
            upstream_url = "%s:%s %s;" % (hostname, backend_port, option[1])
        # A trailing weight option is inserted before the final semicolon.
        if option[-1].startswith('weight'):
            upstream_url = upstream_url.replace(';', ' %s;' % option[-1])
        LOG.info('Verify \'%s\' in upstream' % upstream_url)
        wait_until(check_config_for_option,
                   args=[node, 'app-servers.include', upstream_url],
                   timeout=180,
                   error_text='Upstream config not contains "%s"' % upstream_url)
def check_options_in_nginx_upstream(step, option, serv_as): time.sleep( 60) #TODO: Change this behavior (wait scalarizr open port in selinux) server = getattr(world, serv_as) node = world.cloud.get_node(server) LOG.info('Verify %s in upstream config' % option) option = option.split() if len(option) == 1: wait_until(check_config_for_option, args=[node, 'app-servers.include', option[0]], timeout=180, error_text="Options '%s' not in upstream config: %s" % (option, node.run('cat /etc/nginx/app-servers.include')[0])) elif len(option) > 1: host, backend_port = option[0].split(':') if ':' in option[0] else ( option[0], 80) serv = getattr(world, host, None) hostname = serv.private_ip if serv else host if option[1] == 'default': upstream_url = "%s:%s;" % (hostname, backend_port) else: upstream_url = "%s:%s %s;" % (hostname, backend_port, option[1]) if option[-1].startswith('weight'): upstream_url = upstream_url.replace(';', ' %s;' % option[-1]) LOG.info('Verify \'%s\' in upstream' % upstream_url) wait_until(check_config_for_option, args=[node, 'app-servers.include', upstream_url], timeout=180, error_text='Upstream config not contains "%s"' % upstream_url)
def check_log(step, message, serv_as):
    """Wait for `message` to appear in the server's scalarizr debug log."""
    target = getattr(world, serv_as)
    target_node = world.cloud.get_node(target)
    LOG.info('Check scalarizr log for termination')
    wait_until(world.check_text_in_scalarizr_log,
               args=(target_node, message),
               timeout=300,
               error_text='Not see %s in debug log' % message)
def check_proxy_in_nginx_config(step, www_serv, vhost_name):
    """Verify that the virtual-host domain is present in nginx proxies.include.

    :param www_serv: world attribute name of the www (nginx) server
    :param vhost_name: world attribute name of the domain object
    """
    serv = getattr(world, www_serv)
    domain = getattr(world, vhost_name)
    node = world.cloud.get_node(serv)
    LOG.info('Proxies config for server %s' % serv.public_ip)
    wait_until(check_config_for_option,
               args=[node, 'proxies.include', domain.name],
               timeout=180,
               # Bug fix: report the domain name, not the domain object's repr.
               error_text='Not see domain %s in proxies.include' % domain.name)
def assert_server_event(step, events_type, serv_as):
    """Assert that events of `events_type` were fired by the aliased server."""
    srv = getattr(world, serv_as)
    LOG.info('Check "%s" events were fired by %s' % (events_type, srv.id))
    wait_until(world.is_events_fired,
               args=(srv, events_type),
               timeout=300,
               logger=LOG,
               error_text='"%s" events were not fired by %s' % (events_type, srv.id))
def check_log(step, serv_as):
    """Wait for the 'Scalarizr terminated' line in the server's debug log."""
    srv = getattr(world, serv_as)
    srv_node = world.cloud.get_node(srv)
    LOG.info("Check scalarizr log for termination")
    wait_until(world.check_text_in_scalarizr_log,
               args=(srv_node, "Scalarizr terminated"),
               timeout=300,
               error_text='Not see "Scalarizr terminated" in debug log')
def assert_server_event(server: Server, events_type: list):
    """Wait up to 5 minutes until all `events_type` events are fired by `server`."""
    failure_msg = '"%s" events were not fired by %s' % (events_type, server.id)
    LOG.info('Check "%s" events were fired by %s' % (events_type, server.id))
    wait_until(is_events_fired,
               args=(server, events_type),
               timeout=300,
               logger=LOG,
               error_text=failure_msg)
def assert_check_upstream_after_delete(step, www_serv, have, app_serv):
    """Check the www server's upstream list after an app server was deleted.

    NOTE: a truthy `have` means the upstream must NOT contain the server
    (and vice versa) — the flag mirrors the step wording, not the expectation.
    """
    app_server = getattr(world, app_serv)
    proxy_server = getattr(world, www_serv)
    proxy_node = world.cloud.get_node(proxy_server)
    if not have:
        LOG.info('Check if upstream have %s in list' % app_server.id)
        wait_until(world.wait_upstream_in_config,
                   args=(proxy_node, app_server.private_ip),
                   timeout=180,
                   error_text="Upstream %s not in list" % app_server.private_ip)
    else:
        LOG.info('Check if upstream not have %s in list' % app_server.id)
        wait_until(world.wait_upstream_in_config,
                   args=(proxy_node, app_server.private_ip, False),
                   timeout=180,
                   error_text="Upstream %s in list" % app_server.private_ip)
def check_ebs_status(step, serv_as, status):
    """Check EBS storage status"""
    if CONF.feature.driver.current_cloud == Platform.GCE:
        # No EBS volumes on GCE
        return
    time.sleep(30)
    srv = getattr(world, serv_as)
    wait_until(world.check_server_storage,
               args=(serv_as, status),
               timeout=300,
               error_text='Volume from server %s is not %s' % (srv.id, status))
def check_ebs_status(step, serv_as, status):
    """Check EBS storage status"""
    if CONF.main.driver == Platform.GCE:
        return  # no EBS volumes on GCE
    time.sleep(30)
    target = getattr(world, serv_as)
    wait_until(world.check_server_storage,
               args=(serv_as, status),
               timeout=300,
               error_text="Volume from server %s is not %s" % (target.id, status))
def assert_check_upstream_after_delete(step, www_serv, have, app_serv, private_ip=None):
    """Verify presence/absence of an app server in the www server upstream list.

    A truthy `have` means the server must be ABSENT from the upstream config
    (the flag mirrors the step wording, not the expectation). When
    `private_ip` is set, the IP saved as '<alias>_private_ip' on world is used.
    """
    app_server = getattr(world, app_serv)
    proxy_server = getattr(world, www_serv)
    ip = getattr(world, '%s_private_ip' % app_serv) if private_ip else app_server.private_ip
    proxy_node = world.cloud.get_node(proxy_server)
    if have:
        LOG.info('Check if upstream not have %s in list' % app_server.id)
        wait_until(world.wait_upstream_in_config,
                   args=(proxy_node, ip, False),
                   timeout=180,
                   error_text="Server %s (%s) in upstream list" % (app_server.id, ip))
    else:
        LOG.info('Check if upstream have %s in list' % app_server.id)
        wait_until(world.wait_upstream_in_config,
                   args=(proxy_node, ip),
                   timeout=180,
                   error_text="Server %s (%s) not in upstream list" % (app_server.id, ip))
def listen_only(step, port, serv_as):
    """Assert `port` is open only on the given server and closed on the rest."""
    world.farm.servers.reload()
    running = [s for s in world.farm.servers if s.status == 'Running']
    target = getattr(world, serv_as)
    port = port.strip()
    wait_until(world.check_open_port,
               args=(target, port.strip()),
               timeout=600,
               error_text="Port %s is not open" % port)
    for other in running:
        if other.id == target.id:
            continue
        time.sleep(60)
        opened = world.check_open_port(other, port.strip())
        world.assert_exist(
            opened,
            'Port %s is open, but must be closed in server %s' % (port, other.id))
def app_server_should_be_clean(step, serv_as):
    """Check the nginx upstream file holds only the default 127.0.0.1:80 entry."""
    srv = getattr(world, serv_as)
    srv_node = world.cloud.get_node(srv)
    ip = '127.0.0.1:80'
    LOG.info('Check if default ip is in list')
    wait_until(world.wait_upstream_in_config,
               args=(srv_node, ip),
               timeout=180,
               error_text="Server %s (%s) not in upstream list" % (srv.id, ip))
    contents = srv_node.run('cat /etc/nginx/app-servers.include').std_out
    found = re.findall(r"((?:\d+\.?){4}:\d+)", contents)
    if len(found) != 1:
        raise AssertionError(
            'In default app-servers.include must be only one host, but it: %s (%s)'
            % (len(found), found))
    if found[0] != ip:
        raise AssertionError(
            'First host in default app-server.include is not localhost, it: %s' % found)
def wait_voume_snapshot(step):  # NOTE: migrated
    # NOTE(review): name has a typo ('voume'); kept unchanged because the step
    # binding uses the function name.
    def is_snapshot_completed(**kwargs):
        # Poll the first snapshot in the listing and check its status.
        status = IMPL.aws_tools.snapshots_list(**kwargs)[0]['status']
        LOG.info('Wait for volume snapshot completed, actual state is: %s ' % status)
        return status == "completed"
    wait_until(
        is_snapshot_completed,
        kwargs=dict(
            location=CONF.feature.platform.location,
            snapshot_id=getattr(world, 'volume_snapshot_id')),
        timeout=600,
        logger=LOG)
def assert_role_task_created(step, timeout=1400):
    # Wait until the role-creation bundle task completes, then clean up the
    # temporary cloud resources used while building the role.
    platform = CONF.feature.platform
    res = wait_until(
        IMPL.bundle.assert_role_task_created,
        args=(world.bundle_task.get('id'), ),
        timeout=timeout,
        error_text="Time out error. Can't create role with behaviors: %s." % CONF.feature.behaviors)
    if res.get('failure_reason'):
        raise AssertionError("Can't create role: %s. Error: %s" % (res['role_id'], res['failure_reason']))
    LOG.info('New role was created successfully with Role_id: %s.' % res['role_id'])
    world.bundled_role_id = res['role_id']
    # Remove port forward rule for Cloudstack
    if platform.is_cloudstack:
        LOG.info('Deleting a Port Forwarding Rule. IP:%s, Port:%s' % (world.forwarded_port, world.ip))
        if not world.cloud.close_port(world.cloud_server, world.forwarded_port, ip=world.ip):
            raise AssertionError("Can't delete a port forwarding Rule.")
        LOG.info('Port Forwarding Rule was successfully removed.')
    # Destroy virtual machine in Cloud
    LOG.info('Destroying virtual machine %s in Cloud' % world.cloud_server.id)
    try:
        if not world.cloud_server.destroy():
            raise AssertionError("Can't destroy node with id: %s." % world.cloud_server.id)
    except Exception as e:
        # On GCE a destroy of an already-gone node raises with the node name
        # in the message; that case is ignored, anything else re-raised.
        # NOTE(review): on non-GCE platforms the exception is swallowed here —
        # confirm that is intended.
        if platform.is_gce:
            if world.cloud_server.name in str(e):
                pass
            else:
                raise
    LOG.info('Virtual machine %s was successfully destroyed.' % world.cloud_server.id)
    world.cloud_server = None
def verify_listen_for_port(step, serv_as, option, port):
    # Check the HAProxy 'listens' section for `port`: either the
    # default_backend line (option == 'backend') or a proxy template that a
    # previous step stored on `world`.
    LOG.info("Verify backends servers in config")
    haproxy_server = getattr(world, serv_as)
    port = int(port)
    LOG.info("Backend port: %s" % port)
    node = world.cloud.get_node(haproxy_server)
    config = wait_until(check_config_for_option,
                        args=[node, 'listens', port],
                        timeout=300,
                        error_text='No listens section in HAProxy config')
    LOG.debug("HAProxy config : %s" % config)
    if option == 'backend':
        # Look for "default_backend scalr[:id]:backend[:name]:<port>".
        for opt in config['listens'][port]:
            if re.match('default_backend scalr(?:\:\d+)?:backend(?:\:\w+)?:%s' % port, opt):
                LOG.info('Haproxy server "%s" has default_backend for "%s" port: "%s"' % (haproxy_server.id, port, opt))
                break
        else:
            raise AssertionError(
                "Listens sections not contain backend for '%s' port: %s" % (port, config['listens'][port]))
    else:
        # Compare against the saved '<name>_proxy' template, line by line.
        proxy = getattr(world, '%s_proxy' % option.split()[0])
        proxy_template = [i.strip() for i in proxy["proxy_template"].strip().split('\n')]
        if [i for i in config['listens'][port] if i in proxy_template] == proxy_template:
            LOG.info('Haproxy server "%s" has correct proxy template for "%s" port: "%s"' % (
                haproxy_server.id, port, proxy_template))
        else:
            raise AssertionError("Listens sections not contain '%s' for '%s' port: %s" % (option, port, config['listens'][port]))
def assert_role_task_created(step, timeout=1400):
    # Wait until the role-creation bundle task completes, then clean up the
    # temporary cloud resources used while building the role.
    res = wait_until(
        IMPL.bundle.assert_role_task_created,
        args=(world.bundle_task.get('id'), ),
        timeout=timeout,
        error_text="Time out error. Can't create role with behaviors: %s." % CONF.feature.behaviors)
    if res.get('failure_reason'):
        raise AssertionError("Can't create role: %s. Error: %s" % (res['role_id'], res['failure_reason']))
    LOG.info('New role was created successfully with Role_id: %s.' % res['role_id'])
    world.bundled_role_id = res['role_id']
    # Remove port forward rule for Cloudstack-family clouds
    if CONF.feature.driver.current_cloud in [Platform.CLOUDSTACK, Platform.IDCF, Platform.KTUCLOUD]:
        LOG.info('Deleting a Port Forwarding Rule. IP:%s, Port:%s' % (world.forwarded_port, world.ip))
        if not world.cloud.close_port(world.cloud_server, world.forwarded_port, ip=world.ip):
            raise AssertionError("Can't delete a port forwarding Rule.")
        LOG.info('Port Forwarding Rule was successfully removed.')
    # Destroy virtual machine in Cloud
    LOG.info('Destroying virtual machine %s in Cloud' % world.cloud_server.id)
    try:
        if not world.cloud_server.destroy():
            raise AssertionError("Can't destroy node with id: %s." % world.cloud_server.id)
    except Exception as e:
        # GCE raises with the node name in the message when the node is
        # already gone; only that case is ignored on GCE.
        # NOTE(review): on non-GCE platforms the exception is swallowed here —
        # confirm that is intended.
        if CONF.feature.driver.current_cloud == Platform.GCE:
            if world.cloud_server.name in str(e):
                pass
            else:
                raise
    LOG.info('Virtual machine %s was successfully destroyed.' % world.cloud_server.id)
    world.cloud_server = None
def is_scalarizr_connected(step, timeout=1400): LOG.info('Establish connection with scalarizr.') #Whait outbound request from scalarizr res = wait_until( IMPL.bundle.check_scalarizr_connection, args=(world.server.id, ), timeout=timeout, error_text="Time out error. Can't establish connection with scalarizr." ) if res.get('failure_reason'): raise AssertionError("Bundle task {id} failed. Error: {msg}".format( id=res['id'], msg=res['failure_reason'])) world.bundle_task = res if not res['behaviors']: world.bundle_task.update({'behaviors': ['base']}) elif 'base' not in res['behaviors']: world.bundle_task.update({ 'behaviors': ','.join((','.join(res['behaviors']), 'base')).split(',') }) else: world.bundle_task.update({'behaviors': res['behaviors']}) LOG.info( 'Connection with scalarizr was established. Received the following behaviors: %s' % world.bundle_task['behaviors'])
def install_scalarizr_to_server(server: Server,
                                cloud: Cloud,
                                use_sysprep: bool = False,
                                use_rv_to_branch: bool = False,
                                custom_branch: str = None) -> str:
    """
    Install scalarizr to linux or windows server from branch

    :param server: Server for scalarizr
    :param cloud: Cloud object
    :param use_sysprep: If True and windows, run sysprep
    :param use_rv_to_branch: Get branch from RV_TO_BRANCH
    :param custom_branch: Use custom branch
    :return: Installed scalarizr version
    """
    if server:
        server.reload()
    LOG.debug('Cloud server not found get node from server')
    node = wait_until(cloud.get_node, args=(server,), timeout=300, logger=LOG)
    LOG.debug('Node get successfully: %s' % node)
    rv_branch = CONF.feature.branch
    rv_to_branch = CONF.feature.to_branch
    # Branch priority: RV_TO_BRANCH flag > explicit custom branch > default.
    if use_rv_to_branch:
        branch = rv_to_branch
    elif custom_branch:
        branch = custom_branch
    else:
        branch = rv_branch
    LOG.info('Installing scalarizr from branch %s' % branch)
    scalarizr_ver = node.install_scalarizr(branch=branch)
    # Windows images are sysprepped after install when requested.
    if use_sysprep and node.os.is_windows:
        run_sysprep(cloud, node)
    LOG.debug('Scalarizr %s was successfully installed' % scalarizr_ver)
    return scalarizr_ver
def add_objects(step, obj, serv_as):
    """ Insert data to RabbitMQ server """
    serv = getattr(world, serv_as)
    node = world.cloud.get_node(serv)
    # Credentials come from the RabbitMQ control panel details.
    password = wait_until(world.wait_rabbitmq_cp,
                          timeout=360,
                          error_text="Not see detail to rabbitmq panel")['password']
    setattr(world, 'rabbitmq_password', password)
    LOG.info('Rabbitmq password: %s' % password)
    if obj == 'user':
        node.run('rabbitmqctl add_user testuser testpassword')
        LOG.info('Add user scalr to rabbitmq')
    elif obj == 'vhost':
        node.run('rabbitmqctl add_vhost testvhost')
        LOG.info('Add vhost "testvhost" to rabbitmq')
    elif obj == 'queue':
        credentials = pika.PlainCredentials('scalr', password)
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(credentials=credentials,
                                      host=str(serv.public_ip)))
        channel = connection.channel()
        # durable=True keeps the queue across broker restarts.
        channel.queue_declare(queue='test_queue', durable=True)
        LOG.info('Add queue "test_queue" to rabbitmq')
    elif obj == 'message':
        credentials = pika.PlainCredentials('scalr', password)
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(credentials=credentials,
                                      host=str(serv.public_ip)))
        channel = connection.channel()
        # delivery_mode=2 marks the message persistent.
        channel.basic_publish(exchange='',
                              routing_key='test_queue',
                              body='super test message',
                              properties=pika.BasicProperties(delivery_mode=2,))
        LOG.info('Add message to rabbitmq')
def listen_only(step, port, serv_as):
    """Ensure `port` is open on `serv_as` and closed on every other running server."""
    world.farm.servers.reload()
    all_running = [srv for srv in world.farm.servers if srv.status == 'Running']
    server = getattr(world, serv_as)
    port = port.strip()
    wait_until(world.check_open_port, args=(server, port.strip()), timeout=600,
               error_text="Port %s is not open" % port)
    for srv in (s for s in all_running if s.id != server.id):
        time.sleep(60)
        is_open = world.check_open_port(srv, port.strip())
        world.assert_exist(
            is_open,
            'Port %s is open, but must be closed in server %s' % (port, srv.id))
def assert_check_resolv(step, domain_as, serv_as, timeout=1800):
    # Wait until the domain resolves to the server's public IP.
    domain = getattr(world, domain_as)
    serv = getattr(world, serv_as)

    def check_new_ip(domain_name, ip):
        # Resolve once (with its own inner wait) and compare with `ip`.
        try:
            actual_ip = wait_until(world.check_resolving,
                                   args=(domain_name,),
                                   timeout=timeout,
                                   error_text="Not see domain resolve")
        except Exception:
            # Resolution failure is treated as "not yet", not as an error.
            return False
        if ip == actual_ip:
            return True
        else:
            LOG.debug('Actual IP is not server IP: %s != %s' % (actual_ip, ip))
            return False

    wait_until(check_new_ip,
               args=(domain.name, serv.public_ip),
               timeout=timeout,
               error_text="Domain resolve not new IP")
def assert_expect_server(step, role, serv_as):
    """Wait for a running server in the farm role matching `role` and store it.

    :param role: behavior name to look up among farm roles
    :param serv_as: world attribute name under which the found server is stored
    :raises IndexError: when no farm role matches the behavior
    """
    spec = 'running'
    world.farm.roles.reload()
    # Bug fix: filter() is a lazy iterator on Python 3 and is not
    # subscriptable; list() keeps the same IndexError-on-empty behavior.
    role_id = list(filter(lambda x: role.strip() in x.role.behaviors, world.farm.roles))[0]
    role_id = role_id.role_id
    server = wait_until(world.check_server_status,
                        args=(spec, role_id, True),
                        timeout=1500,
                        error_text="I'm not see this %s state in server" % spec)
    setattr(world, serv_as, server)
def assert_build_completed(step):
    # Wait for the rolebuilder bundle task; on any failure save the
    # role-builder log from the server, terminate it, and re-raise.
    try:
        wait_until(world.bundle_task_complete_rolebuilder,
                   args=(world.bundle_id,),
                   timeout=2000,
                   error_text='Bundletask %s is not completed' % world.bundle_id)
    except BaseException as e:
        rolebuilder_server = world.rolebuilder_server
        test_name = step.scenario.described_at.file.split('/')[-1].split('.')[0]
        # <log_path>/scalarizr/<test>/<start-time>/<scenario>/<server>-role-builder.log.gz
        path = os.path.realpath(os.path.join(CONF.main.log_path, 'scalarizr',
                                             test_name,
                                             world.test_start_time.strftime('%m%d-%H:%M'),
                                             step.scenario.name.replace('/', '-'),
                                             rolebuilder_server.id + '-role-builder.log.gz'))
        LOG.debug('Path to save log: %s' % path)
        if not os.path.exists(os.path.dirname(path)):
            # NOTE: '0755' is a Python 2 octal literal — this module targets Python 2.
            os.makedirs(os.path.dirname(path), 0755)
        rolebuilder_server.get_logs('../role-builder.log', path, compress=True)
        rolebuilder_server.terminate()
        raise e
def assert_check_resolv(step, domain_as, serv_as, timeout=1800):
    """Resolve the domain and assert it points at the server's public IP."""
    domain = getattr(world, domain_as)
    server = getattr(world, serv_as)
    resolved_ip = wait_until(world.check_resolving,
                             args=(domain.name,),
                             timeout=timeout,
                             error_text='Domain: {0} not resolve'.format(domain.name))
    assert resolved_ip == server.public_ip, \
        'Domain IP {0} != server IP {1}'.format(resolved_ip, server.public_ip)
def assert_check_resolv(step, domain_as, serv_as, timeout=1800):
    """Check that DNS resolution of the domain yields the server's public IP."""
    domain = getattr(world, domain_as)
    server = getattr(world, serv_as)
    resolve_err = 'Domain: {0} not resolve'.format(domain.name)
    ip = wait_until(world.check_resolving,
                    args=(domain.name,),
                    timeout=timeout,
                    error_text=resolve_err)
    assert ip == server.public_ip, \
        'Domain IP {0} != server IP {1}'.format(ip, server.public_ip)
def check_cp(step):
    # Verify the RabbitMQ control panel is reachable with HTTP basic auth.
    # (Python 2 module usage: urllib2 / base64.encodestring.)
    LOG.info('Check control panel work')
    detail = wait_until(world.wait_rabbitmq_cp_url,
                        timeout=1000,
                        error_text="Not see detail to rabbitmq panel")
    req = urllib2.Request(detail['url'].replace('\\', ''))
    # Basic auth token: base64("scalr:<password>") with the trailing newline
    # from encodestring stripped.
    code = base64.encodestring('%s:%s' % ('scalr', detail['password']))[:-1]
    req.add_header("Authorization", "Basic %s" % code)
    p = urllib2.urlopen(req)
    page = p.read()
    LOG.debug('Control panel page content: %s' % page)
    world.assert_not_in('RabbitMQ Management', page, 'Control panel not work')
def verify_backends_for_port(step, serv_as, port, has_not, backends_servers):
    # Check the HAProxy 'backends' section for `port`: every backend built
    # from the step table must be present (or absent when `has_not`).
    time.sleep(10)
    LOG.info("Verify backends servers in config")
    haproxy_server = getattr(world, serv_as)
    port = int(port)
    LOG.info("Backend port: %s" % port)
    backends = []
    # Build one regex per expected backend from the comma-separated input.
    for back in backends_servers.split(','):
        if back.strip().startswith("'"):
            # Quoted form: "'host[:port] option'".
            new_back = back.strip().replace("'", '').split()
            if ':' in new_back[0]:
                host, backend_port = new_back[0].split(':')
            else:
                host = new_back[0]
                backend_port = port
            # Host may be an alias for a server object stored on `world`.
            hostname = getattr(world, host, host)
            if not isinstance(hostname, (unicode, str)):  # Python 2: unicode
                hostname = hostname.private_ip
            if new_back[1] == 'default':
                backends.append(re.compile('%s:%s' % (hostname, backend_port)))
            else:
                backends.append(re.compile('%s:%s(?: check)? %s' % (hostname, backend_port, new_back[1])))
        elif ':' in back.strip():
            # "alias:public|private" form selects which server IP to expect.
            serv, network = back.strip().split(':')
            hostname = getattr(world, serv, serv)
            if not isinstance(hostname, (unicode, str)):
                if network == 'public':
                    hostname = hostname.public_ip
                elif network == 'private':
                    hostname = hostname.private_ip
            backends.append(re.compile('%s:%s' % (hostname, port)))
        else:
            # Bare alias or hostname.
            hostname = getattr(world, back.strip(), back.strip())
            if not isinstance(hostname, (unicode, str)):
                hostname = hostname.private_ip
            backends.append(re.compile('%s:%s' % (hostname, port)))
    LOG.info("Will search backends: %s" % backends)
    node = world.cloud.get_node(haproxy_server)
    config = wait_until(check_config_for_option,
                        args=[node, 'backends', port],
                        timeout=180,
                        error_text='No backends section in HAProxy config')
    for backend in backends:
        for server in config['backends'][port]:
            # Only 'server ...' lines describe backends.
            if not server.startswith('server'):
                continue
            if has_not and backend.search(server):
                raise AssertionError("Backend '%s' in backends file (%s) for port '%s'" % (backend, server, port))
            elif not has_not:
                if backend.search(server):
                    break
        else:
            # Loop finished without a match for this backend pattern.
            if not has_not:
                raise AssertionError("Backend '%s' not found in backends (%s) file for port '%s'" % (backend.pattern, config['backends'][port], port))
def app_server_should_be_clean(step, serv_as):
    """Verify the nginx upstream file holds only the default 127.0.0.1:80 entry."""
    srv = getattr(world, serv_as)
    srv_node = world.cloud.get_node(srv)
    default_ip = '127.0.0.1:80'
    LOG.info('Check if default ip is in list')
    wait_until(world.wait_upstream_in_config,
               args=(srv_node, default_ip),
               timeout=180,
               error_text="Server %s (%s) not in upstream list" % (srv.id, default_ip))
    contents = srv_node.run('cat /etc/nginx/app-servers.include')[0]
    found = re.findall(r"((?:\d+\.?){4}:\d+)", contents)
    if len(found) != 1:
        raise AssertionError(
            'In default app-servers.include must be only one host, but it: %s (%s)'
            % (len(found), found))
    if found[0] != default_ip:
        raise AssertionError(
            'First host in default app-server.include is not localhost, it: %s' % found)
def rebundle_server_via_api(step, serv_as):
    """Start rebundle for server via scalarizr api"""
    server = getattr(world, serv_as)
    operation_id = None
    name = 'tmp-%s-%s' % (server.role.name, datetime.now().strftime('%m%d%H%M'))
    setattr(world, 'last_bundle_role_name', name)
    LOG.info('Create image via scalarizr api from server %s and image name %s' % (server.id, name))
    # One-step image creation: EC2/GCE non-windows non-redhat, plus GCE redhat.
    if CONF.feature.driver.current_cloud in (Platform.EC2, Platform.GCE)\
            and not CONF.feature.dist.is_windows\
            and not CONF.feature.dist.dist == 'redhat'\
            or (CONF.feature.driver.current_cloud == Platform.GCE and CONF.feature.dist.dist == 'redhat'):
        LOG.info('Image creation in this platform doing in one step')
        # NOTE: 'async' keyword argument — Python 2 only (reserved word in Py3.7+).
        operation_id = server.api.image.create(name=name, async=True)
        LOG.info('Image creation operation_id - %s' % operation_id)
        if not operation_id:
            raise AssertionError('Api doesn\'t return operation id for this api call!')
        LOG.info('Wait up to 1 hour before image will be created')
        rebundle_result = wait_until(check_rebundle_api_finished,
                                     args=(server, operation_id),
                                     timeout=3600,
                                     logger=LOG)
        LOG.info('Rebundle is finished, api return: %s' % rebundle_result)
        setattr(world, 'api_image_id', rebundle_result['image_id'])
    else:
        # Two-step path: prepare via api, snapshot/template via cloud driver,
        # then finalize via api.
        LOG.info('Prepare server for image creation')
        prepare = server.api.image.prepare()
        LOG.debug('Prepare operation result: %s' % prepare)
        if CONF.feature.driver.current_cloud in (Platform.IDCF, Platform.CLOUDSTACK):
            node = world.cloud.get_node(server)
            volume = filter(lambda x: x.extra['instance_id'] == node.id,
                            world.cloud.list_volumes())
            snapshot = world.cloud._driver._conn.create_volume_snapshot(volume[0])
            # 99 because this is Other Linux 64-bit in default cloudstack
            image_id = world.cloud._driver._conn.ex_create_snapshot_template(
                snapshot,
                'tmp-revizor-%s' % datetime.now().strftime('%m%d%H%M'),
                99).id
        else:
            image_id = world.cloud.create_template(world.cloud.get_node(server), name).id
        LOG.info('New image_id: %s' % image_id)
        setattr(world, 'api_image_id', image_id)
        LOG.info('Finalize server after rebundle')
        server.api.image.finalize()
def check_new_ip(domain_name, ip):
    # Helper (closure): True when `domain_name` resolves to `ip`.
    # NOTE: `timeout`, `world` and `LOG` come from the enclosing scope.
    try:
        actual_ip = wait_until(world.check_resolving,
                               args=(domain_name,),
                               timeout=timeout,
                               error_text="Not see domain resolve")
    except Exception:
        # Resolution failure is treated as "not yet", not as an error.
        return False
    if ip == actual_ip:
        return True
    else:
        LOG.debug('Actual IP is not server IP: %s != %s' % (actual_ip, ip))
        return False
def check_new_ip(domain_name, ip):
    # Helper (closure): True when `domain_name` resolves to `ip`.
    # NOTE: `timeout`, `world` and `LOG` come from the enclosing scope.
    try:
        actual_ip = wait_until(world.check_resolving,
                               args=(domain_name, ),
                               timeout=timeout,
                               error_text="Not see domain resolve")
    except Exception:
        # Resolution failure is treated as "not yet", not as an error.
        return False
    if ip == actual_ip:
        return True
    else:
        LOG.debug('Actual IP is not server IP: %s != %s' % (actual_ip, ip))
        return False
def assert_check_resolv(step, domain_as, serv_as, timeout=1800):
    # Wait until the domain resolves to the server's public IP.
    domain = getattr(world, domain_as)
    server = getattr(world, serv_as)

    def check_new_ip(domain_name, ip):
        # Resolve once (with its own inner wait) and compare with `ip`.
        try:
            actual_ip = wait_until(world.check_resolving,
                                   args=(domain_name, ),
                                   timeout=timeout,
                                   error_text="Not see domain resolve")
        except Exception:
            # Resolution failure is treated as "not yet", not as an error.
            return False
        if ip == actual_ip:
            return True
        else:
            LOG.debug('Actual IP is not server IP: %s != %s' % (actual_ip, ip))
            return False

    wait_until(check_new_ip,
               args=(domain.name, server.public_ip),
               timeout=timeout,
               error_text="Domain resolve not new IP")
def is_scalarizr_connected(step, timeout=1400): LOG.info('Establish connection with scalarizr.') #Whait outbound request from scalarizr res = wait_until(IMPL.bundle.check_scalarizr_connection, args=(world.server.id, ), timeout=timeout, error_text="Time out error. Can't establish connection with scalarizr.") if not res['failure_reason']: world.bundle_task_id = res['bundle_task_id'] if not res['behaviors']: world.behaviors = ['base'] elif 'base' not in res['behaviors']: world.behaviors = ','.join((','.join(res['behaviors']), 'base')).split(',') else: world.behaviors = res['behaviors'] LOG.info('Connection with scalarizr was established. Received the following behaviors: %s' % world.behaviors) else: raise AssertionError("Can't establish connection with scalarizr. Original error: %s" % res['failure_reason'])
def assert_port(step, port, state, serv_as):
    """Check port open/closed state on one server or on every running server."""
    if serv_as == 'all':
        world.farm.servers.reload()
        servers = [s for s in world.farm.servers if s.status == 'Running']
    else:
        servers = [getattr(world, serv_as)]
    state = state.strip()
    state = state != 'not'
    for serv in servers:
        time.sleep(60)
        if state:
            # Expected open: give it up to 10 minutes to come up.
            res = wait_until(world.check_open_port,
                             args=(serv, port.strip()),
                             timeout=600,
                             error_text="Port %s is not open" % port)
        else:
            res = world.check_open_port(serv, port.strip())
        world.assert_not_equal(
            state, res,
            'Port %s is %s, but must %s in server %s' % (port, res, state, serv.id))
def assert_volume_snapshot_created(volume_snapshot_id: str):
    """Wait until the volume snapshot reaches a completed state.

    :param volume_snapshot_id: snapshot id (EC2) or search query (Azure)
    :raises AssertionError: when the snapshot does not complete within 10 minutes
    :raises NotImplementedError: on platforms other than EC2 and Azure
    """
    def is_snapshot_completed(**kwargs):
        # Poll the backend tools; terminal state names differ between clouds.
        status = get_platform_backend_tools().list_snapshots(**kwargs)[0]['status']
        LOG.info('Wait for volume snapshot completed, actual state is: %s ' % status)
        return status.lower() in ["completed", "succeeded"]

    if CONF.feature.platform.is_azure:
        snapshot_kwargs = dict(query=volume_snapshot_id)
    elif CONF.feature.platform.is_ec2:
        snapshot_kwargs = dict(snapshot_id=volume_snapshot_id)
    else:
        # Bug fix: previously fell through to a NameError on other platforms.
        raise NotImplementedError('Volume snapshot check supports only EC2 and Azure')
    snapshot_kwargs.update({'cloud_location': CONF.feature.platform.location})
    assert wait_until(
        is_snapshot_completed,
        kwargs=snapshot_kwargs,
        timeout=600,
        logger=LOG,
        return_bool=True), 'Volume snapshot creation failed'
def add_objects(step, obj, serv_as):
    """ Insert data to RabbitMQ server """
    serv = getattr(world, serv_as)
    node = world.cloud.get_node(serv)
    # Credentials come from the RabbitMQ control panel details.
    password = wait_until(
        world.wait_rabbitmq_cp,
        timeout=360,
        error_text="Not see detail to rabbitmq panel")['password']
    setattr(world, 'rabbitmq_password', password)
    LOG.info('Rabbitmq password: %s' % password)
    # AMQP port; Cloudstack-family clouds need an explicit port forward.
    port = 5672
    if CONF.feature.driver.current_cloud in [
            Platform.IDCF, Platform.CLOUDSTACK
    ]:
        port = world.cloud.open_port(node, port)
    if obj == 'user':
        node.run('rabbitmqctl add_user testuser testpassword')
        LOG.info('Add user scalr to rabbitmq')
    elif obj == 'vhost':
        node.run('rabbitmqctl add_vhost testvhost')
        LOG.info('Add vhost "testvhost" to rabbitmq')
    elif obj == 'queue':
        credentials = pika.PlainCredentials('scalr', password)
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(credentials=credentials,
                                      host=str(serv.public_ip),
                                      port=int(port)))
        channel = connection.channel()
        # durable=True keeps the queue across broker restarts.
        channel.queue_declare(queue='test_queue', durable=True)
        LOG.info('Add queue "test_queue" to rabbitmq')
    elif obj == 'message':
        credentials = pika.PlainCredentials('scalr', password)
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(credentials=credentials,
                                      host=str(serv.public_ip),
                                      port=int(port)))
        channel = connection.channel()
        # delivery_mode=2 marks the message persistent.
        channel.basic_publish(exchange='',
                              routing_key='test_queue',
                              body='super test message',
                              properties=pika.BasicProperties(
                                  delivery_mode=2, ))
        LOG.info('Add message to rabbitmq')
def wait_bundle_complete(server: Server, bundle_id: int) -> int:
    # Wait for a bundle task to finish and return the new role id parsed
    # from the bundle log.
    # NOTE(review): the parsed role id is a string despite the int annotation.
    def check_bundle_complete():
        server.bundlelogs.reload()
        for bundlelog in server.bundlelogs:
            if bundlelog.id == bundle_id:
                contents = bundlelog.contents
                for log in contents:
                    if 'Bundle task status: success' in log['message']:
                        # Success: re-scan the same log for the created role id.
                        for l in contents:
                            if 'Role ID:' in l['message']:
                                role_id = re.findall(r"Role ID: ([\d]+)", l['message'])[0]
                                LOG.info(f'Bundle task {bundle_id} is complete. New role id: {role_id}')
                                return role_id
                    elif 'Bundle task status changed to: failed' in log['message']:
                        raise AssertionError(log['message'])
        # Not finished yet — keep polling.
        return False
    return wait_until(check_bundle_complete,
                      timeout=1800,
                      error_text=f'Bundle task {bundle_id} not finished after 30 minutes')
def assert_role_task_created(step, timeout=1400):
    """Wait for the role-creation bundle task and clean up Cloudstack resources.

    :param timeout: seconds to wait for the role creation task
    :raises AssertionError: when role creation or resource cleanup fails
    """
    res = wait_until(
        IMPL.bundle.assert_role_task_created,
        args=(world.bundle_task_id, ),
        timeout=timeout,
        # Bug fix: the placeholder was '$s', which made %-formatting raise
        # "TypeError: not all arguments converted" instead of producing the message.
        error_text="Time out error. Can't create role with sent behaviors: %s." % CONF.main.behaviors)
    if res['failure_reason']:
        raise AssertionError("Can't create role. Original error: %s" % res['failure_reason'])
    LOG.info('New role was created successfully with Role_id: %s.' % res['role_id'])
    world.new_role_id = res['role_id']
    # Remove port forward rule for Cloudstack-family clouds
    if CONF.main.driver in [Platform.CLOUDSTACK, Platform.IDCF, Platform.KTUCLOUD]:
        LOG.info('Deleting a Port Forwarding Rule. IP:%s, Port:%s' % (world.forwarded_port, world.ip))
        if not world.cloud.close_port(world.cloud_server, world.forwarded_port, ip=world.ip):
            raise AssertionError("Can't delete a port forwarding Rule.")
        LOG.info('Port Forwarding Rule was successfully removed.')
    # Destroy virtual machine in Cloud
    LOG.info('Destroying virtual machine %s in Cloud' % world.cloud_server.id)
    if not world.cloud_server.destroy():
        raise AssertionError("Can't destroy node with id: %s." % world.cloud_server.id)
    LOG.info('Virtual machine was successfully destroyed.')
def validate_containers(server: Server, client: docker.APIClient):
    """Check that Scalr's view of the server's docker containers matches the server's own.

    Polls Scalr for up to 5 minutes until the container counts agree, then
    asserts per-container equality keyed on 'containerId'.
    """
    server_containers = get_server_containers(server, client)
    scalr_containers = []
    deadline = time.time() + 300
    while time.time() < deadline:
        scalr_containers = wait_until(
            IMPL.containers.list,
            args={'server_id': server.id},
            timeout=120,
            logger=LOG,
            error_text="No docker containers were found on Scalr for server %s" % server.id)
        if len(scalr_containers) == len(server_containers):
            break
        time.sleep(10)
    for local in server_containers:
        for remote in scalr_containers:
            if local['containerId'] == remote['containerId']:
                assert local == remote, \
                    "Containers don't match! Server: \n{}\nScalr: \n{}".format(local, remote)
def assert_port(step, port, state, serv_as):
    """Assert that ``port`` is open (or, when state is 'not', closed) on the target server(s).

    ``serv_as`` may be 'all' (every Running server in the farm) or the name of
    a single server attribute on ``world``.
    """
    if serv_as == 'all':
        world.farm.servers.reload()
        targets = [s for s in world.farm.servers if s.status == 'Running']
    else:
        targets = [getattr(world, serv_as)]
    expect_open = state.strip() != 'not'
    for target in targets:
        time.sleep(60)  # let the service settle before probing
        if expect_open:
            actual = wait_until(world.check_open_port,
                                args=(target, port.strip()),
                                timeout=600,
                                error_text="Port %s is not open" % port)
        else:
            actual = world.check_open_port(target, port.strip())
        world.assert_not_equal(
            expect_open, actual,
            'Port %s is %s, but must %s in server %s' % (port, actual, expect_open, target.id))
def check_database_in_new_server(step, serv_as, has_not, db_name, username=None):
    """Verify databases from comma-separated ``db_name`` exist (or, when
    ``has_not`` is truthy, do not exist) on the target server(s).

    ``serv_as`` may be 'all' (every RUNNING server in the farm) or a single
    server attribute name on ``world``. Raises via world.assert_not_equal on
    mismatch.
    """
    has_not = bool(has_not)  # idiomatic replacement for `x and True or False`
    time.sleep(5)
    db_role = world.get_role()
    dbs = db_name.split(',')
    if serv_as == 'all':
        world.farm.servers.reload()
        servers = [s for s in world.farm.servers if s.status == ServerStatus.RUNNING]
    else:
        servers = [getattr(world, serv_as)]
    credentials = (username, db_role.db.credentials[username]) if username else None
    if CONF.feature.driver.cloud_family == Platform.CLOUDSTACK:
        # CloudStack connections are flaky right after boot: retry up to 5 times
        last_error = None
        for _ in range(5):
            try:
                db_role.db.get_connection()
                break
            except DataBaseError as e:
                last_error = e
                time.sleep(5)
        else:
            # BUG FIX: a bare `raise` here had no active exception (the except
            # block already exited) and raised RuntimeError; re-raise the last
            # connection error instead.
            raise last_error
    for server in servers:
        for db in dbs:
            LOG.info('Check database %s in server %s' % (db, server.id))
            check = wait_until(target=db_role.db.database_exist,
                               args=(str(db), server, credentials),
                               timeout=60,
                               return_bool=True)
            world.assert_not_equal(check, not has_not, (
                has_not and 'Database %s exist in server %s, but must be erased. All db: %s'
                or 'Database %s not exist in server %s, all db: %s')
                % (db_name, server.id, db_role.db.database_list(server)))
def verify_containers(step, serv_as):
    """Step wrapper: compare the server's docker containers with Scalr's records.

    Polls Scalr for up to 5 minutes until the container counts match, then
    asserts equality for each pair sharing a 'containerId'.
    """
    server = getattr(world, serv_as)
    server_containers = get_server_containers(serv_as)
    deadline = time.time() + 300
    while time.time() < deadline:
        scalr_containers = wait_until(
            IMPL.containers.list,
            args={'server_id': server.id},
            timeout=120,
            logger=LOG,
            error_text="No docker containers were found on Scalr for server %s" % server.id)
        if len(scalr_containers) == len(server_containers):
            break
        time.sleep(10)
    for own in server_containers:
        for recorded in scalr_containers:
            if own['containerId'] == recorded['containerId']:
                assert own == recorded, \
                    "Containers don't match! Server: \n{}\nScalr: \n{}".format(own, recorded)
def verify_containers(step, serv_as):  # NOTE: migrated
    """Step wrapper (migrated copy): ensure Scalr's container list agrees with the server's.

    Retries the Scalr listing for up to 5 minutes until both sides report the
    same number of containers, then compares matching 'containerId' entries.
    """
    server = getattr(world, serv_as)
    server_containers = get_server_containers(serv_as)
    stop_at = time.time() + 300
    while time.time() < stop_at:
        scalr_containers = wait_until(
            IMPL.containers.list,
            args={'server_id': server.id},
            timeout=120,
            logger=LOG,
            error_text="No docker containers were found on Scalr for server %s" % server.id)
        if len(server_containers) != len(scalr_containers):
            time.sleep(10)
            continue
        break
    for from_server in server_containers:
        for from_scalr in scalr_containers:
            if from_server['containerId'] != from_scalr['containerId']:
                continue
            assert from_server == from_scalr, \
                "Containers don't match! Server: \n{}\nScalr: \n{}".format(from_server, from_scalr)
def verify_replication_status(step, behavior, status):
    """Wait up to 10 minutes for replication of ``behavior`` to reach ``status``."""
    wait_until(world.wait_replication_status,
               args=(behavior, status),
               # FIX: error-message typo ('Replication in broken' -> 'is broken')
               error_text="Replication is broken",
               timeout=600)
def check_cluster_terminate(step, status):
    """Wait up to 30 minutes for the MongoDB cluster to reach the expected status."""
    failure_message = 'Mongodb cluster status not %s, is: %s' % (
        status, world.farm.db_info('mongodb')['status'])
    wait_until(world.check_mongo_status,
               args=(status, ),
               timeout=1800,
               error_text=failure_message)
def installing_scalarizr(step, custom_version=None, use_sysprep=None, serv_as=None, use_rv_to_branch=None, custom_branch=None):
    """Install the scalarizr agent on a cloud node (Windows or Linux path).

    Branch selection priority: use_rv_to_branch > custom_branch > CONF.feature.branch.
    Side effects: sets world.pre_installed_agent to the installed version and,
    when a pre-existing world.cloud_server node was used, re-saves it.
    Raises AssertionError if installation fails or the reported version is invalid.
    """
    node = getattr(world, 'cloud_server', None)
    resave_node = True if node else False  # remember whether node came from world, to re-save later
    rv_branch = CONF.feature.branch
    rv_to_branch = CONF.feature.to_branch
    server = getattr(world, (serv_as or '').strip(), None)
    if server:
        server.reload()
    # Get scalarizr repo type
    if use_rv_to_branch:
        branch = rv_to_branch
    elif custom_branch:
        branch = custom_branch
    else:
        branch = rv_branch
    repo_type = get_repo_type(branch, custom_version)
    LOG.info('Installing scalarizr from repo_type: %s' % repo_type)
    # Windows handler
    if CONF.feature.dist.is_windows:
        # NOTE(review): password value looks redacted in source — confirm the real
        # credential source before relying on this branch.
        password = '******'
        if node:
            console_kwargs = dict(public_ip=node.public_ips[0], password=password)
        else:
            console_kwargs = dict(server=server)
            if CONF.feature.driver.is_platform_ec2:
                console_kwargs.update({'password': password})
            LOG.debug('Cloud server not found get node from server')
            node = wait_until(world.cloud.get_node, args=(server, ), timeout=300, logger=LOG)
            LOG.debug('Node get successfully: %s' % node)
        # Wait ssh
        console_kwargs.update({'timeout': 1200})
        # Install scalarizr
        url = 'https://my.scalr.net/public/windows/{repo_type}'.format(
            repo_type=repo_type)
        # PowerShell one-liner: download and execute the install script
        cmd = "iex ((new-object net.webclient).DownloadString('{url}/install_scalarizr.ps1'))".format(
            url=url)
        assert not world.run_cmd_command_until(
            world.PS_RUN_AS.format(command=cmd), **
            console_kwargs).std_err, "Scalarizr installation failed"
        res = world.run_cmd_command_until('scalarizr -v', **console_kwargs).std_out
        if use_sysprep:
            run_sysprep(node.uuid, world.get_windows_session(**console_kwargs))
    # Linux handler
    else:
        # Wait cloud server
        if not node:
            LOG.debug('Cloud server not found get node from server')
            node = wait_until(world.cloud.get_node, args=(server, ), timeout=300, logger=LOG)
            LOG.debug('Node get successfully: %s' % node)
        # Wait ssh: poll for up to 5 minutes, retrying every 10s on failure
        start_time = time.time()
        while (time.time() - start_time) < 300:
            try:
                if node.get_ssh():
                    break
            except AssertionError:
                LOG.warning('Can\'t get ssh for server %s' % node.id)
                time.sleep(10)
        url = 'https://my.scalr.net/public/linux/{repo_type}'.format(
            repo_type=repo_type)
        # Install curl for the target OS family, then pipe the install script to bash
        cmd = '{curl_install} && ' \
            'curl -L {url}/install_scalarizr.sh | bash && sync'.format(
                curl_install=world.value_for_os_family(
                    debian="apt-get update && apt-get install curl -y",
                    centos="yum clean all && yum install curl -y",
                    server=server,
                    node=node
                ),
                url=url)
        LOG.debug('Install script body: %s' % cmd)
        node.run(cmd)
        if CONF.feature.core_v2:
            # Marker file enables the scalr-labs core v2 code path in the agent
            cv2_init = 'touch /etc/scalr/private.d/scalr_labs_corev2'
            LOG.info('Init scalarizr corev2. Run command %s' % cv2_init)
            node.run(cv2_init)
    # get installed scalarizr version
    res = node.run('scalarizr -v')[0]
    scalarizr_ver = re.findall('(?:Scalarizr\s)([a-z0-9/./-]+)', res)
    assert scalarizr_ver, 'Scalarizr version is invalid. Command returned: %s' % res
    setattr(world, 'pre_installed_agent', scalarizr_ver[0])
    if resave_node:
        setattr(world, 'cloud_server', node)
    LOG.debug('Scalarizr %s was successfully installed' % scalarizr_ver[0])
def check_grow_status(step, status):
    """Wait up to 15 minutes for the volume grow operation to reach the given status."""
    LOG.debug('Check grow status')
    expected = status.strip()
    wait_until(wait_grow_status,
               args=(expected, ),
               timeout=900,
               error_text='Not see grow status %s' % status)
def wait_all_terminated(step):
    """Block (up to 30 minutes) until every server in the farm has terminated."""
    wait_until(
        world.wait_farm_terminated,
        error_text='Servers in farm not terminated too long',
        timeout=1800)
def assert_bundletask_completed(step, serv_as, timeout=1800):
    """Wait for the current bundle task on the named server to finish."""
    target = getattr(world, serv_as)
    wait_until(world.bundle_task_completed,
               args=(target, world.bundle_id),
               timeout=timeout,
               error_text="Bundle not completed")