def create_databundle(step, bundle_type, when):
    """Trigger a databundle of the given type on the current DB role.

    :param bundle_type: databundle type passed through to the Scalr API
    :param when: 'slave' to run the bundle on a slave server; any other
        value runs it on the master
    """
    LOG.info('Create a %s databundle on %s' % (bundle_type, when))
    # Idiomatic boolean instead of the original verbose if/else assignment.
    use_slave = (when == 'slave')
    world.get_role().db.create_databundle(bundle_type, use_slave=use_slave)
def add_proxy_to_role(step, proxy_name, proxy_role, port, backend_role):
    """Attach an haproxy proxy with a single role-based backend and save
    its settings in world under '<proxy_name>_proxy'."""
    LOG.info("Add haproxy proxy %s with role backend" % proxy_name)
    haproxy_role = world.get_role(proxy_role)
    target_role = world.get_role(backend_role)
    backends = [{
        'farm_role_id': target_role.id,
        'port': str(port),
        'backup': '0',
        'down': '0',
    }]
    LOG.info("Save proxy %s with backends: %s" % (proxy_name, backends))
    haproxy_role.add_haproxy_proxy(port, backends, description=proxy_name)
    setattr(world, '%s_proxy' % proxy_name, {"port": port, "backends": backends})
def assert_check_databundle_date(step, back_type):
    """Assert that the last databundle/backup date changed since the one
    previously recorded in world.

    :param back_type: 'databundle' or 'backup' (forms the info key)
    """
    LOG.info("Check %s date" % back_type)
    if CONF.feature.driver.current_cloud in [Platform.CLOUDSTACK, Platform.IDCF, Platform.KTUCLOUD]:
        LOG.info('Platform is cloudstack-family, backup not doing')
        return True
    key = 'last_%s' % back_type
    info = world.get_role().db.info()
    while info[key] == 'In progress...':
        LOG.debug('Last %s in progress, wait 10 seconds' % back_type)
        time.sleep(10)
        # BUG FIX: refresh the cached status; the original kept comparing
        # the stale 'In progress...' snapshot after the wait loop finished.
        info = world.get_role().db.info()
    previous = getattr(world, key, 'Never')
    if info[key] != previous:
        # Date moved forward — a new bundle/backup happened.
        return
    raise AssertionError('Previous %s was: %s and last: %s'
                         % (back_type, previous, info[key]))
def add_proxy_with_healtcheck(step, proxy_name, proxy_role, port, options, healthchecks):
    """Add an haproxy proxy with several host backends and healthcheck
    parameters (interval, fall, rise) and save it in world."""
    LOG.info("Add proxy %s with many backends (%s) and healthcheck (%s)"
             % (proxy_name, options, healthchecks))
    proxy_role = world.get_role(proxy_role)
    tokens = options.strip().replace('\'', '').split()
    # Group the flat token list into (address, flag) pairs; a trailing
    # odd token is dropped, matching zip() truncation.
    pairs = [(tokens[i], tokens[i + 1]) for i in range(0, len(tokens) - 1, 2)]
    healthchecks = [int(x.strip()) for x in healthchecks.replace(',', '').split()]
    LOG.info("Healthchecks for proxy: %s" % healthchecks)
    backends = []
    for address, flag in pairs:
        if ':' in address:
            host, backend_port = address.split(':')
        else:
            host, backend_port = address, port
        serv = getattr(world, host, None)
        backends.append({
            'host': serv.private_ip if serv else str(address),
            'port': str(backend_port),
            'backup': "1" if flag == 'backup' else "0",
            'down': "1" if flag in ['down', 'disabled'] else "0",
        })
    LOG.info("Save proxy %s with backends: %s" % (proxy_name, backends))
    proxy_role.add_haproxy_proxy(port, backends, description=proxy_name,
                                 interval=healthchecks[0],
                                 fall=healthchecks[1],
                                 rise=healthchecks[2])
    setattr(world, '%s_proxy' % proxy_name, {"port": port, "backends": backends})
def delete_proxy(step, proxy_name, proxy_role):
    """Delete a previously saved proxy from either an nginx ('www') or an
    haproxy role, depending on the role's behaviors."""
    saved = getattr(world, '%s_proxy' % proxy_name)
    role = world.get_role(proxy_role)
    behaviors = role.role.behaviors
    if 'www' in behaviors:
        role.delete_nginx_proxy(saved['hostname'])
    elif 'haproxy' in behaviors:
        role.delete_haproxy_proxy(saved['port'])
def attach_script(step, script_name):
    """Attach an existing Scalr script to the most recently fired custom
    event on the current role."""
    role = world.get_role()
    script = [s for s in IMPL.script.list() if s['name'] == script_name][0]
    LOG.info('Add script %s to custom event %s'
             % (script['name'], world.last_event['name']))
    # Farm-role scoped scripting rule fired on the custom event.
    rule = {
        "scope": "farmrole",
        "action": "add",
        # id: extModel123
        # eventOrder 2
        "timeout": "1200",
        "isSync": True,
        "orderIndex": 10,
        "type": "scalr",
        "isActive": True,
        "eventName": world.last_event['name'],
        "target": {
            "type": "server"
        },
        "isFirstConfiguration": None,
        "scriptId": str(script['id']),
        "scriptName": script['name'],
        "scriptOs": "linux",
        "version": -1,
        "scriptPath": "",
        "runAs": ""
    }
    IMPL.farm.edit_role(world.farm.id, role.role.id, scripting=[rule])
def waiting_server(step, state, serv_as, timeout=1400):
    """Wait for a server of the current role to reach *state* and save it
    in world as *serv_as*.

    Windows distributions get an extended timeout because they bootstrap
    considerably slower.
    """
    if CONF.feature.dist.is_windows:
        timeout = 2400
    role = world.get_role()
    server = world.wait_server_bootstrapping(role, state, timeout)
    # BUG FIX: corrected 'succesfully' typo in the log message.
    LOG.info('Server successfully %s' % state)
    setattr(world, serv_as, server)
def verify_attached_disk_types(step):
    """Verify the cloud disk types of volumes attached to the role's
    configured mount points (GCE only; EC2 is skipped because libcloud
    does not expose the volume type there)."""
    LOG.info('Verify atype of attached disks')
    role = world.get_role()
    storage_config = IMPL.farm.get_role_settings(world.farm.id, role.role.id)['storages']
    # Map mount point -> list of storage (volume) ids.
    volume_ids = {}
    for device in storage_config['configs']:
        volume_ids[device['mountPoint']] = [
            s['storageId'] for s in storage_config['devices'][device['id']]
        ]
    ids = list(chain.from_iterable(volume_ids.values()))
    volumes = filter(lambda x: x.id in ids, world.cloud.list_volumes())
    # Replace the id lists with the actual cloud volume objects.
    for mount_point in volume_ids:
        volume_ids[mount_point] = filter(
            lambda x: x.id in volume_ids[mount_point], volumes)
    LOG.debug('Volumes in mount points: %s' % volume_ids)
    if CONF.feature.driver.current_cloud == Platform.EC2:
        LOG.warning(
            'In EC2 platform we can\'t get volume type (libcloud limits)')
        return
    elif CONF.feature.driver.current_cloud == Platform.GCE:
        if not volume_ids['/media/diskmount'][0].extra['type'] == 'pd-standard':
            raise AssertionError(
                'Volume attached to /media/diskmount must be "pd-standard" but it: %s'
                % volume_ids['/media/diskmount'][0].extra['type'])
        if not volume_ids['/media/raidmount'][0].extra['type'] == 'pd-ssd':
            # BUG FIX: the error message previously reported the
            # /media/diskmount volume type instead of /media/raidmount's.
            raise AssertionError(
                'Volume attached to /media/raidmount must be "pd-ssd" but it: %s'
                % volume_ids['/media/raidmount'][0].extra['type'])
def assert_check_replicaset(step, slaves, shard_index, port):
    """Verify every expected slave appears as a member of the given
    shard's mongod replica set."""
    world.farm.servers.reload()
    db_role = world.get_role()
    shard_index = int(shard_index) - 1
    # Read replica set status through a secondary on the given port.
    credentials = {'port': int(port), 'readPreference': 'secondary'}
    command = {'replSetGetStatus': 1}
    # Pick any running server that belongs to the requested shard.
    shard_server = None
    for candidate in world.farm.servers:
        if candidate.status == 'Running' and candidate.role_id == db_role.role_id \
                and int(candidate.cluster_position[0]) == shard_index:
            shard_server = candidate
            LOG.info('Found server %s with cluster position %s'
                     % (candidate.id, candidate.cluster_position))
            break
    if shard_server is None:
        raise AssertionError('No servers found in shard: #%s' % shard_index)
    shard_members = slaves.split(',')
    res = db_role.db.run_admin_command(shard_server, command, credentials=credentials)
    LOG.info('Obtained replica set status from: %s\n%s' % (shard_server.id, res))
    # Extract the index portion of each member name (assumes names of the
    # form '...mongo-<idx>:<port>' — same parsing as the original).
    members = set([
        member['name'].split('mongo')[1].split(':')[0][1:]
        for member in res['members']
    ])
    LOG.info('Members in replicaSet %s are %s' % (shard_index, ','.join(members)))
    for shard_member in shard_members:
        world.assert_not_in(
            shard_member, members,
            'Member %s not in replicaset. Members: %s' % (shard_member, members))
def assert_check_replicaset(step, slaves, shard_index, port):
    """Check that all expected slaves are members of the shard's replica
    set (duplicate step definition kept for compatibility)."""
    world.farm.servers.reload()
    db_role = world.get_role()
    idx = int(shard_index) - 1
    credentials = {'port': int(port), 'readPreference': 'secondary'}
    command = {'replSetGetStatus': 1}
    # Locate a running server in the requested shard.
    target = None
    for srv in world.farm.servers:
        if srv.status != 'Running' or srv.role_id != db_role.role_id:
            continue
        if int(srv.cluster_position[0]) == idx:
            target = srv
            LOG.info('Found server %s with cluster position %s'
                     % (srv.id, srv.cluster_position))
            break
    if target is None:
        raise AssertionError('No servers found in shard: #%s' % idx)
    expected_members = slaves.split(',')
    res = db_role.db.run_admin_command(target, command, credentials=credentials)
    LOG.info('Obtained replica set status from: %s\n%s' % (target.id, res))
    # Parse the index part out of member names ('...mongo-<idx>:<port>' —
    # assumed format, identical parsing to the original).
    members = set(m['name'].split('mongo')[1].split(':')[0][1:]
                  for m in res['members'])
    LOG.info('Members in replicaSet %s are %s' % (idx, ','.join(members)))
    for expected in expected_members:
        world.assert_not_in(expected, members,
                            'Member %s not in replicaset. Members: %s'
                            % (expected, members))
def assert_wait_servers(step, serv_count):
    """Wait until *serv_count* servers of the current role are running."""
    role = world.get_role()
    expected = int(serv_count)
    # Allow 15 minutes per expected server before giving up.
    timeout = 60 * 15 * expected
    LOG.info('Wait %s servers, timeout %s seconds' % (expected, timeout))
    wait_until(world.wait_servers_running,
               args=(role, expected),
               timeout=timeout,
               error_text='Not see %s servers running' % expected)
def check_status(step, serv_as):
    """Check that *serv_as* reports itself as a healthy SECONDARY syncing
    from the current PRIMARY of its replica set.

    Raises AssertionError when the member is not in state 2 (SECONDARY),
    is not syncing from the master, or cannot find itself in the
    replSetGetStatus output.
    """
    server = getattr(world, serv_as)
    db_role = world.get_role()
    # Read status through the secondary on the mongod replica port.
    credentials = {'readPreference': 'secondary', 'port': 27018}
    # mongod replicaSet status command
    command = {'replSetGetStatus': 1}
    # Get status
    res = db_role.db.run_admin_command(server, command, credentials=credentials)
    # state == 1 marks the PRIMARY member of the replica set.
    master_name = [
        member['name'] for member in res['members'] if member['state'] == 1
    ][0]
    LOG.info('Obtained replica set status from: %s\n%s' % (serv_as, res))
    # Find the entry describing this server itself ('self' flag) and
    # verify it is a SECONDARY (state 2) syncing from the master.
    for replica_member in res['members']:
        if replica_member.get('self', False):
            if (replica_member['state'] != 2) or (res.get('syncingTo', False) != master_name):
                raise AssertionError(
                    'An error occurred while trying to check data.\n'
                    'ReplicaSet status in Error states: %s or not synced with master: %s.'
                    % (replica_member['stateStr'], master_name))
            break
    else:
        # for/else: no member carried the 'self' flag.
        raise AssertionError(
            "An error occurred while trying to check data. Can't get replica member %s." % serv_as)
    LOG.info('ReplicaSet status checked successfully on %s' % serv_as)
def assert_shard_status(step, serv_count):
    """Verify the mongos cluster map contains exactly *serv_count* shards."""
    expected = int(serv_count)
    db_role = world.get_role()
    world.farm.servers.reload()
    # mongod shard list command and mongos connection options.
    command = {'listShards': 1}
    credentials = {'port': 27017, 'readPreference': 'primary'}
    # Query through the 0-0 cluster member.
    for server in world.farm.servers:
        if server.status == 'Running' and server.role_id == db_role.role_id \
                and server.cluster_position == '0-0':
            LOG.info('Found server %s with cluster position %s'
                     % (server.id, server.cluster_position))
            break
    else:
        raise AssertionError('No servers found with index 0-0')
    res = db_role.db.run_admin_command(server, command, credentials=credentials)
    LOG.info('Obtained Shards list from: %s\n%s' % (server.id, res))
    world.assert_not_equal(
        expected, len(res['shards']),
        'Cluster map has not %s shards. Found %s shard'
        % (len(res['shards']), expected))
    LOG.info('Cluster map has %s shards. Checked successfully.' % len(res['shards']))
def create_database_user(step, username, serv_as):
    """Create a database user with a random 12-character password and
    remember the credentials for later steps."""
    server = getattr(world, serv_as)
    db_role = world.get_role()
    password = generate_random_string(12)
    LOG.info("Create new database user '%s/%s' in server %s"
             % (username, password, server))
    db_role.db.user_create(username, password, server)
    # Store the password so later steps can authenticate as this user.
    db_role.db.credentials[username] = password
def delete_databases(step, databases, serv_as):
    """Drop each database from the comma-separated *databases* list."""
    names = databases.split(',')
    server = getattr(world, serv_as)
    role = world.get_role()
    LOG.info('Delete databases %s in server %s' % (names, server.id))
    for name in names:
        LOG.info('Delete database: %s' % name)
        role.db.database_delete(name, server)
def __init__(self, server, db=None):
    """Open a PostgreSQL connection to *server* (optionally to database
    *db*) and keep a ready cursor plus the backing cloud node."""
    self.server = server
    self.db = db
    self._role = world.get_role('postgresql')
    self.connection = self._role.db.get_connection(server, db=db)
    self.cursor = self.connection.cursor()
    self.node = world.cloud.get_node(server)
def do_action(step, action):
    """Run databundle or backup process in farm."""
    name = action.strip()
    db_role = world.get_role()
    # Dispatch to db.create_databundle / db.create_backup / etc.
    creator = getattr(db_role.db, 'create_%s' % name)
    creator()
    LOG.info("Create %s" % name)
def create_many_databases(step, db_count, serv_as, username=None):
    """Create *db_count* databases named MDB0..MDBn on the server,
    optionally authenticating as *username*."""
    server = getattr(world, serv_as)
    role = world.get_role()
    creds = None
    if username:
        creds = (username, role.db.credentials[username])
    for index in range(int(db_count)):
        name = "MDB%s" % index
        LOG.info("Create database %s in server %s" % (name, server))
        role.db.database_create(name, server, creds)
def create_new_database(step, db_name, serv_as, username=None):
    """Create a single database on the server, optionally as a specific
    user, then pause briefly to let it settle."""
    server = getattr(world, serv_as)
    role = world.get_role()
    LOG.info('Create database %s in server %s' % (db_name, server))
    creds = (username, role.db.credentials[username]) if username else None
    role.db.database_create(db_name, server, creds)
    LOG.info('Database was success created')
    # Give the database time to appear before the next step queries it.
    time.sleep(15)
def __init__(self, server, db=0):
    """Open a redis connection to *server* for database index *db* and
    record the persistence mode in use."""
    self.server = server
    self.db = db
    self.node = world.cloud.get_node(server)
    redis_role = world.get_role('redis')
    self.connection = redis_role.db.get_connection(server, db=db)
    # Default to AOF persistence unless the env var explicitly selects
    # something other than 'aof'.
    env_value = os.environ.get('RV_REDIS_SNAPSHOTTING')
    self.snapshotting_type = 'aof' if env_value in ('aof', None) else 'rdb'
def add_custom_role_to_backend(step, role_type, serv_as):
    """Set the www role's 'upstream_app_role' scalarizr option to the
    given role's name via sed on the server."""
    LOG.info("Add %s role to %s scalarizr config" % (role_type, serv_as))
    server = getattr(world, serv_as)
    backend = world.get_role(role_type)
    node = world.cloud.get_node(server)
    sed_command = ("sed -i 's/upstream_app_role =/upstream_app_role = %s/g'"
                   " /etc/scalr/public.d/www.ini") % backend.name
    node.run(sed_command)
def expect_server_bootstraping_for_role(step, serv_as, role_type, timeout=1800):
    """Expect server bootstrapping to 'Running' and check every 10 seconds scalarizr log for ERRORs and Traceback"""
    role = world.get_role(role_type) if role_type else None
    # Slower cloud families need a longer bootstrap window.
    if CONF.feature.driver.cloud_family in (Platform.CLOUDSTACK, Platform.OPENSTACK):
        timeout = 3000
    LOG.info('Expect server bootstrapping as %s for %s role' % (serv_as, role_type))
    server = world.wait_server_bootstrapping(role, ServerStatus.RUNNING,
                                             timeout=timeout)
    setattr(world, serv_as, server)
def having_small_database(step, db_name, serv_as, username=None):
    """Insert test data into *db_name* on the server (optionally as a
    specific user) and remember the insert result in world."""
    server = getattr(world, serv_as)
    role = world.get_role()
    if username:
        LOG.info("Create database %s in server %s by user %s"
                 % (db_name, server, username))
        creds = (username, role.db.credentials[username])
        result = role.db.insert_data_to_database(db_name, server, creds)
    else:
        LOG.info("Create database %s in server %s" % (db_name, server))
        result = role.db.insert_data_to_database(db_name, server)
    setattr(world, 'data_insert_result', result)
def delete_custom_role_from_backend(step, role_type, serv_as):
    """Clear the given role's name out of the www role's
    'upstream_app_role' scalarizr option via sed on the server."""
    server = getattr(world, serv_as)
    LOG.info('Delete %s role from %s scalarizr config' % (role_type, server.id))
    backend = world.get_role(role_type)
    node = world.cloud.get_node(server)
    sed_command = ("sed -i 's/upstream_app_role = %s/upstream_app_role =/g'"
                   " /etc/scalr/public.d/www.ini") % backend.name
    node.run(sed_command)
def __init__(self, server, db=0):
    """Redis connection wrapper bound to one server and db index."""
    self.server = server
    role = world.get_role('redis')
    self.connection = role.db.get_connection(server, db=db)
    self.db = db
    self.node = world.cloud.get_node(server)
    # Persistence mode comes from the environment; unset means AOF.
    if os.environ.get('RV_REDIS_SNAPSHOTTING') in ('aof', None):
        self.snapshotting_type = 'aof'
    else:
        self.snapshotting_type = 'rdb'
def add_nginx_proxy_for_role(step, proto, proxy_name, port, proxy_role, vhost_name, backend_role, ip_hash, network_type='private'):
    """This step add to nginx new proxy to any role with http/https and ip_hash

    :param proto: Has 3 states: http, https, http/https. If http/https - autoredirect will enabled
    :type proto: str
    :param proxy_name: Name for proxy in scalr interface
    :type proxy_name: str
    :param proxy_role: Nginx role name
    :type proxy_role: str
    :param backend_role: Role name for backend
    :type backend_role: str
    :param vhost_name: Virtual host name
    :type vhost_name: str
    """
    nginx_role = world.get_role(proxy_role)
    app_role = world.get_role(backend_role)
    vhost = getattr(world, vhost_name)
    listen_port = int(port) if port else 80
    opts = {}
    if proto == 'http':
        LOG.info('Add http proxy')
    elif proto == 'https':
        LOG.info('Add https proxy')
        opts['ssl'] = True
        opts['ssl_port'] = 443
        opts['cert_id'] = Certificate.get_by_name('revizor-key').id
        opts['http'] = True
    elif proto == 'http/https':
        # NOTE(review): unlike the plain 'https' branch this one does not
        # set opts['http'] — preserved as-is, confirm against the proxy API.
        LOG.info('Add http/https proxy')
        opts['ssl'] = True
        opts['ssl_port'] = 443
        opts['cert_id'] = Certificate.get_by_name('revizor-key').id
    if ip_hash:
        opts['ip_hash'] = True
    template = get_nginx_default_server_template()
    LOG.info('Add proxy to app role for domain %s' % vhost.name)
    backends = [{"farm_role_id": app_role.id,
                 "port": "80",
                 "backup": "0",
                 "down": "0",
                 "location": "/",
                 "network": network_type}]
    nginx_role.add_nginx_proxy(vhost.name, listen_port, templates=[template],
                               backends=backends, **opts)
    setattr(world, '%s_proxy' % proxy_name,
            {"hostname": vhost.name, "port": listen_port, "backends": backends})
def random_terminates(step, serv_count):
    """Forcefully terminate 5 random running servers of the current role.

    :param serv_count: total expected server count (kept for step-signature
        compatibility; the number terminated remains 5, capped by how many
        servers are actually running)
    """
    role = world.get_role()
    running = [s for s in world.farm.servers
               if s.status == 'Running' and s.role_id == role.role_id]
    # BUG FIX: the original sampled indices from a hard-coded range(9),
    # which raises IndexError whenever fewer than 9 servers are running.
    # Sample directly from the running-server list instead.
    kill_count = min(5, len(running))
    for victim in random.sample(running, kill_count):
        LOG.info('Terminate server %s' % victim.id)
        victim.terminate(force=True)
def add_proxy_to_role(step, proxy_name, proxy_role, port, backend_role, options):
    """Attach an haproxy proxy with a role backend, optionally selecting
    the backend network and/or a proxy template from *options*.

    :param options: free-form option string; may contain
        '<something> public|private' to pick the backend network and the
        phrase 'proxy template' to attach PROXY_TEMPLATE
    """
    LOG.info("Add haproxy proxy %s with role backend" % proxy_name)
    proxy_template = None
    proxy_role = world.get_role(proxy_role)
    backend_role = world.get_role(backend_role)
    backends = [{
        'farm_role_id': backend_role.id,
        'port': str(port),
        'backup': '0',
        'down': '0'
    }]
    if options:
        # BUG FIX: "('public' or 'private') in options" evaluated as
        # "'public' in options" only, so 'private' network options were
        # silently ignored.
        if 'public' in options or 'private' in options:
            backends[0].update({'network': options.strip().split()[1]})
        if 'proxy template' in options:
            proxy_template = PROXY_TEMPLATE
    proxy_role.add_haproxy_proxy(port, backends, description=proxy_name,
                                 proxy_template=proxy_template)
    LOG.info("Save proxy %s with backends: %s" % (proxy_name, backends))
    setattr(world, '%s_proxy' % proxy_name,
            {"port": port, "backends": backends, "proxy_template": proxy_template})
def check_state(step, states, revert, port, shard_index):
    """Poll (up to 300s) for a replica-set member on *port* in the given
    shard to reach *states*; with *revert* set, assert it never does.

    Raises AssertionError when *revert* is set and the state is seen, or
    exceptions.TimeoutError when *revert* is unset and it never appears.
    """
    # Numeric mongod member states keyed by their symbolic names.
    member_states = {
        'STARTUP': 0, 'PRIMARY': 1, 'SECONDARY': 2, 'RECOVERING': 3,
        'FATAL': 4, 'STARTUP2': 5, 'UNKNOWN': 6, 'ARBITER': 7,
        'DOWN': 8, 'ROLLBACK': 9, 'SHUNNED': 10
    }
    world.farm.servers.reload()
    db_role = world.get_role()
    revert = True if revert else False
    state = member_states[states.upper()]
    shard_index = int(shard_index)-1
    # mongod replicaSet status command
    command = {'replSetGetStatus': 1}
    # Pick any running server belonging to the requested shard.
    for server in world.farm.servers:
        if server.status == 'Running' and server.role_id == db_role.role_id:
            if int(server.cluster_position[0]) == shard_index:
                server = server
                LOG.info('Found server %s with cluster position %s' % (server.id, server.cluster_position))
                break
    else:
        # for/else: no shard member found at all.
        raise AssertionError('No found servers in shard: #%s' % shard_index)
    # Poll the replica set status every 10s until matched or timed out.
    start_time = time.time()
    state_is_matched = False
    while (time.time() - start_time) <= 300:
        if not state_is_matched:
            # Query the local mongod replica port directly.
            credentials = {'port': 27018}
            res = db_role.db.run_admin_command(server, command, credentials=credentials)
            LOG.info('Obtained replica set status from: %s\n%s' % (server.id, res))
            for member in res['members']:
                # Match on state, port suffix of the member name, and health.
                if member.get('state', 100) == state \
                        and member.get('name', '').split(':')[-1] == port \
                        and int(member.get('health', 0)):
                    if revert:
                        # In revert mode, seeing the state is the failure.
                        raise AssertionError('Found server: %s with state: %s in shard: #%s' % (server.id, states.upper(), shard_index))
                    state_is_matched = True
                    break
            time.sleep(10)
        else:
            # State already matched on a previous pass — stop polling.
            break
    else:
        # while/else: timed out without breaking out of the loop.
        if not revert:
            raise exceptions.TimeoutError('Timeout: 300 seconds reached. '
                                          'State %s is not checked in shard: #%s.' % (states.upper(), shard_index))
    LOG.info('Server %s state %s. Checked successfully' % (server.id, states.upper()))
def bootstrap_many_servers(step, serv_count, serv_names, role_type, timeout=1400):
    """Scale the role up to *serv_count* servers and save each bootstrapped
    server in world under its name from the comma-separated list."""
    names = [n.strip() for n in serv_names.split(',')]
    role = world.get_role(role_type)
    count = int(serv_count)
    # Raise the scaling window so all requested servers can launch.
    role.edit({"scaling.max_instances": count + 1,
               "scaling.min_instances": count})
    for index in range(count):
        LOG.info('Launch %s server' % (index + 1))
        server = world.wait_server_bootstrapping(role, ServerStatus.RUNNING,
                                                 timeout=timeout)
        LOG.info('Server %s bootstrapping as %s' % (server.id, names[index]))
        setattr(world, names[index], server)
def create_vhost_to_role(step, ssl, vhost_as, key_name, role_type, domain_as):
    """Add a virtual host (optionally SSL with a named key) for the saved
    domain on the given role and store it in world as *vhost_as*."""
    use_ssl = bool(ssl)
    cert = key_name if key_name else None
    role = world.get_role(role_type)
    domain = getattr(world, domain_as)
    LOG.info('Add new virtual host for role %s, domain %s as %s %s'
             % (role, domain.name, vhost_as,
                'with key {0}'.format(cert) if cert else ''))
    vhost = role.add_vhost(domain.name,
                           document_root='/var/www/%s' % vhost_as,
                           ssl=use_ssl,
                           cert=cert)
    setattr(world, vhost_as, vhost)
def change_branch_in_role_for_system(step, branch, role_type):
    """Change branch for selected role"""
    # Resolve the requested branch: 'system' and empty have special meaning.
    if 'system' in branch:
        target_branch = CONF.feature.branch
    elif not branch.strip():
        target_branch = CONF.feature.to_branch
    else:
        target_branch = branch.strip()
    LOG.info('Change branch to system for %s role' % role_type)
    role = world.get_role(role_type)
    role.edit(options={"user-data.scm_branch": target_branch})
def wait_data_in_mongodb(step, serv_as, replica, db_name):
    """Wait up to 10 minutes until randomly sampled documents that were
    inserted earlier become readable from *serv_as*.

    :param replica: truthy to read with secondary readPreference on 27018
    :raises OperationFailure: when the collection query itself fails
    :raises exceptions.TimeoutError: when the data never shows up in time
    """
    server = getattr(world, serv_as)
    db_role = world.get_role()
    credentials = None
    # Pick 3 random collections, then 10 random document ids from each.
    # (renamed from 'id', which shadowed the builtin)
    sample_ids = dict(sample(world.data_insert_result.items(), 3))
    sample_ids = dict([(key, sample(value, 10)) for key, value in sample_ids.iteritems()])
    # BUG FIX: the original format string had no %s placeholder, so the
    # sampled ids were silently dropped from the log line.
    LOG.info('Random data requested with a replica: %s' % sample_ids)
    start_time = time.time()
    while (time.time() - start_time) <= 600:
        if replica:
            credentials = {'readPreference': 'secondary', 'port': 27018}
        connection = db_role.db.get_connection(server, credentials=credentials)
        LOG.info('Checking data on %s. Connected with %s options.' % (serv_as, credentials))
        collection_count = len(sample_ids)
        for collection, objects in sample_ids.iteritems():
            try:
                LOG.info('Try to get documents: %s from random collection %s.' % (objects, collection))
                records_count = connection[db_name][collection].find({
                    '_id': {
                        '$in': objects
                    }
                }).count()
                LOG.info('Obtained documents count from random collection %s:%s on %s.' % (collection, records_count, serv_as))
            # BUG FIX: narrowed from a bare 'except:' that also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                raise OperationFailure(
                    'An error occurred while trying to get collection from %s database.\n'
                    'Original error: %s' % (db_name, sys.exc_info()[1]))
            if not records_count:
                # Nothing replicated yet — retry the whole sample later.
                break
            if records_count != len(objects):
                raise AssertionError(
                    'An error occurred while trying to check data.\n'
                    'Server %s has not data from %s' % (serv_as, objects))
            collection_count -= 1
        if not collection_count:
            # Every sampled collection verified — success.
            break
        time.sleep(5)
    else:
        raise exceptions.TimeoutError(
            'Timeout: 600 seconds reached.\n'
            'Server %s has not all inserted data to %s database.' % (server, db_name))
    LOG.info('Random data checked successfully on %s' % serv_as)
def save_timestamp(step, db, serv_as):
    """Record the current backup timestamp of *db* in world state so a
    later step can compare against it."""
    server = getattr(world, serv_as)
    role = world.get_role()
    db = db if db else ''
    handler_cls = get_db_handler(role.db.db_name)
    LOG.info('Getting database %s backup timestamp for %s server'
             % (db, role.db.db_name))
    stamp = handler_cls(server, db).get_timestamp()
    if not stamp:
        raise AssertionError('Database %s backup timestamp for %s server is empty.'
                             % (db, role.db.db_name))
    # Keep the timestamp globally for the later comparison step.
    setattr(world, '%s_backup_timestamp' % role.db.db_name, stamp)
    LOG.info('Database %s backup timestamp for %s server is: %s'
             % (db, role.db.db_name, stamp))
def restore_databases(step, databases, serv_as):
    """Restore each database in the comma-separated list from the
    /tmp/dbrestore/ dump directory on the server."""
    names = databases.split(',')
    server = getattr(world, serv_as)
    role = world.get_role()
    handler = get_db_handler(role.db.db_name)(server)
    LOG.info('Restoring databases %s in server %s' % (','.join(names), server.id))
    for name in names:
        LOG.info('Restore database %s' % name)
        handler.restore('/tmp/dbrestore/', name)
        LOG.info('Database %s was successfully restored.' % name)
    LOG.info('All databases: %s was successfully restored.' % ','.join(names))
def check_database_table(step, db, serv_as, pattern, line_count):
    """Verify the restored database exists and holds exactly *line_count*
    records matching *pattern*."""
    #TODO: Support to all databases
    server = getattr(world, serv_as)
    role = world.get_role()
    if not role.db.database_exist(db, server):
        raise AssertionError('Database %s not exist in server %s' % (db, server.id))
    handler_cls = get_db_handler(role.db.db_name)
    LOG.info('Getting database %s records count for %s server.'
             % (db, role.db.db_name))
    count = handler_cls(server, db).check_data(pattern)
    if int(count) != int(line_count):
        raise AssertionError('Records count in restored db %s is %s, but must be: %s'
                             % (db, count, line_count))
    LOG.info('Records count in restored db %s is: %s this corresponds to the transferred'
             % (db, count))
def trigger_creation(step, action, use_slave=None):
    """Kick off a databundle/backup/pmaaccess creation on the DB role and
    remember the previous run's date for later comparison."""
    action = action.strip()
    on_slave = bool(use_slave)
    db_role = world.get_role()
    info = db_role.db.info()
    if action != 'pmaaccess':
        # Save the previous run's date so later steps can detect a new one.
        setattr(world, 'last_%s' % action, info['last_%s' % action])
    if action == 'databundle':
        db_role.db.create_databundle(on_slave)
    else:
        getattr(db_role.db, 'create_%s' % action)()
    LOG.info("I'm trigger %s" % action)
    # Give the triggered operation time to start before the next step.
    time.sleep(180)
def check_timestamp(step, db, serv_as):
    """Compare the database backup timestamp with the one previously
    saved in world and fail if they differ."""
    server = getattr(world, serv_as)
    role = world.get_role()
    db = db if db else ''
    handler_cls = get_db_handler(role.db.db_name)
    LOG.info('Getting database %s new backup timestamp for %s server'
             % (db, role.db.db_name))
    current = handler_cls(server, db).get_timestamp()
    saved = getattr(world, '%s_backup_timestamp' % role.db.db_name)
    if current != saved:
        raise AssertionError('Timestamp is not equivalent: %s != %s'
                             % (current, saved))
    LOG.info('Database %s new backup timestamp for %s server is equivalent: %s = %s'
             % (db, role.db.db_name, saved, current))
def assert_check_slave(step, slave_serv, master_serv):
    """Assert that *slave_serv* is a replication slave and *master_serv*
    is the current master of the DB role."""
    slave_server = getattr(world, slave_serv)
    master_server = getattr(world, master_serv)
    db_role = world.get_role()
    slaves = db_role.db.get_slaves()
    master = db_role.db.get_master()
    for candidate in slaves:
        if candidate.id != slave_server.id:
            continue
        if master_server.id == master.id:
            return True
        raise AssertionError("Server %s is not master" % master.id)
    raise AssertionError("Server %s is not slave" % slave_server.id)
def change_branch_in_role_for_system(step, branch=None, role_type=None):
    """Change branch for selected role"""
    branch = branch or ''
    role_type = role_type or ''
    # 'system' and empty branch strings select configured defaults.
    if 'system' in branch:
        selected = CONF.feature.branch
    else:
        stripped = branch.strip()
        selected = stripped if stripped else CONF.feature.to_branch
    LOG.info('Change branch to system for %s role' % role_type)
    world.get_role(role_type).edit(options={"user-data.scm_branch": selected})
def add_new_role_to_farm(step, alias=None):
    """Add the previously rebundled role to the farm and save the Role
    object in world under '<alias>_role'."""
    LOG.info('Add rebundled role to farm with alias: %s' % alias)
    options = getattr(world, 'role_options', {})
    scripting = getattr(world, 'role_scripting', [])
    bundled_role = Role.get(world.bundled_role_id)
    alias = alias or bundled_role.name
    if 'redis' in bundled_role.behaviors:
        # Redis roles need persistence type and password options set.
        options['db.msr.redis.persistence_type'] = os.environ.get('RV_REDIS_SNAPSHOTTING', 'aof')
        options['db.msr.redis.use_password'] = True
    world.farm.add_role(world.bundled_role_id, options=options,
                        scripting=scripting, alias=alias)
    world.farm.roles.reload()
    role = world.get_role(alias)
    LOG.debug('Save Role object after insert rebundled role to farm as: %s/%s'
              % (role.id, alias))
    setattr(world, '%s_role' % alias, role)
def having_small_database(step, db_name, serv_as, username=None):
    """Insert test data into *db_name* after letting the freshly
    bootstrapped database settle for a minute."""
    server = getattr(world, serv_as)
    role = world.get_role()
    # Give the database service a minute before writing.
    time.sleep(60)
    if username:
        LOG.info("Create database %s in server %s by user %s"
                 % (db_name, server, username))
        insert_result = role.db.insert_data_to_database(
            db_name, server, (username, role.db.credentials[username]))
    else:
        LOG.info("Create database %s in server %s" % (db_name, server))
        insert_result = role.db.insert_data_to_database(db_name, server)
    setattr(world, 'data_insert_result', insert_result)
def save_device_for_additional_storage(step, mount_point):
    """Remember the volume id currently attached at *mount_point* in world
    under 'device_<mount_point>' (slashes replaced by underscores)."""
    role = world.get_role()
    storages = IMPL.farm.get_role_settings(world.farm.id, role.role.id)['storages']
    matching = [c for c in storages['configs'] if c['mountPoint'] == mount_point]
    if not matching:
        raise AssertionError('Can\'t found device for mount point: %s' % mount_point)
    config_id = matching[0]['id']
    volume_id = storages['devices'][config_id][0]['storageId']
    LOG.info('Volume Id for mount point "%s" is "%s"' % (mount_point, volume_id))
    setattr(world, 'device_%s' % mount_point.replace('/', '_'), volume_id)
def get_last_backup_url(step):
    """Find the most recent backup of the DB role and store its S3
    manifest URL in world as 'last_backup_url'."""
    LOG.info('Get last backup date')
    db_role = world.get_role()
    last_backup = db_role.db.info()['last_backup']
    # Backup listings are keyed with zero seconds, so round the date down.
    last_backup = last_backup - timedelta(seconds=last_backup.second)
    LOG.info('Last backup date is: %s' % last_backup)
    all_backups = IMPL.services.list_backups(world.farm.id)
    LOG.info('All backups is: %s' % all_backups)
    links = IMPL.services.backup_details(
        all_backups[last_backup]['backupId'])['links']
    # BUG FIX: corrected 'liks' typo in the log message.
    LOG.info('Backups links is: %s' % links)
    if not len(links):
        raise AssertionError('DB backup details is empty, no links found.')
    last_backup_url = 's3://%s/manifest.json' % links['1']['path']['dirname']
    LOG.info('Last backup URL: %s' % last_backup_url)
    setattr(world, 'last_backup_url', last_backup_url)
def assert_check_slave(step, slave_serv, master_serv):
    """Check the replication status: *master_serv* must be the master and
    *slave_serv* must appear among the slave entries."""
    slave = getattr(world, slave_serv)
    master = getattr(world, master_serv)
    db_role = world.get_role()
    info = db_role.db.info()
    try:
        if not info['servers']['master']['serverId'] == master.id:
            raise AssertionError('Master is not %s' % master_serv)
        for sl in info['servers']:
            if sl.startswith('slave'):
                if info['servers'][sl]['serverId'] == slave.id:
                    return True
    except IndexError:
        raise AssertionError("I'm not see replication status")
    # BUG FIX: the original formatted this message with info['slaves'],
    # a key that does not exist in the status dict (only 'servers' is
    # referenced above), so a KeyError masked the intended AssertionError.
    raise AssertionError('%s is not slave, all slaves: %s'
                         % (slave_serv, info['servers']))
def verify_db_not_exist(step, serv_as):
    """Verify the DB role holds no user-created databases (system
    databases are allowed, per-engine)."""
    db_role = world.get_role()
    databases = db_role.db.database_list()
    engine = db_role.db.db_name
    if engine in ['mysql2', 'percona']:
        # Strip the well-known system databases before counting.
        for system_db in ['information_schema', 'mysql', 'performance_schema', 'test']:
            if system_db in databases:
                databases.remove(system_db)
        if len(databases) > 0:
            raise AssertionError('%s role contains databases: "%s"'
                                 % (engine, databases))
    elif engine == 'redis':
        if databases:
            raise AssertionError('%s role contains databases: "%s"'
                                 % (engine, databases))
    elif engine == 'postgresql':
        # PostgreSQL ships with several default databases.
        if len(databases) > 5:
            raise AssertionError('%s role contains databases: "%s"'
                                 % (engine, databases))
def verify_saved_and_new_volumes(step, mount_point):
    """Check that the volume attached at *mount_point* is different from
    the one recorded earlier by save_device_for_additional_storage."""
    role = world.get_role()
    storages = IMPL.farm.get_role_settings(world.farm.id, role.role.id)['storages']
    matches = [c for c in storages['configs'] if c['mountPoint'] == mount_point]
    if not matches:
        raise AssertionError('Can\'t found device for mount point: %s' % mount_point)
    device = matches[0]['id']
    device_id = storages['devices'][device][0]['storageId']
    saved_id = getattr(world, 'device_%s' % mount_point.replace('/', '_'))
    if device_id == saved_id:
        raise AssertionError(
            'Old and new Volume Id for mount point "%s" is equally (%s)'
            % (mount_point, device))