def cleanup_environment(env_id):
    """Run the clean_env.py helper on one controller of the environment.

    Uploads the helper script into a temporary directory on the controller,
    executes it with the admin credentials sourced from openrc, and feeds it
    the short hostname and FQDN of every controller/compute node via stdin.
    """
    env = objects.Environment(env_id)
    controller = env_util.get_one_controller(env)
    sftp = ssh.sftp(controller)
    admin_pass = env_util.get_admin_password(env, controller)
    script_filename = 'clean_env.py'
    with ssh.tempdir(controller) as tempdir:
        src_path = os.path.join(
            magic_consts.CWD, "helpers", script_filename)
        dst_path = os.path.join(tempdir, script_filename)
        sftp.put(src_path, dst_path)
        # NOTE(review): admin_pass is interpolated unquoted into a shell
        # string; a password containing shell metacharacters would break
        # this command — confirm whether quoting is needed.
        shell_line = ('. /root/openrc; export OS_PASSWORD={0}; python {1}'
                      .format(admin_pass, dst_path))
        with ssh.popen(['sh', '-c', shell_line],
                       node=controller, stdin=ssh.PIPE) as proc:
            for node in env_util.get_nodes(env, ["controller", "compute"]):
                fqdn = node.data['fqdn']
                short_name = fqdn.split('.')[0]
                proc.stdin.write("{0}\n{1}\n".format(short_name, fqdn))
def get_databases(env):
    """Return the list of MySQL database names on the env's controller."""
    controller = env_util.get_one_controller(env)
    mysql_cmd = ['mysql', '--batch', '--skip-column-names']
    with ssh.popen(mysql_cmd, stdin=ssh.PIPE, stdout=ssh.PIPE,
                   node=controller) as proc:
        proc.stdin.write('SHOW DATABASES')
        output = proc.communicate()[0]
        # --batch prints one database name per line.
        return output.splitlines()
def mysqldump_from_env(env, dbs, fname):
    """Dump the given databases from the env's controller into a local file.

    The dump is gzipped on the remote side and streamed into *fname*.
    pipefail ensures a mysqldump failure is not masked by gzip succeeding.
    """
    controller = env_util.get_one_controller(env)
    remote_cmd = ('set -o pipefail; '
                  'mysqldump --add-drop-database --lock-all-tables '
                  '--databases {0} | gzip'.format(' '.join(dbs)))
    with ssh.popen(['bash', '-c', remote_cmd],
                   stdout=ssh.PIPE, node=controller) as proc:
        with open(fname, 'wb') as local_file:
            shutil.copyfileobj(proc.stdout, local_file)
def mysqldump_from_env(env, role_name, dbs, fname):
    """Dump the given databases from one node with *role_name* into *fname*.

    The dump is gzipped on the remote side and streamed to the local file.
    pipefail ensures a mysqldump failure is not masked by gzip succeeding.
    """
    node = env_util.get_one_node_of(env, role_name)
    remote_cmd = ('set -o pipefail; '
                  'mysqldump --add-drop-database --lock-all-tables '
                  '--databases {0} | gzip'.format(' '.join(dbs)))
    with ssh.popen(['bash', '-c', remote_cmd],
                   stdout=ssh.PIPE, node=node) as proc:
        with open(fname, 'wb') as local_file:
            shutil.copyfileobj(proc.stdout, local_file)
def mysqldump_from_env(env):
    """Dump all OS service databases from the env's controller.

    Streams a gzipped mysqldump of ``magic_consts.OS_SERVICES`` into the
    local cache, keeps a per-cluster copy, and returns the path of the
    primary dump file.
    """
    node = env_util.get_one_controller(env)
    local_fname = os.path.join(magic_consts.FUEL_CACHE, 'dbs.original.sql.gz')
    cmd = [
        'bash', '-c',
        # pipefail: without it a mysqldump failure would be masked by
        # gzip's exit status, leaving a silently-truncated dump (this
        # matches the other mysqldump helpers in this file).
        'set -o pipefail; ' +
        'mysqldump --add-drop-database --lock-all-tables '
        '--databases {0}'.format(' '.join(magic_consts.OS_SERVICES)) +
        ' | gzip',
    ]
    with ssh.popen(cmd, stdout=ssh.PIPE, node=node) as proc:
        with open(local_fname, 'wb') as local_file:
            shutil.copyfileobj(proc.stdout, local_file)
    # Keep a per-cluster copy so later runs for other envs don't clobber it.
    local_fname2 = os.path.join(
        magic_consts.FUEL_CACHE,
        'dbs.original.cluster_%s.sql.gz' % (env.data['id'],),
    )
    shutil.copy(local_fname, local_fname2)
    return local_fname
def mysqldump_from_env(env):
    """Dump all OS service databases from the env's controller.

    Streams a gzipped mysqldump of ``magic_consts.OS_SERVICES`` into the
    local cache, keeps a per-cluster copy, and returns the path of the
    primary dump file.
    """
    controller = env_util.get_one_controller(env)
    dump_path = os.path.join(magic_consts.FUEL_CACHE, 'dbs.original.sql.gz')
    # pipefail so a mysqldump error isn't hidden behind gzip's exit code.
    remote_cmd = ('set -o pipefail; '
                  'mysqldump --add-drop-database --lock-all-tables '
                  '--databases {0} | gzip'
                  .format(' '.join(magic_consts.OS_SERVICES)))
    with ssh.popen(['bash', '-c', remote_cmd],
                   stdout=ssh.PIPE, node=controller) as proc:
        with open(dump_path, 'wb') as local_file:
            shutil.copyfileobj(proc.stdout, local_file)
    cluster_copy = os.path.join(
        magic_consts.FUEL_CACHE,
        'dbs.original.cluster_%s.sql.gz' % (env.data['id'],),
    )
    shutil.copy(dump_path, cluster_copy)
    return dump_path
def fix_neutron_migrations(node):
    """Apply manual SQL fixups to the neutron database on *node*.

    Pipes three statements into the mysql client: backfill missing
    networksecuritybindings rows, ensure a 'physnet1' flat allocation
    exists, and retag external network segments as flat/physnet1.
    """
    # Statements are written in this exact order; the physnet1 allocation
    # must exist before the segments are pointed at it.
    statements = (
        "INSERT INTO networksecuritybindings "
        "SELECT id, 1 "
        "FROM networks "
        "WHERE id NOT IN (SELECT network_id FROM networksecuritybindings);",
        "INSERT INTO ml2_flat_allocations "
        "SELECT b.* FROM (SELECT 'physnet1') AS b "
        "WHERE NOT EXISTS ("
        "SELECT 1 FROM ml2_flat_allocations "
        "WHERE physical_network = 'physnet1'"
        ");",
        "UPDATE ml2_network_segments "
        "SET network_type='flat',physical_network='physnet1' "
        "WHERE network_id IN (SELECT network_id FROM externalnetworks);",
    )
    mysql_cmd = ['sudo', '-iu', 'root', 'mysql', 'neutron']
    with ssh.popen(mysql_cmd, node=node, stdin=ssh.PIPE) as proc:
        for sql in statements:
            proc.stdin.write(sql)
def cleanup_environment(env_id):
    """Run the clean_env.py helper on one controller of the environment.

    Uploads the helper script to /tmp on the controller, executes it with
    the environment's admin credentials, feeds it the FQDN of every
    controller/compute node via stdin, and removes the script afterwards.
    """
    env = objects.Environment(env_id)
    controller = env_util.get_one_controller(env)
    sftp = ssh.sftp(controller)
    # Fetch the real admin password instead of assuming the default
    # 'admin' — the hardcoded value breaks on any env with a changed
    # password (matches the other cleanup helper in this file).
    admin_pass = env_util.get_admin_password(env, controller)
    script_filename = 'clean_env.py'
    script_dst_filename = '/tmp/{0}'.format(script_filename)
    sftp.put(
        os.path.join(magic_consts.CWD, "helpers/{0}".format(script_filename)),
        script_dst_filename,
    )
    try:
        command = [
            'sh', '-c',
            '. /root/openrc; export OS_PASSWORD={0}; python {1}'.format(
                admin_pass, script_dst_filename),
        ]
        with ssh.popen(command, node=controller, stdin=ssh.PIPE) as proc:
            roles = ["controller", "compute"]
            for node in env_util.get_nodes(env, roles):
                proc.stdin.write(node.data['fqdn'] + "\n")
    finally:
        # Remove the uploaded script even if the cleanup run fails.
        ssh.call(['rm', '-f', script_dst_filename], node=controller)
def patch_mcollective(node):
    """Apply the local mcollective patch on *node* and restart the service.

    Streams the patch file over stdin to a remote ``patch -Np2`` invocation
    against the configured target directory.
    """
    patch_cmd = ['patch', '-Np2', magic_consts.MCOLLECTIVE_PATCH_TARGET]
    with open(magic_consts.MCOLLECTIVE_PATCH) as patch_file:
        with ssh.popen(patch_cmd, node=node, stdin=ssh.PIPE) as proc:
            shutil.copyfileobj(patch_file, proc.stdin)
    # Restart so the patched code is actually picked up.
    ssh.call(['service', 'mcollective', 'restart'], node=node)
def untar_files(filename, node):
    """Stream the local gzipped tarball *filename* to *node* and extract at /."""
    with open(filename, 'rb') as archive:
        with ssh.popen(['tar', '-xzv', '-C', '/'],
                       stdin=ssh.PIPE, node=node) as proc:
            shutil.copyfileobj(archive, proc.stdin)
def tar_files(filename, node, *files):
    """Archive *files* on *node* into a local gzipped tarball *filename*.

    -P keeps absolute paths so the archive can be restored at / later.
    """
    tar_cmd = ['tar', '-czvP'] + list(files)
    with ssh.popen(tar_cmd, stdout=ssh.PIPE, node=node) as proc:
        with open(filename, 'wb') as archive:
            shutil.copyfileobj(proc.stdout, archive)
def mysqldump_restore_to_env(env, fname):
    """Restore a local gzipped mysqldump *fname* onto the env's controller."""
    controller = env_util.get_one_controller(env)
    restore_cmd = ['sh', '-c', 'zcat | mysql']
    with open(fname, 'rb') as dump_file:
        with ssh.popen(restore_cmd, stdin=ssh.PIPE,
                       node=controller) as proc:
            # Decompression happens remotely; we just stream the bytes.
            shutil.copyfileobj(dump_file, proc.stdin)
def is_ceph_up(controller):
    """Return True iff every ceph OSD on the cluster reports status 'up'."""
    cmd = ['ceph', 'osd', 'tree', '-f', 'json']
    with ssh.popen(cmd, node=controller, stdout=ssh.PIPE) as proc:
        tree = json.load(proc.stdout)
    osds = (n for n in tree['nodes'] if n['type'] == 'osd')
    return all(osd['status'] == 'up' for osd in osds)
def mysqldump_restore_to_env(env, role_name, fname):
    """Restore a local gzipped mysqldump *fname* on one *role_name* node."""
    node = env_util.get_one_node_of(env, role_name)
    restore_cmd = ['sh', '-c', 'zcat | sudo -iu root mysql']
    with open(fname, 'rb') as dump_file:
        with ssh.popen(restore_cmd, stdin=ssh.PIPE, node=node) as proc:
            # Decompression happens remotely; we just stream the bytes.
            shutil.copyfileobj(dump_file, proc.stdin)