def commit_prod_data():
    if not env.host or env.type != "prod":
        abort("commit_prod_data should only be called on prod")
    with cd(env.directory):
        with prefix(env.activate):
            run("python manage.py dumpdata sites auth.group --indent=1 > ./initial_data.json")
            directories = ""
            for app in settings.PROD_DATA_MODELS:
                model_labels = []
                if app == "sites" or app == "auth":
                    continue
                fixtures_dir = "./%s/fixtures" % app
                directories += "%s/* " % fixtures_dir
                with fabric_settings(warn_only=True):
                    run("mkdir %s" % fixtures_dir)
                for model in settings.PROD_DATA_MODELS[app]:
                    model_labels.append("%s.%s" % (app, model))
                    with fabric_settings(warn_only=True):
                        run("python manage.py file_cleanup %s.%s" % (app, model))
                run("python manage.py dumpdata %s --indent=1 > %s/initial_data.json" % (" ".join(model_labels), fixtures_dir))
            run("python copy_media.py prod out")
            run("git add ./initial_data.json ./prod_data/* %s" % directories)
            with fabric_settings(warn_only=True):
                run('git commit -m "Prod data commit from prod."')
            run("git push origin master")

def teardown(
    self,
    instance  # THE boto INSTANCE OBJECT FOR THE MACHINE TO TEARDOWN
):
    with self.locker:
        self.instance = instance
        Log.note("teardown {{instance}}", instance=instance.id)
        self._config_fabric(instance)

        # ASK NICELY TO STOP Elasticsearch PROCESS
        with fabric_settings(warn_only=True):
            sudo("supervisorctl stop es")

        # ASK NICELY TO STOP "supervisord" PROCESS
        with fabric_settings(warn_only=True):
            sudo("ps -ef | grep supervisord | grep -v grep | awk '{print $2}' | xargs kill -SIGINT")

        # WAIT FOR SUPERVISOR SHUTDOWN
        pid = True
        while pid:
            with hide('output'):
                pid = sudo("ps -ef | grep supervisord | grep -v grep | awk '{print $2}'")

def sync(postgres_role=None, use_text_dump=False):
    dump_file = '/tmp/pytexasweb_dump.sql'
    dump_flags = '-Fc'
    # In some cases the postgres role doesn't exist on the machine
    # (e.g. installing from Homebrew on OS X), so we can choose a text
    # dump, which defaults to the database owner on restoration.
    if use_text_dump:
        dump_flags = ''
    while 1:
        ans = prompt("Create a new db snapshot [y/n]?")
        if ans.lower() in ['y', 'n']:
            break
    if ans.lower() == 'y':
        sudo("pg_dump {} -h db.internal -U postgres pytexasweb > {}".format(dump_flags, dump_file), user=WEB_USER)
    get(remote_path=dump_file, local_path=dump_file)
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pytx.settings")
    mypath = os.path.dirname(__file__)
    sys.path.append(mypath)
    from django.conf import settings
    db = settings.DATABASES['default']
    # If a different role is passed, use that role instead.
    if postgres_role:
        with fabric_settings(warn_only=True):
            local('createuser --superuser {}'.format(postgres_role))
        # Replace "postgres" in the dump file with the given role.
        local("perl -pi -e 's/postgres/{}/g' {}".format(postgres_role, dump_file))
        db['USER'] = postgres_role
    with fabric_settings(warn_only=True):
        local("dropdb -h {HOST} -U {USER} -p {PORT} {NAME}".format(**db))
    local("createdb -h {HOST} -U {USER} -p {PORT} {NAME}".format(**db))
    if use_text_dump:
        local("psql {} < {}".format(db['NAME'], dump_file))
    else:
        local("pg_restore -h {HOST} -U {USER} -d {NAME} -p {PORT} {}".format(dump_file, **db))
    rsync_project(
        remote_dir='/var/www/static/uploads/',
        local_dir=settings.MEDIA_ROOT,
        upload=False
    )

def _install_python(self):
    Log.note(
        "Install Python at {{instance_id}} ({{address}})",
        instance_id=self.instance.id,
        address=self.instance.ip_address
    )
    if fabric_files.exists("/usr/bin/pip"):
        with fabric_settings(warn_only=True):
            pip_version = sudo("pip --version")
    else:
        pip_version = ""
    if not pip_version.startswith("pip 9."):
        sudo("yum -y install python27")
        sudo("easy_install pip")
        with fabric_settings(warn_only=True):
            sudo("rm -f /usr/bin/pip")
        sudo("ln -s /usr/local/bin/pip /usr/bin/pip")
        sudo("pip install --upgrade pip")

def _run_task(self, task_command):
    options = {'pwd': self.working_directory(), 'command': task_command}
    with fabric_settings(warn_only=True):
        with lcd(self.working_directory()):
            if _platform == "darwin":
                script_command = "script %(pwd)s/frigg_testlog %(command)s"
            else:
                script_command = "script %(pwd)s/frigg_testlog -c \"%(command)s\" -q "
            # `script` already runs the task and records its output, so it must
            # only be invoked once; re-running task_command afterwards would
            # execute the task twice and clobber the captured result.
            run_result = local(script_command % options)
    self.result.succeeded = run_result.succeeded
    self.result.return_code += "%s," % run_result.return_code
    log = 'Task: %(command)s\n' % options
    log += '------------------------------------\n'
    with open("%(pwd)s/frigg_testlog" % options, "r") as f:
        log += f.read() + "\n"
    log += '------------------------------------\n'
    log += 'Exited with exit code: %s\n\n' % run_result.return_code
    self.result.result_log += log
    self.result.save()

def migrate():
    if not env.host:
        with fabric_settings(warn_only=True):
            local("find */migrations -name '*.pyc' | xargs rm")
        local("python manage.py migrate --all --no-initial-data")
    else:
        abort("migrate can only be called locally.")

def test_compute_network(self, *_):
    self.test_name = 'test_compute_network'
    self.blueprint_path = './examples/compute-network.yaml'
    self.inputs = dict(self.client_config)
    self.initialize_local_blueprint()
    self.addCleanup(self.cleanup_uninstall)
    self.install_blueprint()
    sleep(10)
    try:
        server_node_instance = self.cfy_local.storage.get_node_instances('vm')[0]
        ip_address = server_node_instance.runtime_properties['ip']
    except (KeyError, IndexError) as e:
        raise Exception('Missing Runtime Property: {0}'.format(str(e)))
    with fabric_settings(
            host_string=ip_address,
            key_filename=path.join(path.expanduser('~/'), '.ssh/vmware-centos.key'),
            user='******',
            abort_on_prompts=True):
        fabric_run_output = fabric_run('last')
        self.assertEqual(0, fabric_run_output.return_code)
    self.uninstall_blueprint()

def run_test(conn, ip_address, key_file_path):
    lgr.info('Bootstrapping a Cloudify manager...')
    os.system('cfy --version')
    lgr.info('Writing inputs file...')
    inputs = json.dumps({
        'public_ip': ip_address,
        'private_ip': 'localhost',
        'ssh_user': USER,
        'ssh_key_filename': key_file_path,
        'agents_user': USER
    }, indent=2)
    lgr.info('Bootstrap inputs: {0}'.format(inputs))
    with open('inputs.json', 'w') as f:
        f.write(inputs)
    execute('cfy init')
    execute('cfy bootstrap -p ../simple-manager-blueprint.yaml '
            '-i inputs.json --install-plugins')
    generated_key_path = '/root/.ssh/key.pem'
    lgr.info('Generating SSH keys for hello-world deployment...')
    with fabric_settings(host_string='{0}:{1}'.format(ip_address, 22),
                         user=USER,
                         key_filename=key_file_path,
                         timeout=30):
        fabric_run('sudo ssh-keygen -f {0} -q -t rsa -N ""'.format(
            generated_key_path))
        fabric_run('sudo cat {0}.pub >> ~/.ssh/authorized_keys'.format(
            generated_key_path))
    execute('git clone {0}'.format(HELLO_WORLD_URL))
    webserver_port = HELLO_WORLD_PORT
    hello_inputs = json.dumps({
        'server_ip': 'localhost',
        'agent_user': USER,
        'agent_private_key_path': generated_key_path,
        'webserver_port': webserver_port
    })
    with open('hello-inputs.json', 'w') as f:
        f.write(hello_inputs)
    execute('cfy blueprints upload -b {0} -p '
            'cloudify-hello-world-example/singlehost-blueprint.yaml'.format(
                BLUEPRINT_ID))
    execute('cfy deployments create -b {0} -d {1} -i hello-inputs.json'.format(
        BLUEPRINT_ID, DEPLOYMENT_ID))
    # Sleep some time because of CFY-4066
    lgr.info('Waiting for 15 seconds before executing install workflow...')
    time.sleep(15)
    execute('cfy executions start -d {0} -w install'.format(DEPLOYMENT_ID))
    url = 'http://{0}:{1}'.format(ip_address, webserver_port)
    lgr.info('Verifying deployment at {0}'.format(url))
    urllib2.urlopen(url).read()
    lgr.info('Deployment is running!')

def handle(self, *args, **kwargs):
    self.check_args(*args, **kwargs)
    project_root = kwargs["project_root"]
    staging_server = kwargs["staging_server"]
    with lcd(project_root):
        with fabric_settings(host_string=staging_server):
            pull_staging_to_localhost()

def handle(self, *args, **kwargs):
    self.check_args(*args, **kwargs)
    project_root = kwargs["project_root"]
    production_server = kwargs["production_server"]
    with lcd(project_root):
        with fabric_settings(host_string=production_server):
            setup_production()

def nginx_conf():
    "Install the nginx site configuration"
    with cd(env.project_root):
        # Interpolate env into the commands; without `% env` the literal
        # placeholders would be passed to the shell.
        sudo("cp conf/%(name)s/%(nginx_conf)s /etc/nginx/sites-available/." % env)
        with fabric_settings(warn_only=True):
            sudo("ln -s /etc/nginx/sites-available/%(nginx_conf)s /etc/nginx/sites-enabled/%(nginx_conf)s" % env)

def schemamigrate():
    if not env.host:
        apps = list(set(app.app_name for app in MigrationHistory.objects.all()))
        with fabric_settings(warn_only=True):
            for app in apps:
                local("python manage.py schemamigration %s --auto" % app)
    else:
        abort("Schemamigrate can only be called locally.")

def handle(self, *args, **kwargs):
    self.check_args(*args, **kwargs)
    hg_revision = kwargs["hg_revision"]
    project_root = kwargs["project_root"]
    production_server = kwargs["production_server"]
    with lcd(project_root):
        with fabric_settings(host_string=production_server):
            pull_staging_to_production(hg_revision=hg_revision)

def handle(self, *args, **kwargs):
    self.check_args(*args, **kwargs)
    hg_revision = kwargs["hg_revision"]
    project_root = kwargs["project_root"]
    staging_server = kwargs["staging_server"]
    with lcd(project_root):
        with fabric_settings(host_string=staging_server):
            pull_development_to_staging(hg_revision=hg_revision)

def _start_supervisor(self):
    put("./examples/config/es_supervisor.conf", "/etc/supervisord.conf", use_sudo=True)
    # START DAEMON (OR THROW ERROR IF RUNNING ALREADY)
    with fabric_settings(warn_only=True):
        sudo("supervisord -c /etc/supervisord.conf")
    sudo("supervisorctl reread")
    sudo("supervisorctl update")

def commit_local_data():
    if env.type != "staging":
        abort("commit_local_data should only be called on staging")
    with cd(env.directory):
        with prefix(env.activate):
            model_labels = []
            for app in settings.LOCAL_DATA_MODELS:
                for model in settings.LOCAL_DATA_MODELS[app]:
                    model_labels.append("%s.%s" % (app, model))
                    with fabric_settings(warn_only=True):
                        run("python manage.py file_cleanup %s.%s" % (app, model))
            run("python copy_media.py local out")
            with fabric_settings(warn_only=True):
                run("mkdir ./local_data/fixtures")
            run("python manage.py dumpdata %s --indent=1 > ./local_data/fixtures/local_data.json" % " ".join(model_labels))
            run("git add ./local_data/*")
            with fabric_settings(warn_only=True):
                run('git commit -m "Local data commit from staging."')
            run("git push origin dev")

def checkout():
    """Checkout from the specified branch."""
    _require_server()
    _require_branch()
    with cd(env.path):
        run('git fetch origin')
        with fabric_settings(hide('warnings'), warn_only=True):
            run('git checkout -b %(branch)s origin/%(branch)s' % env)
        run('git checkout %(branch)s' % env)
        run('git pull origin %(branch)s' % env)

def _install_lib(self, lib_name, install="install"):
    """
    :param lib_name: name of the yum package (or group) to install
    :param install: use 'groupinstall' if you wish
    :return:
    """
    with fabric_settings(warn_only=True):
        result = sudo("yum " + install + " -y " + lib_name)
        if result.return_code != 0 and result.find("already installed and latest version") == -1:
            Log.error("problem with install of {{lib}}", lib=lib_name)

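# Hedged usage sketch, not from the original corpus: `machine` stands in for an
# instance of the class that defines _install_lib, and the package names are
# illustrative.
#
#   machine._install_lib("gcc-c++")                                    # single package
#   machine._install_lib("Development tools", install="groupinstall")  # yum group
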
def satellite_capsule_zstream_upgrade(cap_host):
    """Upgrade a Capsule to its latest zStream version.

    :param string cap_host: Capsule hostname onto which the capsule upgrade
        will run

    Note: For a zStream upgrade, both 'To' and 'From' versions should be the same.

    FROM_VERSION: current Satellite version, which will be upgraded to the
        latest zStream version
    TO_VERSION: next Satellite version to which Satellite will be upgraded
    """
    logger.highlight('\n========== CAPSULE UPGRADE =================\n')
    from_version = settings.upgrade.from_version
    to_version = settings.upgrade.to_version
    if not from_version == to_version:
        logger.highlight(
            'zStream Upgrade on Capsule cannot be performed as FROM and TO '
            'versions are not the same. Aborting...')
        sys.exit(1)
    major_ver = settings.upgrade.os[-1]
    ak_name = settings.upgrade.capsule_ak[settings.upgrade.os]
    run(f'subscription-manager register --org="Default_Organization" '
        f'--activationkey={ak_name} --force')
    logger.info(
        f"Capsule registered with activation key {ak_name}; "
        f"listing all available repositories")
    run("subscription-manager repos --list")
    capsule_repos = [
        RHEL_CONTENTS["tools"]["label"],
        RHEL_CONTENTS["capsule"]["label"],
        RHEL_CONTENTS["maintenance"]["label"],
    ]
    with fabric_settings(warn_only=True):
        if settings.upgrade.distribution == "cdn":
            enable_disable_repo(enable_repos_name=capsule_repos)
        else:
            enable_disable_repo(disable_repos_name=capsule_repos)
        ansible_repos = [
            f"rhel-{major_ver}-server-ansible-"
            f"{settings.upgrade.ansible_repo_version}-rpms"
        ]
        enable_disable_repo(enable_repos_name=ansible_repos)
    # Check what repos are set
    # setup_foreman_maintain_repo()
    if settings.upgrade.foreman_maintain_capsule_upgrade:
        upgrade_using_foreman_maintain(sat_host=False)
    else:
        nonfm_upgrade(satellite_upgrade=False)
    # Reboot the capsule to apply any kernel updates
    if settings.upgrade.satellite_capsule_setup_reboot:
        reboot(160)
    host_ssh_availability_check(cap_host)
    # Check whether the Capsule upgrade succeeded
    upgrade_validation()

def verify_connectivity_to_instance(ip_address, key_filename):
    port = 22
    try:
        lgr.info('Verifying SSH connectivity to: {0}:{1}'.format(ip_address, port))
        with fabric_settings(host_string='{0}:{1}'.format(ip_address, port),
                             user=USER,
                             key_filename=key_filename):
            fabric_run('echo "hello"', timeout=10)
    except Exception as e:
        lgr.warning('Unable to connect: {0}'.format(str(e)))
        raise

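# A minimal retry wrapper around verify_connectivity_to_instance, assuming a
# freshly booted instance may refuse SSH for a while; the retry counts and the
# wait_for_ssh helper name are illustrative, not from the original corpus.
import time

def wait_for_ssh(ip_address, key_filename, retries=10, delay=10):
    for _ in range(retries):
        try:
            verify_connectivity_to_instance(ip_address, key_filename)
            return
        except Exception:
            time.sleep(delay)  # give sshd more time to come up
    raise RuntimeError('SSH never became available on {0}'.format(ip_address))
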
def handle(self, *args, **kwargs):
    self.check_args(*args, **kwargs)
    production_root_url = args[0]
    staging_server = kwargs["staging_server"]
    project_root = kwargs["project_root"]
    parent_project = kwargs["parent_project"]
    use_master_auth = not kwargs["no_master_auth"]
    with lcd(project_root):
        with fabric_settings(host_string=staging_server):
            setup_staging(
                production_root_url,
                local_master_django_project_dir=parent_project,
                use_master_auth=use_master_auth
            )

def setup_dirs():
    with fabric_settings(warn_only=True):
        with cd(env.project_root):
            run("mkdir static")
            run("mkdir media")
        # CSS compress
        with cd(env.project_root):
            run("mkdir static/CACHE")
            run("chmod 777 static/CACHE")
        # logs (only these commands contain placeholders needing `% env`)
        run("touch /tmp/%(gunicorn_log)s" % env)
        run("chmod 777 /tmp/%(gunicorn_log)s" % env)

def setup_dirs():
    with fabric_settings(warn_only=True):
        with cd(env.project_root):
            run("mkdir static")
            run("mkdir media")
        # CSS compress
        with cd(env.project_root):
            run("mkdir static/CACHE")
            run("chmod 777 static/CACHE")
        # Solr (create the log file, as done for the gunicorn log above)
        run("touch /tmp/solr.log")
        run("chmod 777 /tmp/solr.log")

def _run_task(self, task_command):
    with fabric_settings(warn_only=True):
        with lcd(self.working_directory):
            run_result = local(task_command, capture=True)
    self.result.succeeded = run_result.succeeded
    self.result.return_code += "%s," % run_result.return_code
    log = 'Task: {0}\n'.format(task_command)
    log += '------------------------------------\n'
    log += run_result
    log += '------------------------------------\n'
    log += 'Exited with exit code: %s\n\n' % run_result.return_code
    self.result.result_log += log
    self.result.save()

def get_single_ctx_val(self, command, expected_val, fab_kwargs, ip1, fabric_settings, tasks, api):
    with fabric_settings(**fab_kwargs):
        # TODO: Remove this retry when proper guest context client is implemented
        res_string = None
        for retry in xrange(5):
            if retry > 0:
                LOG.warning('Retrying guest context single value execution {}'.format(retry))
            ctx_val_res = tasks.execute(
                api.run,
                command,
                hosts=["root@%s" % ip1]
            )
            res_string = ctx_val_res.values()[0]
            if res_string == expected_val:
                break
        return res_string

def update_activity_log(dev, branch, step, output):
    from labmachine.fabsteps import remove_db_entry
    if remove_db_entry == step:
        # Don't try to get the Branch from the database
        # if we already removed it
        return
    branch_object = Branch.objects.get(dev=dev, branch=branch)
    log_file = join(branch_object.log_dir, 'love-ops.log')
    with fabric_settings(host_string=django_settings.FABRIC_HOST,
                         warn_only=True,
                         abort_on_prompts=True):
        run('echo "[%s] <<<Begin %s>>>" >> %s' % (unicode(datetime.now()), step.__name__, log_file))
        if output:
            # FIXME: escape the output
            run('echo "%s" >> %s' % (output.decode('utf8'), log_file))
        run('echo "[%s] <<<End %s>>>" >> %s' % (unicode(datetime.now()), step.__name__, log_file))

def shell_cmd(command: str, capture=True, shell=None, no_logs: bool = False) -> str:
    result = error = None
    with fabric_settings(abort_exception=Exception):
        from fabric.state import output
        output.stderr = False
        try:
            result = str(local(command, capture=capture, shell=shell))
        except Exception as e:
            if not no_logs:
                logging.getLogger(__file__).error(
                    'Error during shell command execution. Command:"{}". Error:{}'.format(command, str(e)))
            error = e
        finally:
            logging.getLogger(__file__).debug(
                'CMD:"{}". RESULT:{}'.format(command, result))
    if error:
        raise error
    else:
        return result

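# Hedged usage sketch for shell_cmd; the commands are illustrative. Because
# fabric's abort_exception is set to Exception, a non-zero exit raises instead
# of calling sys.exit(); no_logs=True only suppresses the error log entry.
if __name__ == '__main__':
    print(shell_cmd('echo hello'))        # captures and returns 'hello'
    try:
        shell_cmd('false', no_logs=True)  # non-zero exit still raises
    except Exception:
        pass
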
def run_commands(list_of_commands, directory='', ignore_errors=False, pty=False, shell=False):
    """
    Runs a list of shell commands through fabric.

    Set ignore_errors to True to avoid stopping when a command in the list fails.
    Set directory to the directory where you want to run the commands.
    """
    output = u''
    with fabric_settings(host_string=django_settings.FABRIC_HOST,
                         warn_only=True,
                         abort_on_prompts=True):
        with cd(directory):
            for command in list_of_commands:
                result = run(command, pty=pty, shell=shell)
                output += result.decode('utf8')
                if result.failed is True and ignore_errors is False:
                    raise RunCommandsException(output)
    return output

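# Hedged usage sketch for run_commands; the host comes from django settings as
# in the function above, while the directory and commands here are illustrative
# placeholders, not from the original corpus.
log_output = run_commands(
    ['git fetch origin', 'git merge origin/dev'],
    directory='/srv/myproject',
    ignore_errors=True,  # collect output even if a command fails
)
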
def create_database():
    if not env.host:
        with fabric_settings(warn_only=True):
            local("rm %s" % settings.DATABASES['default']['NAME'])
        local("python copy_media.py prod in")
        local("python manage.py syncdb --noinput --migrate")
        local("python manage.py clear_prod_stripe_ids")
    else:
        with cd(env.directory):
            with prefix(env.activate):
                if env.type == "prod":
                    abort("Create database should never be called on prod.")
                elif env.type == "staging":
                    run('echo "DROP DATABASE umeqo_staging_main; CREATE DATABASE umeqo_staging_main;"|python manage.py dbshell')
                elif env.type == "demo":
                    run('echo "DROP DATABASE umeqo_demo_main; CREATE DATABASE umeqo_demo_main;"|python manage.py dbshell')
                run("python manage.py syncdb --noinput --migrate")
                run("python copy_media.py prod in")
                run("python manage.py clear_prod_stripe_ids")

def _setup_etl_supervisor(self, cpu_count):
    # INSTALL supervisor
    sudo("apt-get install -y supervisor")
    with fabric_settings(warn_only=True):
        sudo("service supervisor start")

    # READ LOCAL CONFIG FILE, ALTER IT FOR THIS MACHINE'S RESOURCES, AND PUSH TO REMOTE
    conf_file = File("./examples/config/etl_supervisor.conf")
    content = conf_file.read_bytes()
    find = between(content, "numprocs=", "\n")
    content = content.replace("numprocs=" + find + "\n", "numprocs=" + str(cpu_count) + "\n")
    File("./temp/etl_supervisor.conf.alt").write_bytes(content)
    sudo("rm -f /etc/supervisor/conf.d/etl_supervisor.conf")
    put("./temp/etl_supervisor.conf.alt", '/etc/supervisor/conf.d/etl_supervisor.conf', use_sudo=True)
    run("mkdir -p /home/ubuntu/ActiveData-ETL/results/logs")

    # POKE supervisor TO NOTICE THE CHANGE
    sudo("supervisorctl reread")
    sudo("supervisorctl update")

def _refresh_indexer():
    result = run("ps -ef | grep java | grep -v grep | awk '{print $2}'")
    if not result:
        _start_es()
    with cd("/home/ec2-user/TestLog-ETL/"):
        result = run("git pull origin push-to-es")
        if result.find("Already up-to-date.") != -1:
            Log.note("No change required")
        else:
            # KILL EXISTING "python27" PROCESS
            with fabric_settings(warn_only=True):
                run("ps -ef | grep python27 | grep -v grep | awk '{print $2}' | xargs kill -9")
            Thread.sleep(seconds=5)
        result = run("ps -ef | grep python27 | grep -v grep | awk '{print $2}'")
        if not result:
            with shell_env(PYTHONPATH="."):
                _run_remote(
                    "python27 testlog_etl/push_to_es.py --settings=./resources/settings/push_to_es_staging_settings.json",
                    "push_to_es")

def test_blueprint_example(self, *_):
    self.test_name = 'test_blueprint_example'
    self.blueprint_path = './examples/local/blueprint.yaml'
    self.inputs = dict(self.client_config)
    self.inputs.update({
        'external_network_id': 'dda079ce-12cf-4309-879a-8e67aec94de4',
        'example_subnet_cidr': '10.10.0.0/24',
        'name_prefix': 'blueprint_',
        'image_id': 'e41430f7-9131-495b-927f-e7dc4b8994c8',
        'flavor_id': '3',
        'agent_user': '******'
    })
    self.initialize_local_blueprint()
    self.install_blueprint()
    time.sleep(10)
    private_key = StringIO.StringIO()
    try:
        server_floating_ip = self.cfy_local.storage.get_node_instances(
            'example-floating_ip_address')[0]
        server_key_instance = self.cfy_local.storage.get_node_instances(
            'example-keypair')[0]
        ip_address = server_floating_ip.runtime_properties['floating_ip_address']
        private_key.write(server_key_instance.runtime_properties['private_key'])
        private_key.pos = 0
    except (KeyError, IndexError) as e:
        raise Exception('Missing Runtime Property: {0}'.format(str(e)))
    with fabric_settings(host_string=ip_address,
                         key=private_key.read(),
                         user=self.inputs.get('agent_user'),
                         abort_on_prompts=True):
        fabric_run_output = fabric_run('last')
        self.assertEqual(0, fabric_run_output.return_code)
    # execute uninstall workflow
    self.uninstall_blueprint()

def es_node_service_restart(es_node_hostname, service_name):
    with fabric_settings(serial=True,
                         eagerly_disconnect=True,
                         host_string=es_node_hostname,
                         key=settings.secret_key,
                         user=settings.remote_user,
                         timeout=45):
        try:
            print('Restarting ES service {} on host: {}'.format(
                service_name, es_node_hostname))
            service_restart_result = sudo('service {} restart'.format(service_name))
            if service_restart_result.return_code != 0:
                service_restart_status = 'Unsuccessful'
            else:
                service_restart_status = 'Successful'
            print('Restart of ES service {} on host: {}'
                  ' - status: {}'.format(service_name, es_node_hostname,
                                         service_restart_status))
        except Exception as e:
            print('Error: {}'.format(e))

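# Hedged sketch of a rolling restart built on es_node_service_restart; the
# hostnames and service name are illustrative placeholders, not from the
# original corpus.
for host in ('es-node-1', 'es-node-2', 'es-node-3'):
    es_node_service_restart(host, 'elasticsearch')
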
def get_full_ctx(self, command, fab_kwargs, ip1, fabric_settings, tasks, api):
    with fabric_settings(**fab_kwargs):
        res_string = ''
        # TODO: Remove this retry when proper guest context client is implemented
        ctx_res_json = {}
        for retry in xrange(5):
            if retry > 0:
                LOG.warning('Retrying guest context whole definition execution {}'.format(retry))
            try:
                ctx_res = tasks.execute(
                    api.run,
                    command,
                    hosts=["root@%s" % ip1]
                )
                res_string = ctx_res.values()[0]
                ctx_res_json = json.loads(res_string)
            except Exception:
                continue
            else:
                break
        return ctx_res_json, res_string

def test_existing_compute_storage(self, *_):
    # Install the Actual VM
    self.test_name = 'test_existing_compute'
    self.blueprint_path = './examples/compute-storage.yaml'
    self.inputs = dict(self.client_config)
    self.initialize_local_blueprint()
    self.addCleanup(self.cleanup_uninstall)
    self.install_blueprint()
    sleep(10)
    try:
        server_node_instance = self.cfy_local.storage.get_node_instances('vm')[0]
        server_name = server_node_instance.runtime_properties['name']
    except (KeyError, IndexError) as e:
        raise Exception('Missing Runtime Property: {0}'.format(str(e)))
    # "Install" the "External" VM
    new_inputs = deepcopy(self.inputs)
    new_inputs.update({'old_vm': True, 'server_name': server_name})
    _cfy_local = self.initialize_local_blueprint(self.test_name + '2', new_inputs)
    self.install_blueprint(cfy_local=_cfy_local)
    try:
        server_node_instance = _cfy_local.storage.get_node_instances('vm')[0]
        ip_address = server_node_instance.runtime_properties['public_ip']
    except (KeyError, IndexError) as e:
        raise Exception('Missing Runtime Property: {0}'.format(str(e)))
    with fabric_settings(
            host_string=ip_address,
            key_filename=path.join(path.expanduser('~/'), '.ssh/vmware-centos.key'),
            user='******',
            abort_on_prompts=True):
        fabric_run_output = fabric_run('last')
        self.assertEqual(0, fabric_run_output.return_code)
    self.uninstall_blueprint()

def install_config_files(dev, branch):
    """
    Fill in the templates for config files and push them to the server
    """
    branch_object = Branch.objects.get(dev=dev, branch=branch)
    # Prepare data for templates
    context = fill_in_the_templates(branch_object)
    # Create dirs and install templates
    supervisor_path = join(branch_object.config_dir, 'supervisor/')
    nginx_path = join(branch_object.config_dir, 'nginx/')
    command_list = [
        'mkdir -p %s' % branch_object.config_dir,
        'mkdir -p %s' % supervisor_path,
        'mkdir -p %s' % nginx_path,
    ]
    output = run_commands(command_list)
    with fabric_settings(host_string=django_settings.FABRIC_HOST, warn_only=True):
        template_list = django_settings.SUPERVISOR_TEMPLATE_LIST
        for (template_path, target, ignore) in template_list:
            source = join(django_settings.SUPERVISOR_TEMPLATE_PATH, template_path)
            dest = join(branch_object.directory, target)
            upload_template(source, dest, context=context, backup=False)
            output += '%s is uploaded\n' % dest
    return output

def _install_es(self, gigabytes, es_version="6.2.3"): volumes = self.instance.markup.drives if not fabric_files.exists("/usr/local/elasticsearch/config/elasticsearch.yml"): with cd("/home/ec2-user/"): run("mkdir -p temp") if not File(LOCAL_JRE).exists: Log.error("Expecting {{file}} on manager to spread to ES instances", file=LOCAL_JRE) response = run("java -version", warn_only=True) if "Java(TM) SE Runtime Environment" not in response: with cd("/home/ec2-user/temp"): run('rm -f '+JRE) put(LOCAL_JRE, JRE) sudo("rpm -i "+JRE) sudo("alternatives --install /usr/bin/java java /usr/java/default/bin/java 20000") run("export JAVA_HOME=/usr/java/default") with cd("/home/ec2-user/"): run('wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-'+es_version+'.tar.gz') run('tar zxfv elasticsearch-'+es_version+'.tar.gz') sudo("rm -fr /usr/local/elasticsearch", warn_only=True) sudo('mkdir /usr/local/elasticsearch') sudo('cp -R elasticsearch-'+es_version+'/* /usr/local/elasticsearch/') with cd('/usr/local/elasticsearch/'): # BE SURE TO MATCH THE PLUGLIN WITH ES VERSION # https://github.com/elasticsearch/elasticsearch-cloud-aws sudo('sudo bin/elasticsearch-plugin install -b discovery-ec2') # REMOVE THESE FILES, WE WILL REPLACE THEM WITH THE CORRECT VERSIONS AT THE END sudo("rm -f /usr/local/elasticsearch/config/elasticsearch.yml") sudo("rm -f /usr/local/elasticsearch/config/jvm.options") sudo("rm -f /usr/local/elasticsearch/config/log4j2.properties") self.conn = self.instance.connection # MOUNT AND FORMAT THE VOLUMES (list with `lsblk`) for i, k in enumerate(volumes): if not fabric_files.exists(k.path): with fabric_settings(warn_only=True): sudo('sudo umount '+k.device) sudo('yes | sudo mkfs -t ext4 '+k.device) # ES AND JOURNALLING DO NOT MIX sudo('tune2fs -o journal_data_writeback '+k.device) sudo('tune2fs -O ^has_journal '+k.device) sudo('mkdir '+k.path) sudo('sudo mount '+k.device+' '+k.path) sudo('chown -R ec2-user:ec2-user '+k.path) # ADD TO /etc/fstab SO AROUND AFTER REBOOT sudo("sed -i '$ a\\"+k.device+" "+k.path+" ext4 defaults,nofail 0 2' /etc/fstab") # TEST IT IS WORKING sudo('mount -a') # INCREASE THE FILE HANDLE LIMITS with cd("/home/ec2-user/"): File("./results/temp/sysctl.conf").delete() get("/etc/sysctl.conf", "./results/temp/sysctl.conf", use_sudo=True) lines = File("./results/temp/sysctl.conf").read() if lines.find("fs.file-max = 100000") == -1: lines += "\nfs.file-max = 100000" lines = lines.replace("net.bridge.bridge-nf-call-ip6tables = 0", "") lines = lines.replace("net.bridge.bridge-nf-call-iptables = 0", "") lines = lines.replace("net.bridge.bridge-nf-call-arptables = 0", "") File("./results/temp/sysctl.conf").write(lines) put("./results/temp/sysctl.conf", "/etc/sysctl.conf", use_sudo=True) sudo("sudo sed -i '$ a\\vm.max_map_count = 262144' /etc/sysctl.conf") sudo("sysctl -p") # INCREASE FILE HANDLE PERMISSIONS sudo("sed -i '$ a\\root soft nofile 100000' /etc/security/limits.conf") sudo("sed -i '$ a\\root hard nofile 100000' /etc/security/limits.conf") sudo("sed -i '$ a\\root soft memlock unlimited' /etc/security/limits.conf") sudo("sed -i '$ a\\root hard memlock unlimited' /etc/security/limits.conf") sudo("sed -i '$ a\\ec2-user soft nofile 100000' /etc/security/limits.conf") sudo("sed -i '$ a\\ec2-user hard nofile 100000' /etc/security/limits.conf") sudo("sed -i '$ a\\ec2-user soft memlock unlimited' /etc/security/limits.conf") sudo("sed -i '$ a\\ec2-user hard memlock unlimited' /etc/security/limits.conf") if not fabric_files.exists("/data1/logs"): run('mkdir 
/data1/logs') run('mkdir /data1/heapdump') # COPY CONFIG FILES TO ES DIR if not fabric_files.exists("/usr/local/elasticsearch/config/elasticsearch.yml"): put("./examples/config/es6_log4j2.properties", '/usr/local/elasticsearch/config/log4j2.properties', use_sudo=True) jvm = File("./examples/config/es6_jvm.options").read().replace('\r', '') jvm = expand_template(jvm, {"memory": int(gigabytes/2)}) File("./results/temp/jvm.options").write(jvm) put("./results/temp/jvm.options", '/usr/local/elasticsearch/config/jvm.options', use_sudo=True) yml = File("./examples/config/es6_config.yml").read().replace("\r", "") yml = expand_template(yml, { "id": self.instance.ip_address, "data_paths": ",".join("/data" + text_type(i + 1) for i, _ in enumerate(volumes)) }) File("./results/temp/elasticsearch.yml").write(yml) put("./results/temp/elasticsearch.yml", '/usr/local/elasticsearch/config/elasticsearch.yml', use_sudo=True) sudo("chown -R ec2-user:ec2-user /usr/local/elasticsearch")
def test_servers_operations(self):
    dc = cr.Drive()
    sc = cr.Server()
    vc = cr.VLAN()

    puuid, p_pass = self._get_persistent_image_uuid_and_pass()

    LOG.debug('Get a vlan from the account')
    all_vlans = vc.list()
    if not all_vlans:
        raise SkipTest('There is no vlan in the acceptance test account')
    vlan = all_vlans[0]

    LOG.debug('Clone the persistent image')
    d1 = dc.clone(puuid, {'name': 'test_atom_clone_1'})
    self._wait_for_status(d1['uuid'], status='unmounted', timeout=self.TIMEOUT_DRIVE_CLONING, client=dc)

    g_def = {
        "name": "test_server",
        "cpu": 1000,
        "mem": 1024 ** 3,
        'vnc_password': '******',
        'drives': [
            {
                "device": "virtio",
                "dev_channel": "0:0",
                "drive": d1['uuid'],
                "boot_order": 1
            },
        ],
        "nics": [
            {
                "ip_v4_conf": {
                    "ip": None,
                    "conf": "dhcp"
                },
                "model": "virtio",
            },
            {
                "model": "virtio",
                "vlan": vlan['uuid'],
            }
        ],
    }

    LOG.debug('Creating guest with drive')
    g1 = sc.create(g_def)
    self._wait_for_status(d1['uuid'], 'mounted', client=dc)

    LOG.debug('Clone the guest')
    g2 = sc.clone(g1['uuid'])
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Check if the drive is active ( mounted )')
    d2_uuid = g2['drives'][0]['drive']['uuid']
    self._wait_for_status(d2_uuid, 'mounted', client=dc)

    LOG.debug('Start both guests')
    sc.start(g1['uuid'])
    sc.start(g2['uuid'])
    self._wait_for_status(g1['uuid'], 'running', client=sc)
    self._wait_for_status(g2['uuid'], 'running', client=sc)

    LOG.debug('Refetch guest configurations')
    g1 = sc.get(g1['uuid'])
    g2 = sc.get(g2['uuid'])

    LOG.debug('Get the assigned ips')
    ip1 = g1['nics'][0]['runtime']['ip_v4']["uuid"]
    ip2 = g2['nics'][0]['runtime']['ip_v4']["uuid"]

    self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)
    self._wait_for_open_socket(ip2, 22, timeout=40, close_on_success=True)

    from fabric.api import settings as fabric_settings
    from fabric import tasks, api

    fab_kwargs = {
        "warn_only": True,
        "abort_on_prompts": True,
        "use_ssh_config": p_pass is None
    }
    LOG.debug('Using fabric config {}'.format(fab_kwargs))
    if p_pass is not None:
        fab_kwargs['password'] = p_pass
        LOG.debug('Using a password to SSH to the servers ( not using ssh config )')

    with fabric_settings(**fab_kwargs):
        LOG.debug('Changing hostnames and restarting avahi on guest 1')
        set_hostname = 'hostname {} && service avahi-daemon restart'
        tasks.execute(api.run, set_hostname.format("atom1"), hosts=["root@%s" % ip1])

        LOG.debug('Changing hostnames and restarting avahi on guest 2')
        tasks.execute(api.run, set_hostname.format("atom2"), hosts=["root@%s" % ip2])

        LOG.debug('Ping the two hosts via private network')
        ping_res = tasks.execute(api.run, "ping atom2.local -c 1", hosts=["root@%s" % ip1])
        self.assertEqual(ping_res.values()[0].return_code, 0, 'Could not ping host atom2 from atom1')

        LOG.debug('Halt both servers')
        tasks.execute(api.run, "halt", hosts=["root@%s" % ip1, "root@%s" % ip2])

    LOG.debug('Wait for complete shutdown')
    self._wait_for_status(g1['uuid'], 'stopped', client=sc, timeout=40)
    self._wait_for_status(g2['uuid'], 'stopped', client=sc)

    LOG.debug('Deleting both guests')
    sc.delete(g1['uuid'])
    sc.delete(g2['uuid'])

    LOG.debug('Deleting both drives')
    dc.delete(d1['uuid'])
    dc.delete(d2_uuid)

    self._wait_deleted(d1['uuid'], client=dc)
    self._wait_deleted(d2_uuid, client=dc)

def _install_es(self, gigabytes):
    volumes = self.instance.markup.drives

    if not fabric_files.exists("/usr/local/elasticsearch"):
        with cd("/home/ec2-user/"):
            run("mkdir -p temp")

        if not File(LOCAL_JRE).exists:
            Log.error("Expecting {{file}} on manager to spread to ES instances", file=LOCAL_JRE)
        with cd("/home/ec2-user/temp"):
            run('rm -f ' + JRE)
            put("resources/" + JRE, JRE)
            sudo("rpm -i " + JRE)
        sudo("alternatives --install /usr/bin/java java /usr/java/default/bin/java 20000")
        run("export JAVA_HOME=/usr/java/default")

        with cd("/home/ec2-user/"):
            run('wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.7.1.tar.gz')
            run('tar zxfv elasticsearch-1.7.1.tar.gz')
            sudo('mkdir /usr/local/elasticsearch')
            sudo('cp -R elasticsearch-1.7.1/* /usr/local/elasticsearch/')

        with cd('/usr/local/elasticsearch/'):
            # BE SURE TO MATCH THE PLUGIN WITH ES VERSION
            # https://github.com/elasticsearch/elasticsearch-cloud-aws
            sudo('bin/plugin -install elasticsearch/elasticsearch-cloud-aws/2.7.1')

        # REMOVE THESE FILES, WE WILL REPLACE THEM WITH THE CORRECT VERSIONS AT THE END
        sudo("rm -f /usr/local/elasticsearch/config/elasticsearch.yml")
        sudo("rm -f /usr/local/elasticsearch/bin/elasticsearch.in.sh")

    self.conn = self.instance.connection

    # MOUNT AND FORMAT THE EBS VOLUMES (list with `lsblk`)
    for i, k in enumerate(volumes):
        if not fabric_files.exists(k.path):
            with fabric_settings(warn_only=True):
                sudo('sudo umount ' + k.device)
            sudo('yes | sudo mkfs -t ext4 ' + k.device)
            sudo('mkdir ' + k.path)
            sudo('sudo mount ' + k.device + ' ' + k.path)

            # ADD TO /etc/fstab SO IT IS STILL MOUNTED AFTER REBOOT
            sudo("sed -i '$ a\\" + k.device + " " + k.path + " ext4 defaults,nofail 0 2' /etc/fstab")
            # TEST IT IS WORKING
            sudo('mount -a')

    # INCREASE THE FILE HANDLE LIMITS
    with cd("/home/ec2-user/"):
        File("./results/temp/sysctl.conf").delete()
        get("/etc/sysctl.conf", "./results/temp/sysctl.conf", use_sudo=True)
        lines = File("./results/temp/sysctl.conf").read()
        if lines.find("fs.file-max = 100000") == -1:
            lines += "\nfs.file-max = 100000"
        lines = lines.replace("net.bridge.bridge-nf-call-ip6tables = 0", "")
        lines = lines.replace("net.bridge.bridge-nf-call-iptables = 0", "")
        lines = lines.replace("net.bridge.bridge-nf-call-arptables = 0", "")
        File("./results/temp/sysctl.conf").write(lines)
        put("./results/temp/sysctl.conf", "/etc/sysctl.conf", use_sudo=True)
        sudo("sysctl -p")

    # INCREASE FILE HANDLE PERMISSIONS
    sudo("sed -i '$ a\\root soft nofile 50000' /etc/security/limits.conf")
    sudo("sed -i '$ a\\root hard nofile 100000' /etc/security/limits.conf")
    sudo("sed -i '$ a\\root memlock unlimited' /etc/security/limits.conf")
    sudo("sed -i '$ a\\ec2-user soft nofile 50000' /etc/security/limits.conf")
    sudo("sed -i '$ a\\ec2-user hard nofile 100000' /etc/security/limits.conf")
    sudo("sed -i '$ a\\ec2-user memlock unlimited' /etc/security/limits.conf")
    # EFFECTIVE LOGIN TO LOAD CHANGES TO FILE HANDLES
    # sudo("sudo -i -u ec2-user")

    if not fabric_files.exists("/data1/logs"):
        sudo('mkdir /data1/logs')
        sudo('mkdir /data1/heapdump')

    # INCREASE NUMBER OF FILE HANDLES
    # sudo("sysctl -w fs.file-max=64000")

    # COPY CONFIG FILE TO ES DIR
    if not fabric_files.exists("/usr/local/elasticsearch/config/elasticsearch.yml"):
        yml = File("./examples/config/es_config.yml").read().replace("\r", "")
        yml = expand_template(yml, {
            "id": Random.hex(length=8),
            "data_paths": ",".join("/data" + unicode(i + 1) for i, _ in enumerate(volumes))
        })
        File("./results/temp/elasticsearch.yml").write(yml)
        put("./results/temp/elasticsearch.yml", '/usr/local/elasticsearch/config/elasticsearch.yml', use_sudo=True)

    # FOR SOME REASON THE export COMMAND DOES NOT SEEM TO WORK
    # THIS SCRIPT SETS THE ES_MIN_MEM/ES_MAX_MEM EXPLICITLY
    if not fabric_files.exists("/usr/local/elasticsearch/bin/elasticsearch.in.sh"):
        sh = File("./examples/config/es_run.sh").read().replace("\r", "")
        sh = expand_template(sh, {"memory": unicode(int(gigabytes / 2))})
        File("./results/temp/elasticsearch.in.sh").write(sh)
        with cd("/home/ec2-user"):
            put("./results/temp/elasticsearch.in.sh", './temp/elasticsearch.in.sh', use_sudo=True)
            sudo("cp -f ./temp/elasticsearch.in.sh /usr/local/elasticsearch/bin/elasticsearch.in.sh")