def _load_config(**kwargs):
    """Find and parse the server config file.

    If the `config` keyword argument wasn't set, look for a default
    'server_config.yaml' or 'server_config.json' file.
    """
    config, ext = os.path.splitext(kwargs.get(
        'config',
        'server_config.yaml' if os.path.exists('server_config.yaml')
        else 'server_config.json'))
    if not os.path.exists(config + ext):
        print(_red('Error. "%s" file not found.' % (config + ext)))
        return {}
    if YAML_AVAILABLE and ext == '.yaml':
        loader = yaml
    elif JSON_AVAILABLE and ext == '.json':
        loader = json
    else:
        print(_red('Parser package not available'))
        return {}
    # Open the file and deserialize its settings.
    with open(config + ext) as config_file:
        return loader.load(config_file)
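# A minimal usage sketch for _load_config (hypothetical call site; assumes the
# yaml/json modules and the YAML_AVAILABLE/JSON_AVAILABLE flags referenced above):
#
#     settings = _load_config(config='server_config.yaml')
#     if not settings:
#         abort('No usable server config found.')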
def restart_gunicorn():
    with cd("/home/ubuntu/VirtualEnvironment/"):
        with prefix("source bin/activate"):
            result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
            if result:
                print("\n\n%s\n\n" % _green("Gunicorn is running"))
                confirmation = confirm("Do you want to restart gunicorn?", default=True)
                if confirmation:
                    pid = run("ps aux | grep gunicorn | awk 'NR==1 {print $2}'")
                    run("sudo kill -9 %s" % pid)
                    result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
                    if not result:
                        print("\n\n%s\n\n" % _red("Gunicorn has been stopped and is starting with the new repo"))
                        with cd("canworks"):
                            run("gunicorn -c canworks/configs/gunicorn_config.py api:app")
                            result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
                            if result:
                                print("\n\n%s\n\n" % _green("Gunicorn is running"))
                            else:
                                print("\n\n%s\n\n" % _red("Gunicorn is not running; you need to log in to the server"))
                else:
                    print("\n\n%s\n\n" % _red("Gunicorn has not been stopped"))
            return
def clone_puppet_repo(retry_count=0):
    with cd('/etc/puppet/modules'):
        if not exists('couchdb'):
            try:
                sudo('git clone https://github.com/wieden-kennedy/spandex-couch /etc/puppet/modules/couchdb')
            except SystemExit:
                if retry_count < 1:
                    retry_count += 1
                    print(_red("\nError with GitHub authentication. Reattempting one more time.\n"))
                    clone_puppet_repo(retry_count)
                else:
                    print(_red("\nGitHub authentication failed. Please run the command again with the proper credentials."))
        else:
            print(_green("Puppet module already exists. Attempting to update module from git."))
            update_puppet_repo()
def ami_status(ec2conn, ami_id):
    getami_id = ec2conn.get_all_images(image_ids=ami_id)
    logging.info(_yellow('AMI is ') + _red(getami_id[0].state))
    while getami_id[0].state != 'available':
        logging.info(_yellow('AMI is still ') + _red(getami_id[0].state) + ', sleeping for 5 seconds...')
        time.sleep(5)
        getami_id[0].update()
    logging.debug(_green('AMI status: ') + _red(getami_id[0].state))
    return getami_id[0].state
def render(slug):
    '''Compile an app template into HTML'''
    if not os.path.exists('apps/%s' % slug):
        print(_red("--- App with this slug can't be found (%s)" % slug))
        return
    env = Environment(loader=FileSystemLoader('templates'))
    template = env.get_template('apps/app_template.html')

    # Manifest
    data = ''
    with open("apps/%s/manifest.json" % slug, "r") as manifest:
        data = manifest.read().replace('\n', '')
    context = json.loads(data)

    # Description
    description = ''
    with open("apps/%s/description.md" % slug, "r") as desc_file:
        description = desc_file.read()

    # Copy images to /static/
    if not os.path.exists('static/apps/%s' % slug):
        os.makedirs('static/apps/%s' % slug)
    if context['images'].get('logo'):
        shutil.copy2('apps/%s/images/%s' % (slug, context['images']['logo']),
                     'static/apps/%s' % slug)
    for screenshot in context['images'].get('screenshots', []):
        shutil.copy2('apps/%s/images/screenshots/%s' % (slug, screenshot),
                     'static/apps/%s' % slug)

    # Render the description markdown
    context['description'] = markdown.markdown(description)

    # Write the output (truncates any previously rendered file)
    if not os.path.exists('templates/apps/%s' % slug):
        os.makedirs('templates/apps/%s' % slug)
    with open("templates/apps/%s/index.html" % slug, "w") as output:
        output.write('{% extends "base.html" %}\n{% block body %}\n')
        output.write(template.render(context))
        output.write('\n{% endblock %}')
    print(_green('--- Done rendering. You can always re-render files with'))
    print(_yellow('--- fab render:%s' % slug))
def _set_vagrant_env():
    env.python = 'python2.6'
    env.path = '/home/vagrant/www'
    env.repo_path = '/vagrant'
    env.env_path = os.path.join(env.path, 'env')
    env.code_path = os.path.join(env.repo_path, env.project_name)
    env.cmd_apache_start = 'sudo /usr/sbin/apache2ctl start'
    env.cmd_apache_stop = 'sudo /usr/sbin/apache2ctl stop'
    env.cmd_apache_restart = 'sudo /usr/sbin/apache2ctl restart'
    env.mysql_admin_user = '******'
    env.mysql_admin_pass = '******'
    try:
        sys.path.insert(0, env.project_name)
        from config.vagrant import DATABASES
        env.mysql_dbname = DATABASES['default']['NAME']
        env.mysql_user = DATABASES['default']['USER']
        env.mysql_pass = DATABASES['default']['PASSWORD']
    except ImportError:
        print(_red('... Unable to get database configuration from Django project, falling back to defaults'))
        env.mysql_dbname = env.project_name
        env.mysql_user = env.project_name
        env.mysql_pass = env.project_name
def execute(self):
    options = self._job.get('options', [])
    subnet_id = options[0] if len(options) > 0 else self._app['environment_infos']['subnet_ids'][0]
    private_ip_address = options[1] if len(options) > 1 else None
    try:
        log(_green("STATE: Started"), self._log_file)
        instance = create_ec2_instance(self._cloud_connection, self._app, self._color,
                                       self._config, private_ip_address, subnet_id,
                                       self._log_file)
        self._worker.update_status(
            "done",
            message="Creating Instance OK: [{0}]\n\nPublic IP: {1}".format(
                self._app['name'], str(instance.ip_address)))
        log(_green("STATE: End"), self._log_file)
    except Exception as e:
        self._worker.update_status(
            "failed",
            message="Creating Instance Failed: [{0}]\n{1}".format(self._app['name'], e))
        log(_red("STATE: End"), self._log_file)
def execute(self):
    if not boolify(self._config.get('enable_executescript_command', True)):
        return self._abort("This command has been disabled by your administrator.")
    options = self._job.get('options', [])
    script = options[0] if len(options) > 0 else None
    module_name = options[1] if len(options) > 1 else None
    execution_strategy = options[2] if len(options) > 2 else None
    if execution_strategy == 'single':
        # options[3] is a single host IP
        fabric_execution_strategy = None
        safe_deployment_strategy = None
        single_host_ip = options[3] if len(options) > 3 else None
    else:
        # options[2] is the fabric type, options[3] may be the safe-deploy group parameter
        fabric_execution_strategy = execution_strategy
        safe_deployment_strategy = options[3] if len(options) > 3 else None
        single_host_ip = None
    try:
        log(_green("STATE: Started"), self._log_file)
        try:
            if not script or not script.strip():
                return self._abort("No valid script provided")
            script_data = b64decode_utf8(script)
            allowed_shebang = ('#!/bin/bash', '#! /bin/bash', '#!/bin/sh', '#! /bin/sh')
            if not script_data.startswith(allowed_shebang):
                return self._abort("No valid shell script provided (shebang missing)")
        except Exception:
            return self._abort("No valid script provided")
        if single_host_ip:
            log(_yellow("Executing script on a single host: %s" % single_host_ip), self._log_file)
            self._exec_script_single_host(script_data, module_name, single_host_ip)
        else:
            log(_yellow("Executing script on every running instance"), self._log_file)
            self._exec_script(script_data, module_name,
                              fabric_execution_strategy, safe_deployment_strategy)
        self._worker.update_status("done", message=self._get_notification_message_done())
        log(_green("STATE: End"), self._log_file)
    except Exception as e:
        self._worker.update_status("failed", message=self._get_notification_message_failed(e))
        log(_red("STATE: End"), self._log_file)
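# A sketch of how a caller might prepare the job options consumed above
# (hypothetical values; the only assumption is that option 0 is a
# base64-encoded UTF-8 shell script, matching the b64decode_utf8 call):
#
#     import base64
#     script = base64.b64encode("#!/bin/bash\necho deploy hook\n".encode('utf-8'))
#     job['options'] = [script, 'my-module', 'single', '10.0.0.12']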
def create_server():
    """
    Creates an EC2 instance.
    """
    print(_green("Started..."))
    print(_red("...Creating EC2 instance..."))
    conn = boto.ec2.connect_to_region(ec2_region,
                                      aws_access_key_id=ec2_key,
                                      aws_secret_access_key=ec2_secret)
    image = conn.get_all_images(ec2_amis)
    reservation = image[0].run(1, 1, key_name=ec2_key_pair,
                               instance_type=ec2_instancetype,
                               user_data=user_data_script)
    instance = reservation.instances[0]
    conn.create_tags([instance.id], {Key: Tag})
    conn.create_tags([instance.id], {'Name': Name})
    while instance.state == u'pending':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(10)
        instance.update()
    print(_green("Instance state: %s" % instance.state))
    print(_green("Public dns: %s" % instance.public_dns_name))
    return instance.public_dns_name
def launch(ec2conn, ami_id, count, keys, image_type, hostname, subnet, description):
    logging.info(_yellow('getting the instance ready...'))
    ami = ec2conn.get_all_images(image_ids=ami_id)
    reservation = ami[0].run(max_count=count, key_name=keys,
                             instance_type=image_type,
                             monitoring_enabled=False, subnet_id=subnet)
    instance = reservation.instances[0]
    ec2conn.create_tags([instance.id], {"Name": hostname})
    while instance.state == u'pending':
        logging.info(_yellow("Instance state is " + _red(instance.state) + ', sleeping for 10 seconds...'))
        time.sleep(10)
        instance.update()
    logging.info(_green("Instance id: ") + _red(instance.id))
    logging.info(_green("Instance name: ") + _red(instance.tags['Name']))
    logging.info(_green("Instance state: ") + _red(instance.state))
def create(instance_type):
    """
    Creates a new instance in the Amazon cloud.
    """
    start_time = time.time()
    print(_green("Started..."))
    env.environment = None
    while env.environment not in ('Staging', 'Production'):
        environment = prompt('Please specify target environment: ')
        setattr(env, 'environment', environment.strip().capitalize())
    try:
        fabconf, env_config = parse_ini(instance_type)
    except Exception as e:
        print(_red('Exception parsing config file: {}'.format(str(e))))
        exit()
    env.user = fabconf['SERVER_USERNAME']
    env.key_filename = fabconf['SSH_PRIVATE_KEY_PATH']
    # Import _create_server and execute it.
    from misc import _create_server
    env.host_string, instance_id = _create_server()
    print(_green("Waiting 60 seconds for server to boot..."))
    time.sleep(60)
    try:
        exec("from recipes.default_%s import create_recipe_%s as recipe"
             % (instance_type, env.environment.lower()), globals())
    except Exception as e:
        print(_red('You are using an incorrect instance conf name: {}'.format(str(e))))
        exit()
    # Import _oven and execute the recipe.
    from misc import _oven
    _oven(recipe)
    if 'LB_NAME' in fabconf:
        from lb import register_instance_in_lb
        register_instance_in_lb(fabconf['LB_NAME'], instance_id)
    end_time = time.time()
    print(_green("Runtime: %f minutes" % ((end_time - start_time) / 60)))
    print(_green(env.host_string))
def deploy(instance_type):
    """
    Merge and deploy new changes from production.
    """
    require('environment', provided_by=('staging', 'production'))
    try:
        fabconf, env_config = parse_ini(instance_type)
    except Exception as e:
        print(_red('Exception parsing config file: {}'.format(str(e))))
        exit()
    if env.environment == "Production":
        tag = ""
        while not tag:
            tag = prompt('Please specify the release of the code: ')
        tag_message = prompt('Please specify the release description message: ')
        env.fabconf['TAG'] = tag
        env.fabconf['TAG_MESSAGE'] = tag_message
    try:
        exec("from recipes.default_%s import deploy_recipe_%s as recipe"
             % (instance_type, env.environment.lower()), globals())
    except Exception as e:
        print(_red('You are using an incorrect instance conf name: {}'.format(str(e))))
        exit()
    start_time = time.time()
    print(_green("Deploying changes to all instances of type webapp to the %s environment"
                 % env.environment))
    from misc import _deploy, _virtualenv
    _deploy(recipe)
    print(_green('Completed deployment from github'))
    print(_green('Starting DB Migrations...'))
    # _virtualenv('%(PROJECT_NAME)s/./manage.py resetdb')  # would refresh the
    # database - new installation(s) only
    _virtualenv('%(PROJECT_NAME)s/./manage.py migrate')
    print(_green('Starting Collect Static...'))
    _virtualenv('%(PROJECT_NAME)s/./manage.py collectstatic --noinput')
    end_time = time.time()
    print(_green("Runtime: %f minutes" % ((end_time - start_time) / 60)))
def restart_with_new_repo():
    with cd("/home/ubuntu/VirtualEnvironment/"):
        with prefix("source bin/activate"):
            result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
            if result:
                print("\n\n%s\n\n" % _green("Gunicorn is running"))
                confirmation = confirm("Do you want to restart gunicorn?", default=True)
                if confirmation:
                    pid = run("ps aux | grep gunicorn | awk 'NR==1 {print $2}'")
                    run("sudo kill -9 %s" % pid)
                    result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
                    if not result:
                        print("\n\n%s\n\n" % _red("Gunicorn has been stopped and is starting with the new repo"))
                        with cd("Canworks"):
                            run("git pull origin master")
                            run("gunicorn -c configs/gunicorn_config.py api:app")
                            result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
                            if result:
                                print("\n\n%s\n\n" % _green("Gunicorn is running"))
                                run("sudo service nginx restart")
                            else:
                                print("\n\n%s\n\n" % _red("Gunicorn is not running; you need to log in to the server"))
                else:
                    print("\n\n%s\n\n" % _red("Gunicorn has not been stopped"))
                return
            else:
                print("\n\n%s\n\n" % _red("Gunicorn has not been started yet"))
                with cd("Canworks"):
                    run("gunicorn -c configs/gunicorn_config.py api:app")
                restart_with_new_repo()
def _get_gpu_host():
    conn = boto.ec2.connect_to_region(GPU_INSTANCE_REGION)
    reservations = conn.get_all_instances(filters={"tag:Name": GPU_INSTANCE_NAME})
    instances = [i for r in reservations for i in r.instances if i.state == 'running']
    if len(instances) > 1:
        print(_red("Multiple GPU hosts detected. This script only supports one."))
        exit()
    return instances[0] if len(instances) == 1 else []
def gmailbackup_install():
    with cd('/data'):
        if not exists('gmailbackup'):
            run('mkdir gmailbackup')
            run('wget http://gmailbackup.googlecode.com/files/gmailbackup-20100324_0051.tgz')
            run('tar -xzf gmailbackup-20100324_0051.tgz')
        else:
            print(_red('Already installed'))
def prep_image(ec2conn, ami_id):
    logging.info(_yellow('Waiting for AMI to be available...'))
    newami_id = ami_status(ec2conn, ami_id)
    if newami_id == 'available':
        logging.info(_green("Let's launch the instance..."))
        #launch_instance(ec2conn, ami_id)
        #ec2conn.run_instances(ami_id, key_name=keys, instance_type='m3.xlarge', dry_run=False)
    else:
        sendError(_red('Instance failed!'))
def tag_volumes(ec2conn):
    print(_green("Let's get info about all the volumes..."))
    volumes = ec2conn.get_all_volumes()
    for volume in volumes:
        instance = volume.attach_data.instance_id
        instance_tags = ec2conn.get_all_tags({'resource-id': instance})
        for instance_tag in instance_tags:
            print(_green('Adding tags to Volume: ') + _red(volume.id) + ' ' +
                  _green('Key: ') + _red(instance_tag.name) + ' ' +
                  _green('Value: ') + _red(instance_tag.value))
            volume.add_tag(instance_tag.name, instance_tag.value)
def config():
    ENVIRONMENT = os.environ.get("COUCHENV", "development")
    if ENVIRONMENT in ['prod', 'production']:
        config_file_path = './config/production.json'
    elif ENVIRONMENT in ['test', 'staging']:
        config_file_path = './config/staging.json'
    elif ENVIRONMENT in ['dev', 'development']:
        config_file_path = './config/dev.json'
    elif ENVIRONMENT in ['local']:
        config_file_path = './config/local.json'
    else:
        abort(_red('No COUCHENV or config file defined. Either define COUCHENV=[prod|test|dev] '
                   'or call fab config:/path/to/config/file spinup:[ID]'))

    opts = {}
    with open(config_file_path) as data_file:
        # Load in template data from the data file.
        opts = json.load(data_file)
    opts['environment'] = ENVIRONMENT.upper()

    # Ensure the required properties are present in the config file.
    required = ('user', 'ssh_keyfile', 'project_name', 'aws_access_key_id',
                'aws_secret_access_key', 'aws_ami', 'aws_keypair_name',
                'aws_ec2_region', 'aws_instance_type', 'aws_security_group')
    if all(k in opts for k in required):
        return opts
    else:
        print(_red('One or more required fields are missing. Ensure that the following '
                   'properties are included in your config file:\n'))
        print(_white('\n'.join(required)))
        abort('Aborting.')
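# An illustrative './config/dev.json' satisfying the required-key check above
# (values are placeholders only):
#
#     {
#         "user": "ubuntu",
#         "ssh_keyfile": "~/.ssh/id_rsa",
#         "project_name": "couchdb-cluster",
#         "aws_access_key_id": "...",
#         "aws_secret_access_key": "...",
#         "aws_ami": "ami-123456",
#         "aws_keypair_name": "couch-keypair",
#         "aws_ec2_region": "us-west-2",
#         "aws_instance_type": "m3.medium",
#         "aws_security_group": "couchdb"
#     }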
def sendError(message, parser=False):
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format=_magenta('%(asctime)-15s %(levelname)s >>>', bold=True) + ' %(message)s')
    logging.error(_red(message))
    if parser:
        parser.print_help()
    sys.exit(1)
def _destroy_instance(instance):
    """
    Destroys an instance.
    """
    conn = boto.connect_ec2(ec2_key, ec2_secret)
    try:
        conn.terminate_instances(instance_ids=[instance])
        print(_yellow("Instance {} destroyed successfully".format(instance)))
    except Exception as e:
        print(_red("Exception was raised: {}".format(str(e))))
def parse_ini(instance_type, check_all=True):
    parser = SafeConfigParser()
    parser.read(os.path.abspath('fabfile/conf/conf.ini'))
    parser.set('CONFIG', 'AFP_PATH', os.path.join(os.path.dirname(__file__), os.path.pardir))
    parser.set('CONFIG', 'SSH_SETTING_PATH', _base('settings/ssh'))
    parser.set('PRODUCTION_ENV_CONFIG', 'AWS_ACCESS_KEY_ID', ec2_key)
    parser.set('PRODUCTION_ENV_CONFIG', 'AWS_SECRET_ACCESS_KEY', ec2_secret)
    parser.set('STAGING_ENV_CONFIG', 'AWS_ACCESS_KEY_ID', ec2_key)
    parser.set('STAGING_ENV_CONFIG', 'AWS_SECRET_ACCESS_KEY', ec2_secret)

    fabconf = {}
    print(_green("Parsing conf.ini file"))
    for name, value in parser.items('CONFIG'):
        fabconf[name.upper()] = value
    for name, value in parser.items(env.environment.upper()):
        fabconf[name.upper()] = value

    if instance_type == 'messagingserver':
        fabconf['INSTANCE_NAME_TAG'] = "MessagingServer"
        fabconf['INSTANCE_RECIPE'] = "messagingserver"

    env_config = {}
    for name, value in parser.items('%s_ENV_CONFIG' % env.environment.upper()):
        env_config[name.upper()] = value
    fabconf['ENV_VARS'] = ','.join('{}="{}"'.format(i, k) for i, k in env_config.items())

    env.fabconf = fabconf
    env.env_config = env_config
    env.ec2_amis = [fabconf['EC2_AMIS']]
    env.ec2_keypair = fabconf['EC2_KEYPAIR']
    env.ec2_secgroups = [fabconf['EC2_SECGROUPS']]
    env.ec2_instancetype = fabconf['EC2_INSTANCETYPE']

    print(_yellow("SSH private key verification..."))
    try:
        open(fabconf['SSH_PRIVATE_KEY_PATH']).read()
    except Exception:
        print(_red("SSH private key does not exist in the provided path %s!"
                   % fabconf['SSH_PRIVATE_KEY_PATH']))
        exit()
    return fabconf, env_config
def _build_group(name, servers):
    """Recursively walk through the servers dictionary and search for all server records."""
    # We're going to reference server a lot, so we'd better store it.
    server = servers.get(name, None)
    # If `name` exists in the servers dictionary, we
    if server:
        # check whether it's a group by looking for `members`.
        if isinstance(server, list):
            if fabric.state.output['debug']:
                puts("%s is a group, getting members" % name)
            for item in server:
                # and call this function for each of them.
                _build_group(item, servers)
        # When, finally, we dig through to the standalone server records, we retrieve
        # their configs and store them in `env.group`.
        else:
            if fabric.state.output['debug']:
                puts("%s is a server, filling up env.group" % name)
            env.group[server['host']] = server
    else:
        print(_red('Error. "%s" config not found. Run `fab s` to list all available configs' % name))
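# A sketch of the `servers` structure _build_group walks (hypothetical names):
# a group maps to a list of member names, a standalone server maps to its
# config dict, which must carry at least a 'host' key.
#
#     servers = {
#         'web': ['web1', 'web2'],                 # group: list of members
#         'web1': {'host': 'web1.example.com'},
#         'web2': {'host': 'web2.example.com'},
#     }
#     _build_group('web', servers)  # fills env.group with both web servers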
def upload_s3(args):
    [key, value] = get_profile_credentials(args.profile)
    s3conn = tinys3.Connection(key, value, tls=True,
                               endpoint='s3-us-west-2.amazonaws.com')
    now = datetime.datetime.now()
    dt = now.strftime("%Y%m%d%H")
    for file in os.listdir(args.log_dir):
        if fnmatch.fnmatch(file, args.pattern):
            log = os.path.join(args.log_dir, file)
            upload_file = open(log, 'rb')
            s3filelocation = "hour_id=" + dt + "/" + file
            s3conn.upload(s3filelocation, upload_file, args.bucket_name)
            print(_green('Uploading file') + " " + _red(log) +
                  " to s3://s3-us-west-2.amazonaws.com/iedu-haproxy.log/" + s3filelocation)
def production():
    """
    Select the production environment for commands.
    """
    if not confirm(_red('Production environment selected, are you sure?'), default=False):
        return
    env.settings = 'production'
    env.webfaction_username = '******'
    env.webfaction_appname = 'fishknows'
    env.hosts = ['fishknows.com']
    _set_webfaction_env()
def create_bucket(bucket_name):
    '''Create a bucket'''
    start_time = time.time()
    conn = S3Connection(ec2_key, ec2_secret)
    print(_green("Creating bucket..."))
    try:
        bucket = conn.create_bucket(bucket_name=bucket_name)
        print(_green('Bucket "%s" successfully created' % bucket_name))
    except Exception as e:
        print(_red('Create bucket error: {}'.format(str(e))))
def get_db_info():
    rds_conn = boto.connect_rds2(profile_name=AWS_PROFILE)
    if not rds_conn:
        print(_red('Cannot connect to AWS.RDS'))
        return
    instances = rds_conn.describe_db_instances()
    if not instances:
        print(_red('No instances found'))
        return
    db_instances = instances['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
    for i, inst in enumerate(db_instances):
        dbinfo = {}
        endpoint = inst['Endpoint']
        dbinfo['VPCSecurityGroupId'] = inst['VpcSecurityGroups'][0]['VpcSecurityGroupId']
        dbinfo['dbSecurityGroupName'] = inst['DBSecurityGroups'][0]['DBSecurityGroupName']
        dbinfo['host'] = endpoint['Address']
        dbinfo['port'] = endpoint['Port']
        dbinfo['user'] = inst['MasterUsername']
        dbinfo['name'] = inst['DBName']
        dbinfo['instanceClass'] = inst['DBInstanceClass']
        dbinfo['dbID'] = inst['DBInstanceIdentifier']
        dbinfo['Engine'] = inst['Engine']
        dbinfo['EngineVersion'] = inst['EngineVersion']
        print('')
        print(_blue('db Info %d ===========>\n' % i))
        for item in dbinfo:
            print(_green('%20s : %s' % (item, dbinfo[item])))
def install():
    """
    Install the gmailbackup scripts and all required software.
    """
    sudo('apt-get -y install git')
    append('~/.ssh/config', 'Host github.com')
    append('~/.ssh/config', '\tStrictHostKeyChecking no')
    if not exists('gmailbackup'):
        print(_green("Getting gmailbackup"))
        run('git clone https://github.com/adamw523/gmailbackup.git')
    else:
        print(_red('Already have gmailbackup'))
def _virtualenv(command, env_path=None):
    """
    Run a command in the virtualenv. This prefixes the command with the
    source command.

    Usage:
        _virtualenv('pip install django', env_path='env')
    """
    if env_path is None:
        abort(_red("You must provide the parameter 'env_path' to the virtualenv function"))
    source = 'source %s/bin/activate && ' % env_path
    print(source + command)
    local(source + command)
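# A hedged usage sketch (assumes a virtualenv living at './env' relative to
# the fabfile; the path is illustrative):
#
#     _virtualenv('pip install -r requirements.txt', env_path='env')
#
# which runs: source env/bin/activate && pip install -r requirements.txt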
def _launch_gpu():
    '''
    Boots up a new GPU-based instance on EC2.
    '''
    print(_green("Started..."))
    print(_green("Creating EC2 instance..."))
    try:
        # Create the new instance
        conn = boto.ec2.connect_to_region(GPU_INSTANCE_REGION)
        reservation = conn.run_instances(
            GPU_INSTANCE_AMI_ID,
            key_name=GPU_INSTANCE_KEY,
            instance_type=GPU_INSTANCE_TYPE)

        # Assumes we're only using one instance
        instance = reservation.instances[0]

        # Wait for the instance to boot up
        status = instance.update()
        while status == 'pending':
            print(_yellow("Booting instance ..."))
            time.sleep(10)
            status = instance.update()

        # Once the instance is alive, do tagging and other post-activation work
        if status == 'running':
            print(_green("Instance booted! Tagging ..."))
            instance.add_tag('Name', GPU_INSTANCE_NAME)

        # Wait until the instance is accessible via SSH
        sshable = False
        while not sshable:
            print(_yellow("Waiting for SSH connection (this might take a minute) ..."))
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.connect((instance.public_dns_name, 22))
                sshable = True
                print(_green("SSH is now accessible!"))
            except socket.error:
                pass
            s.close()

        # Wrap up
        print(_green("Done!"))
        print(_green("ID: %s" % instance.id))
        print(_green("Public DNS: %s" % instance.public_dns_name))
    except:
        print(_red('Error creating instance.'))
        raise
    return
def createAmiImage(ec2conn, name, image_id, description):
    instances = ec2conn.get_only_instances(filters={"instance-id": image_id})
    logging.debug('Instances response: ' + pprint.pformat(instances))
    node = instances[0]
    logging.info(_yellow('Found instance id: ') + _red(node.id))
    ami_id = node.create_image(name, description=description, no_reboot=True, dry_run=False)
    logging.debug('Create image response: ' + pprint.pformat(ami_id))
    return ami_id
def gunicorn_status():
    """
    Check if gunicorn is running or not.
    """
    with settings(hide("running", "stdout", "stderr"), warn_only=True):
        result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
        if result:
            print(_green("Gunicorn is running fine......................"))
        else:
            print(_red("Gunicorn is not running ......................"))
            confirmation = confirm("Do you want to troubleshoot here?", default=True)
            if confirmation:
                print(_green("Outputting gunicorn error logs"))
                with show("debug", "stdout", "stderr"):
                    run("sudo tail -n 50 /applogs/gunicorn_error.logs")
def delete_bucket(bucket_name):
    '''Delete a bucket'''
    start_time = time.time()
    conn = S3Connection(ec2_key, ec2_secret)
    delete = prompt('Are you sure you want to delete this bucket (Y/N): ')
    if delete.upper() == 'Y':
        try:
            bucket = conn.get_bucket(bucket_name=bucket_name)
            print(_green("Deleting bucket..."))
            conn.delete_bucket(bucket)
            print(_yellow('Bucket "%s" successfully deleted' % bucket_name))
        except Exception as e:
            print(_red('Delete bucket error: {}'.format(str(e))))
def mongo_status():
    """
    Check if mongodb is running.
    """
    with settings(hide("running", "stderr", "stdout")):
        result = run('if ps aux | grep -v grep | grep -i "mongodb"; then echo 1; else echo ""; fi')
        if result:
            print(_green("Mongodb is running fine......................"))
        else:
            print(_red("Mongodb is not running ......................"))
            confirmation = confirm("Do you want to troubleshoot here? It will delete the mongod.lock file",
                                   default=True)
            if confirmation:
                run("sudo rm -rf /var/lib/mongodb/mongod.lock")
                run("sudo service mongodb restart")
    return
def remove_virtualenv_local(virtual_env_path=env.local_virtual_env_path):
    """
    * removes the local virtualenv environment, relative to the project.

    Usage:
        remove_localenv:path_to_env
        remove_localenv

    Options:
        path_to_env: Optional. Will default to one directory up from the
        fabfile, named 'env'. Effectively the path ['../'].
    """
    # TODO: needs error catching
    if os.path.isdir(virtual_env_path):
        print("Removing %s" % virtual_env_path)
        shutil.rmtree(virtual_env_path)
    else:
        print(_red("Virtualenv not found [%s]." % virtual_env_path))
def create_virtualenv_local(virtual_env_path=env.local_virtual_env_path):
    """
    * create a local virtualenv environment.

    Usage:
        create_virtual_local:path_to_env
        create_virtual_local

    Options:
        path_to_env: Optional. Will default to one directory up from the
        fabfile, named 'env'. Effectively the path ['../'].
    """
    if not os.path.isdir(virtual_env_path):
        with settings(warn_only=True):
            result = local("virtualenv --no-site-packages --distribute %s" % virtual_env_path)
            if result.failed and not confirm(_white("Installing virtualenv failed. Continue anyway?"),
                                             default=False):
                abort(_red("Aborting local staging."))
    print(_green("Virtualenv installed and detected."))
def nginx_status():
    """
    Check if nginx is running.
    """
    with settings(hide("running", "stderr", "stdout")):
        result = run('if ps aux | grep -v grep | grep -i "nginx"; then echo 1; else echo ""; fi')
        if result:
            print(_green("Nginx is running fine......................"))
        else:
            print(_red("Nginx is not running ......................"))
            confirmation = confirm("Do you want to troubleshoot here?", default=True)
            if confirmation:
                print(_green("Checking nginx configuration file"))
                with show("debug", "stdout", "stderr"):
                    run("sudo nginx -t")
                    run("sudo service nginx restart")
                    run("sudo tail -n 50 /applogs/nginx_error.logs")
    return
def _build_ansible_playbook(self, features):
    """ Write the ansible playbook from the application features """
    with open(self._ansible_playbook_path, "w") as stream_features:
        log("Ansible - Writing playbook: {0}".format(self._ansible_playbook_path), self._log_file)
        log(_yellow("Ansible - features: {0}".format(features[-1]['roles'])), self._log_file)
        try:
            yaml.safe_dump(features, stream_features, default_flow_style=False,
                           explicit_start=True, allow_unicode=True)
        except yaml.YAMLError as exc:
            log(_red("Ansible - ERROR writing playbook: {0}".format(exc)), self._log_file)
            raise
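# A sketch of the `features` list this method serializes (hypothetical
# content; the only shape assumed above is that the last entry carries a
# 'roles' key):
#
#     features = [{
#         'hosts': 'all',
#         'roles': ['nginx', 'gunicorn'],
#     }]
#
# yaml.safe_dump then writes it as an explicit-start YAML playbook.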
def log(service_name):
    """
    Show logs of nginx/celery/celerybeat/gunicorn.
    """
    start_time = time.time()
    print(_green("Started..."))
    env.environment = None
    while env.environment not in ('Staging', 'Production'):
        environment = prompt('Please specify target environment: ')
        setattr(env, 'environment', environment.strip().capitalize())
    try:
        fabconf, env_config = parse_ini('appserver', check_all=False)
    except Exception as e:
        print(_red('Exception parsing config file: {}'.format(str(e))))
        exit()
    env.user = fabconf['SERVER_USERNAME']
    env.key_filename = fabconf['SSH_PRIVATE_KEY_PATH']
    from recipes.default_appserver import log_services as recipe
    command = recipe['%s' % service_name]
    from misc import _oven
    conn = boto.connect_ec2(ec2_key, ec2_secret)
    reservations = conn.get_all_instances()
    instances = [i for r in reservations for i in r.instances]
    for instance in instances:
        tags = instance.tags
        if instance.state == 'running' and 'Env' in tags:
            if tags['Env'] == env.environment and tags['Name'] == 'AppServer':
                print(_yellow('Showing logs on instance: %s' % instance.id))
                env.host_string = instance.public_dns_name
                env.user = fabconf['SERVER_USERNAME']
                env.key_filename = fabconf['SSH_PRIVATE_KEY_PATH']
                _oven(command)
    end_time = time.time()
    print(_green("Runtime: %f minutes" % ((end_time - start_time) / 60)))
    print(_green(env.host_string))
def mongo_status():
    """
    Check if mongodb is running fine.
    """
    print("\n\n\t\t%s" % _yellow("Checking mongo status"))
    with settings(hide("running", "stderr", "stdout")):
        result = run('if ps aux | grep -v grep | grep -i "mongodb"; then echo 1; else echo ""; fi')
        if result:
            print("\n\n%s\n\n" % _green("Mongodb is running fine......................"))
        else:
            print(_red("Mongodb is not running ......................"))
            confirmation = confirm("Do you want to troubleshoot here? It will delete the mongod.lock file",
                                   default=True)
            if confirmation:
                run("sudo rm -rf /var/lib/mongodb/mongod.lock")
                run("sudo service mongodb restart")
    return
def nginx_status():
    """
    Check if nginx is running fine or not.
    """
    print("\t\t%s" % _yellow("Checking nginx status"))
    with settings(hide("running", "stderr", "stdout")):
        result = run('if ps aux | grep -v grep | grep -i "nginx"; then echo 1; else echo ""; fi')
        if result:
            print(_green("Nginx is running fine......................"))
        else:
            print(_red("Nginx is not running ......................"))
            confirmation = confirm("Do you want to troubleshoot here?", default=True)
            if confirmation:
                print(_green("Checking nginx configuration file"))
                with show("debug", "stdout", "stderr"):
                    run("sudo nginx -t")
                    run("sudo service nginx restart")
                    run("sudo tail -n 50 /applogs/nginx_error.logs")
    return
def add_to_load_balancer(ec2, load_balancer_name, instance, instance_availability_zone):
    load_balancer = ec2.elb_conn.get_all_load_balancers(['%s' % load_balancer_name])[0]
    try:
        # Make sure the instance's availability zone is covered by the load balancer.
        if instance_availability_zone:
            if not load_balancer.is_cross_zone_load_balancing():
                load_balancer.enable_cross_zone_load_balancing()
            load_balancer.enable_zones(['%s' % instance_availability_zone])
        # Register the new instance with the load balancer.
        load_balancer.register_instances(['%s' % instance])
        print(_yellow("EC2 Instance added to load balancer."))
    except Exception as e:
        print(_red("Failed to add EC2 Instance to load balancer (%s). Please add the instance manually." % str(e)))
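# A hedged call-site sketch (assumes `ec2` exposes a boto ELB connection as
# `elb_conn`, as used above; the ELB name and the use of boto's
# `instance.placement` for the availability zone are illustrative):
#
#     add_to_load_balancer(ec2, 'my-elb', instance.id, instance.placement)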
def _destroy_instances(self):
    log(_green("STATE: Started"), self._log_file)
    log(" CONF: Region: {0}".format(self._app['region']), self._log_file)
    try:
        log(_yellow(" INFO: Destroy all EC2 instances related to app {0} [{1}]".format(
            get_app_friendly_name(self._app), self._app['_id'])), self._log_file)
        destroy_ec2_instances(self._cloud_connection, self._app, self._log_file)
        self._worker.update_status("done", message="Instance deletion OK: [{0}]".format(self._app['name']))
        log(_green("STATE: End"), self._log_file)
    except Exception as e:
        self._worker.update_status("failed", message="Destroy all instances Failed: [{0}]\n{1}".format(
            self._app['name'], str(e)))
        log(_red("STATE: End"), self._log_file)
def red(msg):
    """Prints the message back in red."""
    print(_red(msg))
def create_ec2_instance(cloud_connection, app, app_color, config, private_ip_address, subnet_id, log_file):
    """ Creates an EC2 instance and returns it.

    :param cloud_connection: The app Cloud Connection object
    :param app: Ghost app document
    :param app_color: Color value if BlueGreen application type
    :param config: Ghost config settings
    :param private_ip_address: Private IP address to use when creating the instance
    :param subnet_id: Subnet to use when creating the instance
    :param log_file: Logging file

    :return the EC2 instance object with all its details
    """
    log(_yellow(" INFO: Creating User-Data"), log_file)
    ghost_root_path = config.get('ghost_root_path', '/usr/local/share/ghost/')
    userdata = generate_userdata(config['bucket_s3'], config.get('bucket_region', app['region']), ghost_root_path)

    log(_yellow(" INFO: Creating EC2 instance"), log_file)
    if app['ami']:
        log(" CONF: AMI: {0}".format(app['ami']), log_file)
        log(" CONF: Region: {0}".format(app['region']), log_file)

        conn = cloud_connection.get_connection(app['region'], ["ec2"])
        interface = cloud_connection.launch_service(
            ["ec2", "networkinterface", "NetworkInterfaceSpecification"],
            subnet_id=subnet_id,
            groups=app['environment_infos']['security_groups'],
            associate_public_ip_address=app['environment_infos'].get('public_ip_address', True),
            private_ip_address=private_ip_address
        )
        interfaces = cloud_connection.launch_service(
            ["ec2", "networkinterface", "NetworkInterfaceCollection"],
            interface
        )
        if 'root_block_device' in app['environment_infos']:
            bdm = create_block_device(cloud_connection, app['region'], app,
                                      app['environment_infos']['root_block_device'])
        else:
            bdm = create_block_device(cloud_connection, app['region'], app, {})
        reservation = conn.run_instances(
            image_id=app['ami'],
            key_name=app['environment_infos']['key_name'],
            network_interfaces=interfaces,
            instance_type=app['instance_type'],
            instance_profile_name=app['environment_infos']['instance_profile'],
            user_data=userdata,
            block_device_map=bdm
        )

        # Get the instance metadata
        instance = reservation.instances[0]
        if instance.id:
            # Wait for the instance to be ready before tagging
            while instance.state != u'running':
                log('Instance not running, waiting 10s before tagging.', log_file)
                time.sleep(10)
                instance.update()

            # Tagging
            for ghost_tag_key, ghost_tag_val in {'app': 'name', 'app_id': '_id', 'env': 'env', 'role': 'role'}.iteritems():
                log("Tagging instance [{id}] with '{tk}':'{tv}'".format(
                    id=instance.id, tk=ghost_tag_key, tv=str(app[ghost_tag_val])), log_file)
                conn.create_tags([instance.id], {ghost_tag_key: str(app[ghost_tag_val])})
            if app_color:
                log("Tagging instance [{id}] with '{tk}':'{tv}'".format(
                    id=instance.id, tk='color', tv=app_color), log_file)
                conn.create_tags([instance.id], {"color": app_color})

            tag_ec2_name = False
            if 'instance_tags' in app['environment_infos']:
                for app_tag in app['environment_infos']['instance_tags']:
                    log("Tagging instance [{id}] with '{tk}':'{tv}'".format(
                        id=instance.id, tk=app_tag['tag_name'], tv=app_tag['tag_value']), log_file)
                    conn.create_tags([instance.id], {app_tag['tag_name']: app_tag['tag_value']})
                    if app_tag['tag_name'] == 'Name':
                        tag_ec2_name = True
            if not tag_ec2_name:
                ec2_name = "ec2.{0}.{1}.{2}".format(app['env'], app['role'], app['name'])
                log("Tagging instance [{id}] with '{tk}':'{tv}'".format(
                    id=instance.id, tk='Name', tv=ec2_name), log_file)
                conn.create_tags([instance.id], {'Name': ec2_name})

            log(" CONF: Private IP: %s" % instance.private_ip_address, log_file)
            log(" CONF: Public IP: %s" % instance.ip_address, log_file)
            log(" CONF: Public DNS: %s" % instance.public_dns_name, log_file)
            return instance
        else:
            log(_red("ERROR: Cannot get instance metadata. Please check the AWS Console."), log_file)
            raise GCallException("ERROR: Cannot get instance metadata. Please check the AWS Console.")
    else:
        log(_red("ERROR: No AMI set, please use buildimage before"), log_file)
        raise GCallException("ERROR: No AMI set, please use buildimage before")
def _get_notification_message_failed(app, msg):
    notif = "Blue/green purge failed for [{0}]: {1}".format(get_app_friendly_name(app), msg)
    return _red(notif)
def execute(self):
    try:
        log(_green("STATE: Started"), self._log_file)
        options = self._job.get('options', [])
        rolling_update_strategy = options[0] if len(options) > 0 else None
        as_conn = self._cloud_connection.get_connection(self._app['region'], ['autoscaling'],
                                                        boto_version='boto3')
        if not self._app['autoscale']['name'] or not check_autoscale_exists(
                self._cloud_connection, self._app['autoscale']['name'], self._app['region']):
            log(_yellow(" INFO: No AutoScale specified, this command will destroy and recreate standalone instances"),
                self._log_file)

            destroyed_instances_info = destroy_ec2_instances(self._cloud_connection, self._app,
                                                             self._log_file, "running")
            destroyed_count = len(destroyed_instances_info)
            if destroyed_count == 0:
                self._worker.update_status(
                    "aborted",
                    message="Re-create instances aborted, no instances found: [{0}]".format(self._app['name']))
                return

            log(_yellow(" INFO: Waiting for instances to be destroyed before re-creating them with the same network parameters."),
                self._log_file)
            while not test_ec2_instance_status(
                    self._cloud_connection, self._app['region'],
                    [host['id'] for host in destroyed_instances_info], "terminated"):
                log("Waiting 10s", self._log_file)
                time.sleep(10)

            for destroyed_instance in destroyed_instances_info:
                create_ec2_instance(self._cloud_connection, self._app, self._color, self._config,
                                    destroyed_instance['private_ip_address'],
                                    destroyed_instance['subnet_id'],
                                    self._log_file)

            self._worker.update_status("done", message="Re-create instances OK: [{0}]".format(self._app['name']))
            log(_green("STATE: End"), self._log_file)
        else:
            if rolling_update_strategy:
                log(_yellow(" INFO: Destroy all EC2 instances related to app {0} [{1}] using rolling update strategy ({2})".format(
                    get_app_friendly_name(self._app), self._app['_id'], rolling_update_strategy)),
                    self._log_file)
            else:
                log(_yellow(" INFO: Destroy all EC2 instances related to app {0} [{1}] and let the AutoScale ({2}) recreate them".format(
                    get_app_friendly_name(self._app), self._app['_id'], self._app['autoscale']['name'])),
                    self._log_file)
            rollup = RollingUpdate(self._cloud_connection, self._app,
                                   self._app['safe-deployment'], self._log_file)
            rollup.do_rolling(rolling_update_strategy)
            self._worker.update_status("done", message="Re-create instances OK: [{0}]".format(self._app['name']))
            log(_green("STATE: End"), self._log_file)
    except Exception as e:
        self._worker.update_status("failed", message="Re-create instances Failed: [{0}]\n{1}".format(
            self._app['name'], str(e)))
        log(_red("STATE: End"), self._log_file)
def _get_notification_message_failed(online_app, offline_app, e):
    app_name = get_app_friendly_name(online_app)
    notif = "Blue/green preparation failed for [{0}] between [{1}] and [{2}]: {3}".format(
        app_name, online_app['_id'], offline_app['_id'], str(e))
    return _red(notif)
def gpu_go():
    _set_env()
    if env.instance:
        api.local('ssh %s@%s' % (env.user, env.host))
    else:
        print(_red("No GPU instance running. Try fab gpu_up first."))
def _get_notification_message_failed(online_app, to_deploy_app, msg):
    app_name = get_app_friendly_name(online_app)
    notif = "Blue/green swap failed for [{0}] between [{1}] and [{2}]: {3}".format(
        app_name, online_app['_id'], to_deploy_app['_id'], msg)
    return _red(notif)
def _swap_asg(self, lb_mgr, swap_execution_strategy, online_app, to_deploy_app, log_file):
    """ Swap the group of instances from A to B attached to the main ELB.

    :param swap_execution_strategy: string: The swap strategy, which can be 'isolated' or 'overlap'
    :param online_app: object: Ghost app - ASG instances to de-register
    :param to_deploy_app: object: Ghost app - ASG instances to register
    :param log_file: str:
    :return tuple (Main ELB name, Main ELB dns)
    """
    app_region = self._app['region']
    as_conn3 = self._cloud_connection.get_connection(app_region, ['autoscaling'], boto_version='boto3')

    # Retrieve autoscaling infos, if any
    as_group_old, as_group_old_processes_to_suspend = get_autoscaling_group_and_processes_to_suspend(
        as_conn3, online_app, log_file)
    as_group_new, as_group_new_processes_to_suspend = get_autoscaling_group_and_processes_to_suspend(
        as_conn3, to_deploy_app, log_file)

    # Retrieve ELB instances
    elb_online_instances = lb_mgr.get_instances_status_from_autoscale(
        online_app['autoscale']['name'], log_file)
    log(_green('Online configuration : {0}'.format(str(elb_online_instances))), self._log_file)
    elb_tempwarm_instances = lb_mgr.get_instances_status_from_autoscale(
        to_deploy_app['autoscale']['name'], log_file)
    log(_green('Offline configuration : {0}'.format(str(elb_tempwarm_instances))), self._log_file)

    elb_online, health_check_config = (None, None)
    try:
        log("Swapping using strategy '{0}'".format(swap_execution_strategy), self._log_file)

        # Suspend autoscaling groups
        suspend_autoscaling_group_processes(as_conn3, as_group_old, as_group_old_processes_to_suspend, log_file)
        suspend_autoscaling_group_processes(as_conn3, as_group_new, as_group_new_processes_to_suspend, log_file)

        # Retrieve the online ELB object
        elb_online = lb_mgr.get_by_name(elb_online_instances.keys()[0])
        health_check_config = lb_mgr.get_health_check(elb_online.name)
        log(_green('Changing HealthCheck to be "minimal" on online ELB "{0}"'.format(elb_online)),
            self._log_file)
        lb_mgr.configure_health_check(
            elb_online.name,
            interval=get_blue_green_config(self._config, 'swapbluegreen', 'healthcheck_interval', 5),
            timeout=get_blue_green_config(self._config, 'swapbluegreen', 'healthcheck_timeout', 2),
            healthy_threshold=get_blue_green_config(self._config, 'swapbluegreen',
                                                    'healthcheck_healthy_threshold', 2))
        if swap_execution_strategy == 'isolated':
            log(_green('De-register all online instances from ELB {0}'.format(
                ', '.join(elb_online_instances.keys()))), self._log_file)
            lb_mgr.deregister_all_instances_from_lbs(elb_online_instances, self._log_file)
            self._wait_draining_connection(lb_mgr, elb_online_instances.keys())
            log(_green('Register and put online new instances to online ELB {0}'.format(
                ', '.join(elb_online_instances.keys()))), self._log_file)
            lb_mgr.register_all_instances_to_lbs(elb_online_instances.keys(),
                                                 elb_tempwarm_instances, self._log_file)
        elif swap_execution_strategy == 'overlap':
            log(_green('De-register old instances from ELB {0}'.format(
                ', '.join(elb_online_instances.keys()))), self._log_file)
            lb_mgr.deregister_all_instances_from_lbs(elb_online_instances, self._log_file)
            log(_green('Register new instances in the ELB: {0}'.format(
                elb_online['LoadBalancerName'])), self._log_file)
            lb_mgr.register_all_instances_to_lbs(elb_online_instances.keys(),
                                                 elb_tempwarm_instances, self._log_file)
        else:
            log("Invalid swap execution strategy selected : '{0}'. "
                "Please choose between 'isolated' and 'overlap'".format(swap_execution_strategy),
                self._log_file)
            return None, None

        if not self._wait_until_instances_registered(
                lb_mgr, elb_online_instances.keys(),
                get_blue_green_config(self._config, 'swapbluegreen', 'registreation_timeout', 45)):
            log(_red("Timeout reached while waiting for the instances registration. Launching rollback."),
                self._log_file)
            lb_mgr.deregister_instances_from_lbs(
                elb_online_instances.keys(),
                elb_tempwarm_instances[elb_tempwarm_instances.keys()[0]].keys(),
                self._log_file)
            lb_mgr.register_all_instances_to_lbs(elb_online_instances.keys(),
                                                 elb_online_instances, self._log_file)
            lb_mgr.register_all_instances_to_lbs(elb_tempwarm_instances.keys(),
                                                 elb_tempwarm_instances, self._log_file)
            log(_yellow("Rollback completed."), self._log_file)
            return None, None

        log(_green('De-register all instances from temp (warm) ELB {0}'.format(
            ', '.join(elb_tempwarm_instances.keys()))), self._log_file)
        lb_mgr.deregister_all_instances_from_lbs(elb_tempwarm_instances, self._log_file)

        log(_green('Register old instances to temp ELB {0} (useful for another rollback swap)'.format(
            ', '.join(elb_tempwarm_instances.keys()))), self._log_file)
        lb_mgr.register_all_instances_to_lbs(elb_tempwarm_instances.keys(),
                                             elb_online_instances, self._log_file)

        log(_green('Update autoscale groups with their new ELB'), self._log_file)
        lb_mgr.register_lbs_into_autoscale(to_deploy_app['autoscale']['name'],
                                           elb_tempwarm_instances.keys(),
                                           elb_online_instances.keys(), self._log_file)
        lb_mgr.register_lbs_into_autoscale(online_app['autoscale']['name'],
                                           elb_online_instances.keys(),
                                           elb_tempwarm_instances.keys(), self._log_file)

        # Update the _is_online field in DB on both apps
        self._update_app_is_online(online_app, False)  # not online anymore
        self._update_app_is_online(to_deploy_app, True)  # promotion!

        online_elb_name = elb_online_instances.keys()[0]
        return str(online_elb_name), lb_mgr.get_dns_name(online_elb_name)
    finally:
        if elb_online and health_check_config:
            log(_green('Restoring original HealthCheck config on online ELB "{0}"'.format(
                elb_online['LoadBalancerName'])), self._log_file)
            lb_mgr.configure_health_check(elb_online['LoadBalancerName'], **health_check_config)
        resume_autoscaling_group_processes(as_conn3, as_group_old,
                                           as_group_old_processes_to_suspend, log_file)
        resume_autoscaling_group_processes(as_conn3, as_group_new,
                                           as_group_new_processes_to_suspend, log_file)
def error(self):
    print(_red(self.message))
def error_exit(self):
    print(_red("Failed! Exiting."))
    print(_red(self.message))
    raise SystemExit()