def _create_ec2_instance():
    """
    Creates EC2 Instance
    """
    print(_yellow("Creating instance"))
    conn = boto.ec2.connect_to_region(
        ec2_region,
        aws_access_key_id=fabconf['AWS_ACCESS_KEY'],
        aws_secret_access_key=fabconf['AWS_SECRET_KEY'])
    image = conn.get_all_images(ec2_amis)
    reservation = image[0].run(1, 1, ec2_keypair, ec2_secgroups,
                               instance_type=ec2_instancetype)
    instance = reservation.instances[0]
    conn.create_tags([instance.id], {"Name": fabconf['INSTANCE_NAME_TAG']})
    while instance.state == u'pending':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(10)
        instance.update()
    print(_green("Instance state: %s" % instance.state))
    print(_green("Public dns: %s" % instance.public_dns_name))
    return instance.public_dns_name

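# The _green/_yellow/_red/_white/_blue helpers used throughout these tasks are
# not defined in this section; a minimal sketch, assuming they are the standard
# Fabric 1.x color functions imported under private aliases:
from fabric.colors import (green as _green, yellow as _yellow, red as _red,
                           white as _white, blue as _blue)
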
def heroku_deploy():
    if not heroku_remote_exists():
        # check if the heroku toolbelt is available
        heroku_bin = local('which heroku', capture=True)
        if not heroku_bin:
            install_heroku_toolbelt()
        # log in to heroku
        selected_account = heroku_select_account()
        local('heroku login --account %s' % selected_account)
        # select or create an app
        app_name = heroku_select_or_create_app(selected_account)
        # add redis?
        add_redis = prompt(_white(
            "Type 'y' to add Redis To Go to your Heroku application "
            "(to use an external service, update your settings file): "))
        # guard against an empty answer before calling .lower()
        if add_redis and add_redis.lower() == 'y':
            heroku_add_redis(app=app_name, account=selected_account)
        check_git()
        # add git remote
        local('git remote add heroku heroku.com:%s.git' % app_name)
        print(_green("You are ready to commit any changes and deploy to "
                     "heroku by typing: %s"
                     % _white("git push heroku <name_of_branch>")))
        return None
    print(_green("Looks like there is already a heroku remote set up. Huzzah!"))

def allocate_and_assign_ip(name):
    """
    Allocates an elastic IP and associates it with the instance tagged `name`.
    """
    print(_green("Assigning an elastic IP to {}...".format(name)))
    conn = connect_to_ec2()
    filters = {"tag:Name": name}
    ip = None
    for reservation in conn.get_all_instances(filters=filters):
        for instance in reservation.instances:
            ip = conn.allocate_address()
            ip.associate(instance.id)
            break
        break
    if ip:
        with open("deploy/fab_hosts/{}.txt".format(name), "w") as f:
            f.write(ip.public_ip)
        # update ssh address
        with open(os.path.join(env.ssh_directory, ''.join([name, '.json'])), 'r') as f:  # noqa
            host_data = json.load(f)
        host_data['host_string'] = ip.public_ip
        with open(os.path.join(env.ssh_directory, ''.join([name, '.json'])), 'w') as f:  # noqa
            json.dump(host_data, f)
        # update node address
        with open("deploy/settings.json", 'r') as f:  # noqa
            settings_json = json.load(f)
        settings_json['EC2_DNS'] = ip.public_ip
        with open("deploy/settings.json", 'w') as f:  # noqa
            json.dump(settings_json, f)
        print(_green("Your new elastic IP is {}.".format(ip.public_ip)))

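# connect_to_ec2() is referenced here (and in terminate_instance below) but
# defined elsewhere; a minimal sketch, assuming credentials and region live on
# Fabric's env object -- the attribute names are assumptions, not confirmed by
# this section:
import boto.ec2
from fabric.api import env

def connect_to_ec2():
    """Return a boto EC2 connection for the configured region (sketch)."""
    return boto.ec2.connect_to_region(
        env.ec2_region,
        aws_access_key_id=env.aws_access_key_id,
        aws_secret_access_key=env.aws_secret_access_key)
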
def create_instances():
    """
    Creates EC2 Instance
    """
    print(_green("Started..."))
    print(_yellow("...Creating EC2 instance(s)..."))
    ec2_region = config.get(Config.AWS, Config.AWS_EC2_REGION)
    ec2_key = config.get(Config.AWS, Config.AWS_ACCESS_KEY)
    ec2_secret = config.get(Config.AWS, Config.AWS_SECRET_KEY)
    conn = boto.ec2.connect_to_region(ec2_region,
                                      aws_access_key_id=ec2_key,
                                      aws_secret_access_key=ec2_secret)
    ec2_ami = config.get(Config.AWS, Config.AWS_EC2_AMI)
    image = conn.get_image(ec2_ami)
    ec2_key_pair = config.get(Config.AWS, Config.AWS_EC2_KEY_PAIR)
    ec2_instance_type = config.get(Config.AWS, Config.AWS_EC2_INSTANCE_TYPE)
    num_instances = int(config.get(Config.AWS, Config.AWS_NUM_INSTANCES))
    # num_instances is passed twice because run() takes a min and a max count;
    # conn.get_image() returns a single Image, so call run() on it directly
    reservation = image.run(num_instances, num_instances,
                            key_name=ec2_key_pair,
                            instance_type=ec2_instance_type)
    while check_instances_pending(reservation.instances):
        print(_yellow("Instances still pending"))
        time.sleep(10)
    for instance in reservation.instances:
        print(_green("Instance state: %s" % instance.state))
        print(_green("Public dns: %s" % instance.public_dns_name))

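# check_instances_pending() is used above but not shown in this section; a
# minimal sketch, assuming it refreshes each instance and reports whether any
# is still pending:
def check_instances_pending(instances):
    """Return True while any instance in the list is still pending (sketch)."""
    pending = False
    for instance in instances:
        instance.update()
        if instance.state == u'pending':
            pending = True
    return pending
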
def s(*args, **kwargs):
    """Set destination servers or server groups by comma delimited list of names"""
    # Load config
    servers = _load_config(**kwargs)
    # If no arguments were received, print a message with a list of available configs.
    if not args:
        print('No server name given. Available configs:')
        for key in servers:
            print(_green('\t%s' % key))
    # Create `group` - a dictionary containing copies of configs for selected
    # servers. Server hosts are used as dictionary keys, which allows us to
    # connect the current command's destination host with the correct config.
    # This is important because somewhere along the way Fabric messes up the
    # hosts order, so simple list index incrementation won't suffice.
    env.group = {}
    # For each given server name
    for name in args:
        # Recursive call to retrieve all server records. If `name` is a group
        # (e.g. `all`), get its members, iterate through them and create a
        # `group` record for each. Otherwise, get fields from the `name` server
        # record. If the requested server is not in the settings dictionary,
        # output an error message and list all available servers.
        _build_group(name, servers)
    # Copy server hosts from `env.group` keys - this gives us a complete list
    # of unique hosts to operate on. No host is added twice, so we can safely
    # add overlapping groups. Each added host is guaranteed to have a config
    # record in `env.group`.
    env.hosts = env.group.keys()

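# _build_group() is referenced above but not shown; a minimal sketch of the
# recursion the comments describe -- the 'members' and 'host' field names are
# assumptions for illustration only:
def _build_group(name, servers):
    """Recursively expand group names into individual server records (sketch)."""
    if name not in servers:
        print(_red('Unknown server "%s". Available configs:' % name))
        for key in servers:
            print(_green('\t%s' % key))
        return
    record = servers[name]
    if 'members' in record:
        # `name` is a group: recurse into each member
        for member in record['members']:
            _build_group(member, servers)
    else:
        # `name` is a single server: key its config by host
        env.group[record['host']] = record
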
def execute(self):
    subnet_id = (self._job['options'][0]
                 if 'options' in self._job and len(self._job['options']) > 0
                 else self._app['environment_infos']['subnet_ids'][0])
    private_ip_address = (self._job['options'][1]
                          if 'options' in self._job and len(self._job['options']) > 1
                          else None)
    try:
        log(_green("STATE: Started"), self._log_file)
        instance = create_ec2_instance(self._cloud_connection, self._app,
                                       self._color, self._config,
                                       private_ip_address, subnet_id,
                                       self._log_file)
        self._worker.update_status(
            "done",
            message="Creating Instance OK: [{0}]\n\nPublic IP: {1}".format(
                self._app['name'], str(instance.ip_address)))
        log(_green("STATE: End"), self._log_file)
    except Exception as e:
        self._worker.update_status(
            "failed",
            message="Creating Instance Failed: [{0}]\n{1}".format(
                self._app['name'], e))
        log(_red("STATE: END"), self._log_file)

def launch_instance(count):
    """
    Provisions ec2 instance(s). For eg, to launch 3 instances you would run:
    fab launch_instance:3
    """
    print(_green("Started........"))
    print(_yellow("...............Creating EC2 instance(s)..."))
    count = int(count)  # fab passes task arguments as strings
    conn = boto.connect_ec2(aws_access_key_id="XXXXX",
                            aws_secret_access_key="XXXXXX")
    # get_all_images() expects a list of image ids
    image = conn.get_all_images(["ami-4b814f22"])
    reservation = image[0].run(count, count,
                               key_name="fabric",
                               security_groups=['default'],
                               instance_type="m1.small",
                               user_data=get_script())
    instance_list = reservation.instances
    print(instance_list)
    for instance in instance_list:
        conn.create_tags([instance.id], {"Name": "jetty"})
        while instance.state == u'pending':
            print(_yellow("Instance state: %s" % instance.state))
            time.sleep(5)
            instance.update()
        print(_green("Instance state: %s" % instance.state))
        print(_green("Public dns: %s" % instance.public_dns_name))

def _create_server():
    """
    Creates EC2 Instance
    """
    print(_yellow("Creating instance"))
    conn = boto.connect_ec2(ec2_key, ec2_secret)
    assert ec2_amis is not None
    image = conn.get_all_images(ec2_amis)
    reservation = image[0].run(1, 1, ec2_keypair, ec2_secgroups,
                               instance_type=ec2_instancetype)
    instance = reservation.instances[0]
    conn.create_tags([instance.id], {"Name": fabconf['INSTANCE_NAME_TAG']})
    conn.create_tags([instance.id], {"Env": env.environment})
    conn.create_tags([instance.id], {"Recipe": fabconf['INSTANCE_RECIPE']})
    while instance.state == u'pending':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(10)
        instance.update()
    print(_green("Instance state: %s" % instance.state))
    print(_green("Public dns: %s" % instance.public_dns_name))
    return instance.public_dns_name, instance.id

def create_server():
    """
    Creates EC2 Instance
    """
    print(_green("Started..."))
    print(_yellow("...Creating EC2 instance..."))
    conn = boto.ec2.connect_to_region('us-west-2',
                                      aws_access_key_id=ec2_key,
                                      aws_secret_access_key=ec2_secret)
    image = conn.get_all_images(ec2_amis)
    group = conn.get_all_security_groups(groupnames=['quicklaunch-2'])[0]
    group.authorize(ip_protocol='tcp', from_port='22', to_port='22',
                    cidr_ip='0.0.0.0/0')
    group.authorize(ip_protocol='tcp', from_port='80', to_port='80',
                    cidr_ip='0.0.0.0/0')
    reservation = image[0].run(1, 1, key_name=ec2_key_pair,
                               security_groups=ec2_security,
                               instance_type=ec2_instancetype)
    instance = reservation.instances[0]
    conn.create_tags([instance.id], {"Name": config['INSTANCE_NAME_TAG']})
    while instance.state == u'pending':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(10)
        instance.update()
    print(_green("Instance state: %s" % instance.state))
    print(_green("Public dns: %s" % instance.public_dns_name))
    return instance.public_dns_name
    # install the things
    # deploy with github

def create_server():
    """
    Creates EC2 Instance
    """
    print(_green("Started..."))
    print(_yellow("...Creating EC2 instance..."))
    conn = boto.ec2.connect_to_region(
        settings.EC2_REGION,
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
    image = conn.get_image(settings.EC2_AMI)
    reservation = image.run(1, 1, key_name=settings.EC2_KEY_PAIR,
                            security_groups=[settings.EC2_SECURITY],
                            instance_type=settings.EC2_INSTANCE_TYPE)
    instance = reservation.instances[0]
    conn.create_tags([instance.id], {"Name": settings.EC2_TAG})
    while instance.state == u'pending':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(10)
        instance.update()
    print(_green("Instance state: %s" % instance.state))
    print(_green("Public dns: %s" % instance.public_dns_name))
    return instance.public_dns_name

def server():
    """
    Creates an EC2 Instance
    """
    print(_yellow("Creating EC2 instance..."))
    conn = boto.ec2.connect_to_region(
        AWS_REGION,
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY
    )
    image = conn.get_all_images(AWS_AMIS)
    group = conn.get_all_security_groups(groupnames=[AWS_SECURITY])[0]
    # run() expects a list of security group names
    reservation = image[0].run(
        1, 1,
        key_name=AWS_KEYPAIR,
        security_groups=[AWS_SECURITY],
        instance_type=AWS_INSTANCE_TYPE
    )
    instance = reservation.instances[0]
    conn.create_tags([instance.id], {"Name": INSTANCE_NAME_TAG})
    while instance.state == u"pending":
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(10)
        instance.update()
    print(_green("Instance state: %s" % instance.state))
    print(_green("Public dns: %s" % instance.public_dns_name))
    return instance.public_dns_name

def restart_gunicorn():
    with cd("/home/ubuntu/VirtualEnvironment/"):
        with prefix("source bin/activate"):
            result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
            if result:
                print("\n\n%s\n\n" % _green("Gunicorn is running"))
                confirmation = confirm("Do you want to restart gunicorn?", default=True)
                if confirmation:
                    pid = run("ps aux | grep gunicorn | awk 'NR==1 {print $2}'")
                    run("sudo kill -9 %s" % pid)
                    result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
                    if not result:
                        print("\n\n%s\n\n" % _red("Gunicorn has been stopped and is starting with the new repo"))
                        with cd("canworks"):
                            run("gunicorn -c canworks/configs/gunicorn_config.py api:app")
                        result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
                        if result:
                            print("\n\n%s\n\n" % _green("Gunicorn is running"))
                        else:
                            print("\n\n%s\n\n" % _red("Gunicorn is not running; you need to log in to the server"))
                else:
                    print("\n\n%s\n\n" % _red("Gunicorn has not been stopped"))
    return

def create_server():
    """
    Creates EC2 Instance
    """
    print(_green("Started..."))
    print(_red("...Creating Funnnnn EC2 instance..."))
    conn = boto.ec2.connect_to_region(ec2_region,
                                      aws_access_key_id=ec2_key,
                                      aws_secret_access_key=ec2_secret)
    image = conn.get_all_images(ec2_amis)
    reservation = image[0].run(1, 1, key_name=ec2_key_pair,
                               instance_type=ec2_instancetype,
                               user_data=user_data_script)
    instance = reservation.instances[0]
    conn.create_tags([instance.id], {Key: Tag})
    conn.create_tags([instance.id], {'Name': Name})
    while instance.state == u'pending':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(10)
        instance.update()
    print(_green("Instance state: %s" % instance.state))
    print(_green("Public dns: %s" % instance.public_dns_name))
    return instance.public_dns_name

def execute(self):
    if not boolify(self._config.get('enable_executescript_command', True)):
        return self._abort("This command has been disabled by your administrator.")
    script = (self._job['options'][0]
              if 'options' in self._job and len(self._job['options']) > 0
              else None)
    module_name = (self._job['options'][1]
                   if 'options' in self._job and len(self._job['options']) > 1
                   else None)
    execution_strategy = (self._job['options'][2]
                          if 'options' in self._job and len(self._job['options']) > 2
                          else None)
    if execution_strategy == 'single':
        # option[3] is a single Host IP
        fabric_execution_strategy = None
        safe_deployment_strategy = None
        single_host_ip = (self._job['options'][3]
                          if 'options' in self._job and len(self._job['options']) > 3
                          else None)
    else:
        # option[2] is fabric type, option[3] might be Safe deploy group param
        fabric_execution_strategy = execution_strategy
        safe_deployment_strategy = (self._job['options'][3]
                                    if 'options' in self._job and len(self._job['options']) > 3
                                    else None)
        single_host_ip = None
    try:
        log(_green("STATE: Started"), self._log_file)
        try:
            if not script or not script.strip():
                return self._abort("No valid script provided")
            script_data = b64decode_utf8(script)
            allowed_shebang = ('#!/bin/bash', '#! /bin/bash',
                               '#!/bin/sh', '#! /bin/sh')
            if not script_data.startswith(allowed_shebang):
                return self._abort("No valid shell script provided (shebang missing)")
        except Exception:
            return self._abort("No valid script provided")
        if single_host_ip:
            log(_yellow("Executing script on a single host: %s" % single_host_ip),
                self._log_file)
            self._exec_script_single_host(script_data, module_name, single_host_ip)
        else:
            log(_yellow("Executing script on every running instance"), self._log_file)
            self._exec_script(script_data, module_name,
                              fabric_execution_strategy, safe_deployment_strategy)
        self._worker.update_status("done",
                                   message=self._get_notification_message_done())
        log(_green("STATE: End"), self._log_file)
    except Exception as e:
        self._worker.update_status("failed",
                                   message=self._get_notification_message_failed(e))
        log(_red("STATE: End"), self._log_file)

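# b64decode_utf8() is used above but defined elsewhere; a minimal sketch,
# assuming it simply decodes a base64 payload into UTF-8 text:
import base64

def b64decode_utf8(data):
    """Decode a base64-encoded string and return it as UTF-8 text (sketch)."""
    return base64.b64decode(data).decode('utf-8')
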
def fab():
    """
    This does the real work for the ulous() task. Is here to provide
    backwards compatibility
    """
    start_time = time.time()
    print(_green("Started..."))
    env.host_string = _create_server()
    print(_green("Waiting 30 seconds for server to boot..."))
    time.sleep(30)
    print(_green("Polling server..."))
    retries = 6
    while retries > 0:
        retries -= 1
        try:
            _run('ls')
        except NetworkError:
            if retries:
                time.sleep(5)
            else:
                raise
        else:
            break
    _oven()
    end_time = time.time()
    print(_green("Runtime: %f minutes" % ((end_time - start_time) / 60)))
    print(_green(env.host_string))

def gpu_up():
    _set_env()
    if env.host:
        print(_green("GPU instance already running at: " + env.host))
    else:
        _launch_gpu()
        _set_env()
        _bootstrap_gpu()
        print(_green("GPU instance now running at: " + env.host))

def tag_volumes(ec2conn):
    print(_green("Let's get info about all the volumes..."))
    volumes = ec2conn.get_all_volumes()
    for volume in volumes:
        instance = volume.attach_data.instance_id
        instance_tags = ec2conn.get_all_tags({'resource-id': instance})
        for instance_tag in instance_tags:
            print(_green('Adding tags to Volume: ') + _red(volume.id) + ' ' +
                  _green('Key: ') + _red(instance_tag.name) + ' ' +
                  _green('Value: ') + _red(instance_tag.value))
            volume.add_tag(instance_tag.name, instance_tag.value)

def roBoto():
    print(_green("Konnichiwa, human!"))
    # check_env_vars("AWS_KEYPAIR", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY")
    # the host string is the return value of server, which is the public DNS
    # env.host_string = server()
    # print(_green("Waiting for server to boot..."))
    # time.sleep(30)
    download_services()
    print(_green("Domo arigato, human!"))

def fab():
    """
    This does the real work for the ulous() task. Is here to provide
    backwards compatibility
    """
    start_time = time.time()
    print(_green("Started..."))
    env.host_string = fabconf['SERVER_PRODUCTION_HOSTNAME']
    _oven()
    end_time = time.time()
    print(_green("Runtime: %f minutes" % ((end_time - start_time) / 60)))
    print(_green(env.host_string))

def _bootstrap_gpu():
    _set_env()
    print(_green("Bootstrapping (watch for prompts) ..."))
    with api.settings(warn_only=True, host_string=env.host):
        api.put(local_path='./dotfiles/.bashrc',
                remote_path='/home/ubuntu/.bashrc')
        api.run('git clone %s' % env.project_repo)
        api.run('sudo apt-get update')
        api.run('sudo apt-get install python-dev python-setuptools '
                'pkg-config liblapack-dev')
        api.run('sudo easy_install pip')
        api.run('sudo -H pip install -r ~/%s/requirements.txt' % env.project_name)
    print(_green("Done!"))

def parse_ini(instance_type, check_all=True):
    parser = SafeConfigParser()
    parser.read(os.path.abspath('fabfile/conf/conf.ini'))
    parser.set('CONFIG', 'AFP_PATH',
               os.path.join(os.path.dirname(__file__), os.path.pardir))
    parser.set('CONFIG', 'SSH_SETTING_PATH', _base('settings/ssh'))
    parser.set('PRODUCTION_ENV_CONFIG', 'AWS_ACCESS_KEY_ID', ec2_key)
    parser.set('PRODUCTION_ENV_CONFIG', 'AWS_SECRET_ACCESS_KEY', ec2_secret)
    parser.set('STAGING_ENV_CONFIG', 'AWS_ACCESS_KEY_ID', ec2_key)
    parser.set('STAGING_ENV_CONFIG', 'AWS_SECRET_ACCESS_KEY', ec2_secret)
    fabconf = {}
    print(_green("Parsing conf.ini file"))
    for name, value in parser.items('CONFIG'):
        # print ' %s = %s' % (name, value)
        fabconf[name.upper()] = value
    for name, value in parser.items('%s' % env.environment.upper()):
        # print ' %s = %s' % (name, value)
        fabconf[name.upper()] = value
    if instance_type == 'messagingserver':
        fabconf['INSTANCE_NAME_TAG'] = "MessagingServer"
        fabconf['INSTANCE_RECIPE'] = "messagingserver"
    env_config = {}
    for name, value in parser.items('%s_ENV_CONFIG' % env.environment.upper()):
        # print ' %s = %s' % (name, value)
        env_config[name.upper()] = value
    fabconf['ENV_VARS'] = ','.join('{}="{}"'.format(i, k)
                                   for i, k in env_config.items())
    env.fabconf = fabconf
    env.env_config = env_config
    env.ec2_amis = [fabconf['EC2_AMIS']]
    env.ec2_keypair = fabconf['EC2_KEYPAIR']
    env.ec2_secgroups = [fabconf['EC2_SECGROUPS']]
    env.ec2_instancetype = fabconf['EC2_INSTANCETYPE']
    print(_yellow("SSH private key verification..."))
    try:
        open(fabconf['SSH_PRIVATE_KEY_PATH']).read()
    except Exception:
        print(_red("SSH private key does not exist in the provided path %s !"
                   % fabconf['SSH_PRIVATE_KEY_PATH']))
        exit()

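# A hedged sketch of the conf.ini layout parse_ini() expects. The section names
# follow from the parser.set()/parser.items() calls above (CONFIG, a section per
# environment, and <ENVIRONMENT>_ENV_CONFIG); the keys shown are the ones read
# back later (SSH_PRIVATE_KEY_PATH, EC2_AMIS, EC2_KEYPAIR, EC2_SECGROUPS,
# EC2_INSTANCETYPE), with placeholder values that are assumptions:
#
# [CONFIG]
# ssh_private_key_path = ~/.ssh/id_rsa
#
# [PRODUCTION]
# ec2_amis = ami-xxxxxxxx
# ec2_keypair = my-keypair
# ec2_secgroups = my-security-group
# ec2_instancetype = m1.small
# instance_name_tag = Production
#
# [PRODUCTION_ENV_CONFIG]
# django_settings_module = settings.production
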
def status():
    """
    Summary of the backup on the server
    """
    running = len(run("ps aux | grep backup_gmail | grep -v grep",
                      warn_only=True)) > 0
    num_emails = run("find gmailbackup/gmail/MailStore -name '*eml' 2> /dev/null | wc -l")
    size = run("du -sh gmailbackup/gmail/MailStore | awk '{print $1}'")
    print _green("Status:")
    print "backup running   : %s" % ('yes' if running else 'no')
    print "number of emails : %s" % num_emails
    print "size on disk     : %s" % size

def create_bucket(bucket_name):
    '''Create a bucket'''
    start_time = time.time()
    conn = S3Connection(ec2_key, ec2_secret)
    print(_green("Creating bucket..."))
    try:
        bucket = conn.create_bucket(bucket_name=bucket_name)
        print(_green('Bucket "%s" successfully created' % bucket_name))
    except Exception as e:
        print(_red('Create bucket error: {}'.format(str(e))))

def supervisord_status():
    """
    This method outputs the status of the process being run by supervisord
    on the remote server.
    """
    print(_green("Getting status of the process running through supervisord..."))
    with prefix("cd /home/ubuntu/VirtualEnvironment && source bin/activate && cd news_classification"):
        run("sudo supervisorctl status")
        confirmation = confirm("Do you want to troubleshoot here?", default=True)
        if confirmation:
            print(_green("Outputting supervisor logs"))
            run("sudo tail -n 50 /applogs/supervisord.log")

def terminate_instance(instance_id):
    print(_green("Terminating instance..."))
    conn = boto.ec2.connect_to_region(env.ec2_region)
    results = conn.terminate_instances(instance_ids=[instance_id])
    instance = results[0]
    while instance.state == u'shutting-down':
        print(_yellow("Instance state: %s" % instance.state))
        time.sleep(15)
        instance.update()
    if instance.state == u'terminated':
        print(_green("Instance terminated."))
    else:
        print(_yellow("Instance state: %s" % instance.state))

def install():
    """
    Install the gmailbackup scripts and all required software
    """
    sudo('apt-get -y install git')
    append('~/.ssh/config', 'Host github.com')
    append('~/.ssh/config', '\tStrictHostKeyChecking no')
    if not exists('gmailbackup'):
        print(_green("Getting gmailbackup"))
        run('git clone https://github.com/adamw523/gmailbackup.git')
    else:
        print(_red('Already have gmailbackup'))

def fab():
    """
    This does the real work for the ulous() task. Is here to provide
    backwards compatibility
    """
    start_time = time.time()
    print(_green("Started..."))
    env.host_string = _create_server()
    print(_green("Waiting 30 seconds for server to boot..."))
    time.sleep(30)
    _oven()
    end_time = time.time()
    print(_green("Runtime: %f minutes" % ((end_time - start_time) / 60)))
    print(_green(env.host_string))

def render(slug):
    '''Compile app template into HTML'''
    if not os.path.exists('apps/%s' % slug):
        print _red("--- App with this slug can't be found (%s)" % slug)
        return
    env = Environment(loader=FileSystemLoader('templates'))
    template = env.get_template('apps/app_template.html')
    # Manifest
    data = ''
    with open("apps/%s/manifest.json" % slug, "r") as manifest:
        data = manifest.read().replace('\n', '')
    context = json.loads(data)
    # Description
    description = ''
    with open("apps/%s/description.md" % slug, "r") as desc_file:
        description = desc_file.read()
    # Images to /static/
    if not os.path.exists('static/apps/%s' % slug):
        os.makedirs('static/apps/%s' % slug)
    if context['images'].get('logo'):
        shutil.copy2('apps/%s/images/%s' % (slug, context['images']['logo']),
                     'static/apps/%s' % slug)
    # default to an empty list so a missing 'screenshots' key doesn't crash
    for screenshot in context['images'].get('screenshots', []):
        shutil.copy2('apps/%s/images/screenshots/%s' % (slug, screenshot),
                     'static/apps/%s' % slug)
    # Description
    context['description'] = markdown.markdown(description)
    # Write output
    if not os.path.exists('templates/apps/%s' % slug):
        os.makedirs('templates/apps/%s' % slug)
    else:
        # remove old files
        open("templates/apps/%s/index.html" % slug, "w").close()
    output = open("templates/apps/%s/index.html" % slug, "w")
    output.write('{% extends "base.html" %}\n{% block body %}\n')
    output.write(template.render(context))
    output.write('\n{% endblock %}')
    output.close()
    print _green('--- Done rendering. You can always re-render files with')
    print _yellow('--- fab render:%s' % slug)

def docean_start():
    """
    Start the DigitalOcean server. Restored from backup if one exists
    """
    if not os.path.exists('private/gmailarchive_rsa.pub'):
        digio.create_ssh_key()
    # Make sure SSH KEY is there
    ssh_key_id = digio.ssh_key_id()
    if not ssh_key_id:
        print _yellow("\nPlease go to the DigitalOcean "
                      "website and make sure you have"),
        print _yellow("an SSH key with the following:\n")
        print _green("name: \n") + digio.ssh_key_name
        print _green("\nvalue: \n") + digio.public_key()
        fabric.utils.abort("SSH keys")
    droplet = digio.droplet()
    # Need to boot a droplet
    if droplet is None:
        # check for backup
        # restore
        print _green("Creating droplet...")
        droplet = digio.create_droplet(ssh_key=ssh_key_id)
        print _green("Created server with id: %s" % (droplet['id']))
    # Wait for droplet to boot or restore
    while droplet['status'] == 'new':
        print _yellow("Droplet status is '%s' waiting for boot..."
                      % (droplet['status']))
        time.sleep(10)
        droplet = digio.droplet(droplet['id'])

def nginx():
    """
    Installs nginx on the remote server and replaces its conf file with the
    one available in the git repository. Finally, restarts the nginx server.
    """
    run("sudo apt-get install -y nginx")
    with prefix("cd /home/ubuntu/VirtualEnvironment/Canworks/configs"):
        run("sudo cp nginx.conf /etc/nginx/nginx.conf")
        run("sudo cp nginx_default.conf /etc/nginx/sites-enabled/default")
        print(_green("Checking nginx configuration file"))
        run("sudo nginx -t")
        print("\n\n%s\n\n" % _green("Restarting nginx"))
        run("sudo service nginx restart")

def list_buckets():
    '''List All Buckets'''
    start_time = time.time()
    conn = S3Connection(ec2_key, ec2_secret)
    print(_green("Listing active buckets..."))
    buckets = conn.get_all_buckets()
    x = _pretty_table(["Name", "Connection"])
    for bucket in buckets:
        x.add_row([bucket.name, bucket.connection])
    print(_yellow(x))
    end_time = time.time()
    print(_green("Runtime: %f minutes" % ((end_time - start_time) / 60)))

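# _pretty_table() is referenced above but not shown; a minimal sketch, assuming
# it is a thin wrapper around the prettytable package:
from prettytable import PrettyTable

def _pretty_table(field_names):
    """Return a PrettyTable with the given column headers (sketch)."""
    return PrettyTable(field_names)
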
def check_image_availability(ami_id):
    print(_green("Building AMI..."))
    conn = boto.ec2.connect_to_region(env.ec2_region)
    image = conn.get_image(ami_id)
    while image.state == u'pending':
        print(_yellow("AMI state: %s" % image.state))
        time.sleep(15)
        image.update()
    if image.state == u'available':
        print(_green("AMI is ready."))
        print(_green("AMI ID: %s" % image.id))
        print(_green("AMI Name: %s" % image.name))
        print(_green("AMI Description: %s" % image.description))
    else:
        print(_yellow("AMI state: %s" % image.state))

def reset_migrations():
    """
    Remove migrations in all apps and create new ones
    """
    if sett.PROJECT_TYPE == sett.PROJECT_DEV:
        _default('local')
    local('rm -rf apps/*/migrations')
    for app in sett.APPS:
        with lcd(os.path.join(env.workondir, env.virtualenv, 'src', app, app)):
            local('rm -rf migrations')
    _ve_local('./manage.py reset south')
    for app in sett.ALLAPPS:
        print _green('converting %s to south' % app)
        _ve_local('./manage.py convert_to_south %s' % app)
    migrate()

def commands(dev_type=None):
    "List of available commands"
    print _green("\n%s fabric script." % sett.PROJECT_NAME)
    print """
    Usage: fab [localhost|stage|prod] (ve:virtualenv) command

    To use another virtualenv as the standard one use the ve switch
        fab localhost ve:myvirtualenv command

    fab commands:dev    : List development commands
    fab commands:deploy : List deployment commands
    """
    if not dev_type or dev_type == 'dev':
        print _green("Development commands")
        print """

def gunicorn_status():
    """
    Check if gunicorn is running or not
    """
    with settings(hide("running", "stdout", "stderr"), warn_only=True):
        result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
        if result:
            print(_green("Gunicorn is running fine......................"))
        else:
            print(_red("Gunicorn is not running ......................"))
            confirmation = confirm("Do you want to troubleshoot here?", default=True)
            if confirmation:
                print(_green("Outputting gunicorn error logs"))
                with show("debug", "stdout", "stderr"):
                    run("sudo tail -n 50 /applogs/gunicorn_error.logs")

def terminate_instance(name):
    """
    Terminates all servers with the given name
    """
    print(_green("Started terminating {}...".format(name)))
    conn = connect_to_ec2()
    filters = {"tag:Name": name}
    for reservation in conn.get_all_instances(filters=filters):
        for instance in reservation.instances:
            if "terminated" in str(instance._state):
                print("instance {} is already terminated".format(instance.id))
                continue
            else:
                print(instance._state)
            print(instance.id, instance.tags['Name'])
            if raw_input("terminate? (y/n) ").lower() == "y":
                print(_yellow("Terminating {}".format(instance.id)))
                conn.terminate_instances(instance_ids=[instance.id])
                os.remove(os.path.join(env.ssh_directory, ''.join([name, '.json'])))  # noqa
                os.remove(os.path.join(env.fab_hosts_directory, ''.join([name, '.txt'])))
                print(_yellow("Terminated"))

def copy_manifests():
    print(_green("Copying puppet manifests..."))
    local('git archive --prefix=puppet-minecraft/ '
          '--output=puppet-minecraft.tar.gz HEAD')
    put('puppet-minecraft.tar.gz', '/home/ubuntu')
    with cd('/home/ubuntu'):
        run('tar xzf puppet-minecraft.tar.gz')
    local('rm puppet-minecraft.tar.gz')

def backup_db_simple_postgresql(is_download=True):
    """
    Simple backup db function for postgresql.
    Export db to sql & download file to local
    """
    today = datetime.date.today()
    NAME = '%s-%s.sql' % (DATABASE_NAME, today)
    OUT = '%s/%s' % (REMOTE_BACKUP_FOLDER, NAME)
    # Create the backup folder if it does not exist yet
    if not exists(REMOTE_BACKUP_FOLDER):
        sudo('mkdir %s' % REMOTE_BACKUP_FOLDER)
    # Assign permission on backup folder
    sudo('chmod 777 %s' % REMOTE_BACKUP_FOLDER)
    if exists(OUT):
        sudo('rm %s' % OUT)
    sudo('pg_dump %s > %s' % (DATABASE_NAME, OUT), user=POSTGRES_USER)
    if is_download:
        # Download the dump to the local backup folder
        print(_green("Start download sql"))
        get(OUT, '/backup/%s' % NAME)

def clone_puppet_repo(retry_count=0):
    with cd('/etc/puppet/modules'):
        if not exists('couchdb'):
            try:
                sudo('git clone https://github.com/wieden-kennedy/spandex-couch '
                     '/etc/puppet/modules/couchdb')
            except SystemExit:
                if retry_count < 1:
                    print(retry_count)
                    retry_count += 1
                    print(_red("\nError with github authentication. "
                               "Reattempting one more time.\n"))
                    clone_puppet_repo(retry_count)
                else:
                    print(_red("\nGithub authentication failed. Please run the "
                               "command again with the proper credentials."))
        else:
            print(_green("Puppet module already exists. "
                         "Attempting to update module from git."))
            update_puppet_repo()

def restart_with_new_repo():
    with cd("/home/ubuntu/VirtualEnvironment/"):
        with prefix("source bin/activate"):
            result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
            if result:
                print("\n\n%s\n\n" % _green("Gunicorn is running"))
                confirmation = confirm("Do you want to restart gunicorn?", default=True)
                if confirmation:
                    pid = run("ps aux | grep gunicorn | awk 'NR==1 {print $2}'")
                    run("sudo kill -9 %s" % pid)
                    result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
                    if not result:
                        print("\n\n%s\n\n" % _red("Gunicorn has been stopped and is starting with the new repo"))
                        with cd("Canworks"):
                            run("git pull origin master")
                            run("gunicorn -c configs/gunicorn_config.py api:app")
                        result = run('if ps aux | grep -v grep | grep -i "gunicorn"; then echo 1; else echo ""; fi')
                        if result:
                            print("\n\n%s\n\n" % _green("Gunicorn is running"))
                            run("sudo service nginx restart")
                        else:
                            print("\n\n%s\n\n" % _red("Gunicorn is not running; you need to log in to the server"))
                else:
                    print("\n\n%s\n\n" % _red("Gunicorn has not been stopped"))
                    return
            else:
                print("\n\n%s\n\n" % _red("Gunicorn has not been started yet"))
                with cd("Canworks"):
                    run("gunicorn -c configs/gunicorn_config.py api:app")
                restart_with_new_repo()

def update():
    print(_green("Connecting to EC2 Instance..."))
    execute(update_git)
    execute(update_nginx_conf)
    execute(nginx_status)
    print(_yellow("...Disconnecting EC2 instance..."))
    disconnect_all()

def check_instance_availability():
    env.command_timeout = 5
    env.timeout = 5
    while not exists('/var/lib/cloud/instance/boot-finished',
                     use_sudo=False, verbose=True):
        print(_yellow("Waiting for cloud-init to finish running..."))
        time.sleep(15)
    print(_green("Instance is ready."))
    env.timeout = 10
    env.command_timeout = None

def sendInfo(message, parser=False):
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format=_blue('%(asctime)-15s %(levelname)s >>>', bold=True) + ' %(message)s')
    logging.info(_green(message))
    if parser:
        parser.print_help()

def gunicorn_status():
    """
    Check if gunicorn is running fine or not.
    """
    print("\n\n\t\t%s" % _yellow("Checking gunicorn status"))
    with settings(hide("running", "stderr", "stdout")):
        result = run('ps aux | grep gunicorn')
        print(_green(result))
    return

def download_corpora():
    with cd("/home/ubuntu/VirtualEnvironment/"):
        with prefix("source bin/activate"):
            print(_green("Now downloading textblob packages"))
            run("python -m textblob.download_corpora")
    # nltk corpora
    with cd("/home/ubuntu/VirtualEnvironment/"):
        run("sudo python -m nltk.downloader all")

def health_check():
    print(_green("Connecting to EC2 Instance..."))
    execute(mongo_status)
    execute(nginx_status)
    execute(gunicorn_status)
    execute(disk_usage)
    execute(ram_usage)
    print(_yellow("...Disconnecting EC2 instance..."))
    disconnect_all()

def update_puppet_repo():
    with cd('/etc/puppet/modules/'):
        if not exists('couchdb'):
            clone_puppet_repo()
    with cd('/etc/puppet/modules/couchdb'):
        status = sudo('git status | awk \'NR==2\'')
        if not match('nothing to commit', status):
            sudo('git pull')
        else:
            print(_green('Puppet module already up-to-date.'))