def herd(name, newborn=False):
    """Do something with the box named <name>"""
    refresh_boxen()
    if not newborn:
        # keep the one set in env.box, to get the admin pass
        env.box = env.boxen[name]
    # get the IPs:
    ips = env.box.addresses['public']
    env.box_public_ips = dict([(ip['version'], ip['addr']) for ip in ips])
    host = 'root@{0}:22'.format(env.box_public_ips[4])
    env.hosts = [host]
    # since we might not have auth on this box, we just change the admin pass
    # every time - this means that even if provisioning of SSH keys fails, we
    # can still get access and it also means that fabric doesn't need to know
    # anything!
    env.passwords = getattr(env, 'passwords', {})
    if host not in env.passwords:
        password = getattr(env.box, 'adminPass', False)
        if not password:
            password = str(uuid4())[:12]
            env.box.change_password(password)
            print white("Changed password of server to:"), red(password)
            time.sleep(10)  # takes a while for change_password to work it seems
        env.passwords[host] = password
    else:
        env.password = env.passwords[host]
    print green("Ok, found server {0}:{1}".format(env.box.name, env.box.id))

def migrate_check(auto_migrate=False):
    """
    Checks whether migrations need to be run; if any are found, prompts to
    run them before continuing.

    Looks for " - Migrating" in the output of the dry run.
    """
    migration_cmd = (
        "/opt/edx/bin/django-admin.py migrate --noinput "
        "--settings=lms.envs.aws --pythonpath=/opt/wwc/edx-platform"
    )
    with prefix("export SERVICE_VARIANT=lms"):
        with hide("running", "stdout", "stderr", "warnings"):
            dryrun_out = sudo(migration_cmd + " --db-dry-run", user="******")
        migrate = False
        for chunk in dryrun_out.split("Running migrations for "):
            if "Migrating" in chunk:
                print "!!! Found Migration !!!\n" + chunk
                migrate = True
        if migrate:
            if auto_migrate or console.confirm(
                    green(migration_cmd) + white("\n") +
                    white("Run migrations? ", bold=True),
                    default=True):
                noopable(sudo)(migration_cmd, user="******")

def node_print(output_text, output):
    print "{} {}\t {} {}".format(
        white("Node:"),
        green(env.host),
        white(output_text),
        green(output),
    )

def display_pre_post(choices):
    """
    Displays the list of pre- and post-checkout commands and returns them
    in a dictionary:

        return({
            'pre': ['cmd1', 'cmd2', ...],
            'post': ['cmd1', 'cmd2', ...]
        })
    """
    pkg_info = PackageInfo()
    pre_post = pkg_info.pre_post_actions(choices)
    for stage in ['pre', 'post']:
        if pre_post[stage]:
            fastprint(green('{0}-checkout commands:\n'.format(stage), bold=True) +
                      green(' -> ') +
                      green('\n -> '.join(pre_post[stage])) +
                      white('\n\n'))
        else:
            fastprint(green('WARNING', bold=True) +
                      green(' - no {0}-checkout commands for this set '
                            'of packages: {1}'.format(stage, choices)) +
                      white('\n\n'))
    return pre_post

def set_symlinks():
    print white('--- set symlinks ---', bold=True)
    with cd('~/'):
        dotfiles = '''
            zshrc
            zshenv
            tmux.conf
            vimrc
            vim
            gitignore
            gitconfig
            gitattributes
        '''.split()
        map(lambda _: run('ln -sf dotfiles/_{0} .{0}'.format(_)), dotfiles)

def check_env_vars():
    if env.log_level <= logging.INFO:
        print(white('\nEnvironment checkup', bold=True))
    envs = ['HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY',
            'http_proxy', 'https_proxy', 'no_proxy',
            'conda_default_env']
    for e in envs:
        value = os.environ.get(e, '')
        if value:
            if env.log_level <= logging.INFO:
                print('{0} {1:15} = {2:20}'.format(
                    yellow(' >', bold=True), e, yellow(value, bold=True)))
        else:
            if env.log_level <= logging.INFO:
                print('{0} {1:15}'.format(yellow(' >', bold=True), e))
    if env.log_level <= logging.INFO:
        print(green('Everything is looking good!'))

    if env.log_level <= logging.INFO:
        print(white('\nChecking for .env file', bold=True))
    mandatory_envs = ['SITE_ID', 'DEBUG']
    if os.path.exists('./.env'):
        if env.log_level <= logging.INFO:
            print(green('Found .env file'))
        os.environ.get('PATH', '')
    else:
        if env.log_level <= logging.ERROR:
            print(red('.env does not exist!'))

def set_symlinks(src='dotfiles'):
    print white('--- set symlinks ---', bold=True)
    with cd('~/'):
        dotfiles = '''
            zshrc
            zshenv
            tmux.conf
            vimrc
            vim
            gitignore
            gitconfig
            gitattributes
            mackup.cfg
        '''.split()
        map(lambda _: run('ln -sf {0}/_{1} .{1}'.format(src, _)), dotfiles)

def color_test():
    for x in range(0, 2):
        print colors.blue('Blue text', bold=False) + '\n'
        time.sleep(0.2)
        print colors.cyan('cyan text', bold=False)
        time.sleep(0.2)
        print colors.green('green text', bold=False)
        time.sleep(0.2)
        print colors.magenta('magenta text', bold=False)
        time.sleep(0.2)
        print colors.red('red text', bold=False)
        time.sleep(0.2)
        print colors.white('white text', bold=False)
        time.sleep(0.2)
        print colors.yellow('yellow text', bold=False)
        time.sleep(0.2)
        print colors.blue('Blue text bold', bold=True)
        time.sleep(0.2)
        print colors.cyan('cyan text bold', bold=True)
        time.sleep(0.2)
        print colors.green('green text bold', bold=True)
        time.sleep(0.2)
        print colors.magenta('magenta text bold', bold=True)
        time.sleep(0.2)
        print colors.red('red text bold', bold=True)
        time.sleep(0.2)
        print colors.white('white text bold', bold=True)
        time.sleep(0.2)
        print colors.yellow('yellow text bold', bold=True)
        time.sleep(0.2)

def set_symlinks():
    print white('--- set symlinks ---', bold=True)
    with lcd('~/'):
        dotfiles = '''
            zshrc
            zshenv
            tmux.conf
            vimrc
            tmux
            tmuxinator
            vim
            gitignore
            gitconfig
            gitattributes
            mackup.cfg
        '''.split()
        map(lambda _: local('ln -sf {0}/_{1} .{1}'.format(dotfiles_path, _)),
            dotfiles)

def install_diff_highlight():
    print white('--- install diff highlight ---', bold=True)
    if not file_exists('/usr/local/bin/diff-highlight'):
        run('wget https://raw.githubusercontent.com/git/git/master/contrib/diff-highlight/diff-highlight')
        with settings(mode_sudo()):
            run('chmod +x diff-highlight')
            run('mv diff-highlight /usr/local/bin/diff-highlight')

def test_color():
    print blue('blue', bold=True)
    print red('red')
    print green('green')
    print yellow('yellow')
    print white('white')
    print cyan('cyan')
    print magenta('magenta')
    print black('black')

def bootstrap():
    """ Runs once """
    append("~/.bash_profile", "alias vi=vim")
    append("~/.bash_profile", "alias l=ls")
    append("~/.bash_profile", "alias ll='ls -al'")
    append("~/.bash_profile", "export PROJECT_NAME=%s" % env.project_name)
    append("~/.bash_profile", "export VAGRANT_ROOT=/vagrant/deploy")
    sudo("apt-get update")

    # install vim to help edit files faster
    apt("vim")
    # install apc prerequisites
    apt("make libpcre3 libpcre3-dev re2c")
    # install python 2.6 (needed for google sitemaps)
    apt("python")

    # install dependencies and lamp
    apt("tasksel rsync")
    apt("apache2 libapache2-mod-php5 libapache2-mod-auth-mysql php5-mysql")
    sudo("a2enmod php5")
    sudo("a2enmod rewrite")
    sudo("a2enmod headers")
    sudo("a2enmod expires")

    # ensure apache is started at this point
    restart_server()
    apt("php-pear php5-dev")

    # run this AFTER we install apache, or the following error will happen:
    # apache2: Could not reliably determine the server's fully qualified
    # domain name, using 127.0.1.1 for ServerName
    sudo('''sh -c "echo 'ServerName %s' > /etc/apache2/httpd.conf"''' % env.project_name)
    # sudo('sh -c \047echo \042ServerName servver_name_here\042 > /etc/apache2/httpd.conf\047')  # alternate method

    # install git
    apt("git-core")

    # pecl may throw errors if apc is installed already.
    with settings(warn_only=True):
        # silent install
        sudo('printf "\n" | pecl install apc')

    print(white("If an authentication error occurs connecting to git, run $ ssh-add"))
    # check whether a key exists; only generate a new key if one isn't already made.
    if not exists("%s/.ssh/id_rsa" % env.project_root):
        print(white("Trying to run automatically, please enter your desired password when prompted."))
        local("ssh-add")

    deploy()

def set_streaming_throughput_limit(limit=200):
    """Sets the streaming throughput limit across the Cassandra cluster."""
    print "\n"
    print white("Setting streaming throughput limit to {}:".format(limit))
    execute(
        _set_stream_throughput_limit,
        limit=limit,
        hosts=get_all_cassandra_nodes(),
    )
    print "\n"

def set_compaction_throughput_limit(limit=64):
    """Sets the compaction throughput limit across the Cassandra cluster."""
    print "\n"
    print white("Setting compaction throughput limit to {}:".format(limit))
    execute(
        _set_compaction_throughput_limit,
        limit=limit,
        hosts=get_all_cassandra_nodes(),
    )
    print "\n"

def disable_auto_compactions(keyspace=None):
    """Disables automatic compaction for the given keyspace in the cluster."""
    print "\n"
    print white("Disabling Automatic Compactions:")
    execute(
        _disable_auto_compactions,
        keyspace=keyspace,
        hosts=get_all_cassandra_nodes(),
    )
    print "\n"

def disable_compaction_throughput_limit():
    """Disable the compaction throughput limit across the Cassandra cluster."""
    print "\n"
    print white("Disabling compaction throughput limit:")
    execute(
        _set_compaction_throughput_limit,
        limit=0,
        hosts=get_all_cassandra_nodes(),
    )
    print "\n"

def disable_streaming_throughput_limit():
    """Disable the streaming throughput limit across the Cassandra cluster."""
    print "\n"
    print white("Disabling streaming throughput limit:")
    execute(
        _set_stream_throughput_limit,
        limit=0,
        hosts=get_all_cassandra_nodes(),
    )
    print "\n"

def inner(*args, **kwargs):
    if quiet:
        puts(green(txt + '...', bold=True), end='', flush=True)
        with hide('everything'):
            result = fn(*args, **kwargs)
        puts(white('Woo.\n'), show_prefix=False, flush=True)
    else:
        puts(green(txt + '...', bold=True))
        result = fn(*args, **kwargs)
        puts(white('Woo.\n'))
    return result

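The `inner` function above reads like the closure of a decorator factory (it refers to `quiet`, `txt`, and `fn` from an enclosing scope that is not shown). A minimal sketch of such a wrapper follows; the factory name `announce`, its signature, and the import list are assumptions, not part of the original source.

# Sketch only: `announce` and its signature are assumed; the original source
# defines `inner` but not the enclosing factory.
import functools

from fabric.api import hide
from fabric.colors import green, white
from fabric.utils import puts


def announce(txt, quiet=False):
    """Print `txt` before running the wrapped task and 'Woo.' after it."""
    def decorator(fn):
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            if quiet:
                puts(green(txt + '...', bold=True), end='', flush=True)
                with hide('everything'):
                    result = fn(*args, **kwargs)
                puts(white('Woo.\n'), show_prefix=False, flush=True)
            else:
                puts(green(txt + '...', bold=True))
                result = fn(*args, **kwargs)
                puts(white('Woo.\n'))
            return result
        return inner
    return decorator
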
def launch_instance(image_name=BASE_IMAGE_NAME,
                    instance_name=BASE_INSTANCE_NAME, wait=False):
    """
    Launch an instance.

    It uses ``BASE_IMAGE_NAME`` as the default image and
    ``BASE_INSTANCE_NAME`` as the default instance name. It waits for the
    instance to be running, but doesn't wait for the instance startup.

    :type image_name: string
    :param image_name: The name of the image to launch
        (``BASE_IMAGE_NAME`` by default).

    :type instance_name: string
    :param instance_name: The name to give to the launched instance
        (``BASE_INSTANCE_NAME`` by default).

    :rtype: :class:`boto.ec2.Instance` or ``None``
    :return: the launched instance.
    """
    images = find_images(name=image_name)
    instance = None
    if not images or len(images) == 0:
        print red('No images to launch')
    else:
        image = images[0]
        print green('Creating instance with image %s' % image.id)
        args = dict(
            key_name=INSTANCE_KEY_NAME,
            security_groups=(INSTANCE_SECURITY_GROUP,),
        )
        if image.root_device_type != 'instance-store':
            args.update({
                'instance_initiated_shutdown_behavior': "stop",
            })
        reservation = image.run(**args)
        if reservation:
            instance = reservation.instances[0]
            print green('Waiting for instance %s to be available...' % instance.id)
            time.sleep(3)
            status = instance.update()
            while status != 'running':
                print white('Waiting...')
                time.sleep(3)
                status = instance.update()
            add_name(instance, instance_name)
            print green('Instance %s with dns_name %s launched' %
                        (instance.id, instance.dns_name))
            if wait:
                check_instance(instance)
    return instance

def install_python_packages():
    print white('--- install python packages ---', bold=True)
    if not file_exists('/usr/bin/pip'):
        run('wget https://bootstrap.pypa.io/get-pip.py')
        with settings(mode_sudo()):
            run('/usr/local/bin/python2.7 get-pip.py')
        run('rm get-pip.py')
    with settings(mode_sudo()):
        run('ln -sf /usr/local/bin/pip /usr/bin/pip')
        run('pip install ipython')
        run('pip install virtualenv')
        run('pip install Pygments')

def install_python27():
    print white('--- install python2.7 ---', bold=True)
    # if not file_exists('/usr/local/bin/python2.7'):
    with cd('~/'):
        run('curl -O https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tgz')
        run('tar zxvf Python-2.7.9.tgz')
    with cd('/home/{}/Python-2.7.9'.format(run('whoami'))):
        run('./configure')
        run('make')
        sudo('make install')
    with cd('~/'):
        run('rm Python-2.7.9.tgz')
        run('rm -rf Python-2.7.9')

def enable_auto_compactions(keyspace=None):
    """Enables automatic compaction for the given keyspace in the cluster."""
    print "\n"
    if keyspace is None:
        abort(yellow("Please specify a keyspace!"))
    print white("Enabling Automatic Compactions:")
    execute(
        _enable_auto_compactions,
        keyspace=keyspace,
        hosts=get_all_cassandra_nodes(),
    )
    print "\n"

def sosreport_command_task(user="******", password="******"):
    """
    Execute sosreport on each host, passing hostname and ticket number
    """
    env.user = user
    env.password = password
    hostname = env.host.replace(".", "_")
    message = "Running sosreport on " + env.host
    message_style = "[{0: <20}] {1}"
    print white(message_style.format("INFO", message))
    sosreport_command = ("sosreport --name=" + hostname +
                         " --ticket-number=000 " + "--batch")
    return run(sosreport_command)

def set_mac_environment():
    print white('--- set mac environment ---', bold=True)
    sudo('nvram SystemAudioVolume=%80')
    run('defaults write com.apple.dock autohide -bool true')
    run('defaults write com.apple.dock autohide-delay -float 0')
    run('defaults write com.apple.dock magnification -bool true')
    run('defaults write com.apple.dock tilesize -int 40')
    run('defaults write com.apple.dock largesize -int 80')
    run('defaults write com.apple.dock mineffect -string "scale"')
    run('killall Dock')
    run('defaults write com.apple.finder CreateDesktop -bool false')
    run('killall Finder')

def sync_files():
    """
    Sync modified files and establish necessary permissions in
    selected environment.
    """
    require('group', 'public_dir')
    print white("Uploading code to server...", bold=True)
    ursync_project(
        local_dir='./',
        remote_dir=env.public_dir,
        exclude=env.exclude,
        delete=True,
        default_opts='-chrtvzP'
    )

def sosreport_command_task(user='******', password='******'):
    """
    Execute sosreport on each host, passing hostname and ticket number
    """
    env.user = user
    env.password = password
    env.parallel = True
    hostname = env.host.replace(".", "_")
    message = 'Running sosreport on ' + env.host
    message_style = "[{0: <20}] {1}"
    print white(message_style.format('INFO', message))
    sosreport_command = ("sosreport --name=" + hostname +
                         " --ticket-number=000 " + "--batch")
    return run(sosreport_command)

def iperf_command_task(command, user='******', password='******'):
    """
    Execute iperf on each host in client mode

    :param command: Iperf command to execute
    :param user: username of the remote user
    :param password: password of the remote user
    """
    env.user = user
    env.password = password
    env.parallel = True
    message = 'Running iperf test on ' + env.host
    message_style = "[{0: <20}] {1}"
    print white(message_style.format('INFO', message))
    return run(command)

def check_env_vars():
    if env.log_level <= logging.INFO:
        print(white('\nEnvironment checkup', bold=True))
    envs = [
        'HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY',
        'http_proxy', 'https_proxy', 'no_proxy',
        'conda_default_env'
    ]
    for e in envs:
        value = os.environ.get(e, '')
        if value:
            if env.log_level <= logging.INFO:
                print('{0} {1:15} = {2:20}'.format(
                    yellow(' >', bold=True), e, yellow(value, bold=True)))
        else:
            if env.log_level <= logging.INFO:
                print('{0} {1:15}'.format(yellow(' >', bold=True), e))
    if env.log_level <= logging.INFO:
        print(green('Everything is looking good!'))

    if env.log_level <= logging.INFO:
        print(white('\nChecking for .env files', bold=True))
    mandatory_envs = ['SITE_ID', 'DEBUG']
    if os.path.exists('./.local.env'):
        if env.log_level <= logging.INFO:
            print(green('Found .local.env file'))
        os.environ.get('PATH', '')
    else:
        if env.log_level <= logging.ERROR:
            print(red('.local.env does not exist!'))
    if os.path.exists('./.staging.env'):
        if env.log_level <= logging.INFO:
            print(green('Found .staging.env file'))
        os.environ.get('PATH', '')
    else:
        if env.log_level <= logging.ERROR:
            print(red('.staging.env does not exist!'))
    if os.path.exists('./.production.env'):
        if env.log_level <= logging.INFO:
            print(green('Found .production.env file'))
        os.environ.get('PATH', '')
    else:
        if env.log_level <= logging.ERROR:
            print(red('.production.env does not exist!'))

def provision():
    print(magenta('Starting Provisioning'))
    message = 'Waiting for puppet to become available'
    with hide('everything'):
        with settings(warn_only=True):
            while 1:
                sys.stdout.write("\r" + magenta(message) + " ")
                sys.stdout.flush()
                # we don't have a puppet master here
                # so we need to poll
                if run("which puppet").succeeded:
                    sys.stdout.write("\n")
                    sys.stdout.flush()
                    break
                message = message + white('.')
                time.sleep(2)
    # this AMI does not let you log in as root.
    # we need to be sure the agent-forwarding is active
    # when we provision, so we pass -E on top of the default
    # fabric sudo prefix. The default rackspace images
    # allow you to ssh as root
    sudo_prefix = "sudo -S -E -p '%(sudo_prompt)s' " % env
    with settings(sudo_prefix=sudo_prefix):
        sudo("puppet apply --modulepath '/home/ubuntu/configuration/modules' /home/ubuntu/configuration/site.pp")

def need_update(self):
    try:
        with fab.settings(warn_only=True):
            update_ts = float(
                fab.local('cat %s' % self.update_status_file,
                          capture=True).strip())
    except ValueError:
        update_ts = 0
    last_update = datetime.fromtimestamp(update_ts)
    if (datetime.now() - last_update) > fab.env['expire_timedelta']:
        print colors.red('update requirements needed')
        return True
    else:
        print colors.white('update requirements not needed')
        return False

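`need_update` reads a Unix timestamp from `self.update_status_file`, so something has to write that file after a successful requirements update. A hypothetical companion method is sketched below; its name `mark_updated` and placement are assumptions, not taken from the original source.

# Hypothetical companion to need_update(): records the current epoch seconds
# in self.update_status_file so the next need_update() call compares against it.
import time

from fabric import api as fab


def mark_updated(self):
    # overwrite the status file with the current Unix timestamp
    fab.local('echo %d > %s' % (int(time.time()), self.update_status_file))
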
def bootstrap():
    """ Runs once """
    print(white("Creating environment %s" % env.name))
    sudo("apt-get update")

    # enable cakephp dependencies
    sudo("a2enmod rewrite")
    sudo("a2enmod headers")
    sudo("a2enmod expires")

    # ensure apache is started at this point
    restart_server()

    # run this AFTER we install apache, or the following error will happen:
    # apache2: Could not reliably determine the server's fully qualified
    # domain name, using 127.0.1.1 for ServerName
    sudo('''sh -c "echo 'ServerName PLS' > /etc/apache2/httpd.conf"''')

    git_website()
    # set a flag so deploy() won't run git commands twice when bootstrapping
    env.bootstrapping = True
    init_db()
    deploy()

def deploy():
    # UPDATE the server with the newest updates from github.
    print(white("Updating environment %s" % env.name))
    if env.bootstrapping == False:
        git_website()
    create_required_folders()
    # put new templates up every time
    put_templates()
    apt('libmcrypt-dev')
    # make sure we have ssl enabled
    sudo('a2enmod ssl')
    # make sure correct apache symlinks are created
    # and proper deploy config is loaded
    sudo('a2ensite %s' % env.project_name)
    # disable the default website
    sudo('a2dissite default')
    restart_server()
    upgrade_schema()

def __analise(aws_ag):
    if len(aws_ag['SuspendedProcesses']):
        processes_blocked = __get_blocked__processes(aws_ag)
        print("AutoScalingGroupName: " + aws_ag['AutoScalingGroupName'])
        print("LaunchConfigurationName: " + aws_ag['LaunchConfigurationName'] +
              " || Status: " + red("Blocked " +
                                   str(len(aws_ag['SuspendedProcesses'])) +
                                   " processes"))
        if args.verbose:
            print("The following processes are blocked >> " +
                  red(processes_blocked))
    else:
        print("AutoScalingGroupName: " + aws_ag['AutoScalingGroupName'])
        print("LaunchConfigurationName: " + aws_ag['LaunchConfigurationName'] +
              " || Status: " + green("Unblocked"))
    print(yellow(">> aws autoscaling suspend-processes --auto-scaling-group-name=\"" +
                 aws_ag['AutoScalingGroupName'] + "\""))
    print(green(">> aws autoscaling resume-processes --auto-scaling-group-name=\"" +
                aws_ag['AutoScalingGroupName'] + "\""))
    if args.verbose:
        splitted = str(aws_ag['AutoScalingGroupARN']).split(":")
        region = splitted[3]
        id_with_no_space = re.sub(r"\s+", '+', aws_ag['AutoScalingGroupName'])
        print(white("Link to console: https://console.aws.amazon.com/ec2/autoscaling/home?region=" +
                    region + "#AutoScalingGroups:id=" + id_with_no_space +
                    ";view=details"))
    print("")

def pg_create_user(username, password):
    _run_as_pg('psql -d postgres -c '
               '"CREATE USER {user} WITH PASSWORD \'{pas}\'"'.format(
                   user=username, pas=password))
    print(white('psql -d postgres -c '
                '"CREATE USER {user} WITH PASSWORD \'{pas}\'"'.format(
                    user=username, pas=password)))

def test_colors():
    """Prints some strings with color output"""
    print(colors.red("red text"))
    print(colors.red("Bold red text", bold=True))
    print(colors.green("green text"))
    print(colors.green("Bold green text", bold=True))
    print(colors.blue("blue text"))
    print(colors.blue("Bold blue text", bold=True))
    print(colors.cyan("cyan text"))
    print(colors.cyan("Bold cyan text", bold=True))
    print(colors.yellow("yellow text"))
    print(colors.yellow("Bold yellow text", bold=True))
    print(colors.magenta("magenta text"))
    print(colors.magenta("Bold magenta text", bold=True))
    print(colors.white("white text"))
    print(colors.white("Bold white text", bold=True))

def show():
    """
    List projects and some of their config properties.
    """
    print
    print "Detected projects:"
    for subdir in env['detected_projects']:
        if subdir in env['enabled_projects']:
            abled_state = colors.green("enabled")
            hint = "to %s run: disable:%s" % (colors.white('disable'), subdir)
        else:
            abled_state = colors.yellow("disabled")
            hint = "to %s run: enable:%s" % (colors.white('enable'), subdir)
        print
        print "  %s (%s)" % (colors.cyan(subdir), abled_state)
        print "   `- %s" % hint

def print_success(message):
    try:
        from colorama import init, Back
        init()
        print(Back.GREEN + colors.white(u"%s" % message, bold=True))
    except ImportError:
        print(colors.green(u"%s" % message, bold=True))

def color_test():
    number = 1
    for x in range(0, 2):
        print colors.blue('{}: Blue text'.format(number), bold=False)
        number += 1
        time.sleep(0.2)
        print colors.cyan('{}: cyan text'.format(number), bold=False)
        number += 1
        time.sleep(0.2)
        print colors.green('{}: green text'.format(number), bold=False)
        number += 1
        time.sleep(0.2)
        print colors.magenta('{}: magenta text'.format(number), bold=False)
        number += 1
        time.sleep(0.2)
        print colors.red('{}: red text'.format(number), bold=False)
        number += 1
        time.sleep(0.2)
        print colors.white('{}: white text'.format(number), bold=False)
        number += 1
        time.sleep(0.2)
        print colors.yellow('{}: yellow text'.format(number), bold=False)
        number += 1
        time.sleep(0.2)
        print colors.blue('{}: Blue text bold'.format(number), bold=True)
        number += 1
        time.sleep(0.2)
        print colors.cyan('{}: cyan text bold'.format(number), bold=True)
        number += 1
        time.sleep(0.2)
        print colors.green('{}: green text bold'.format(number), bold=True)
        number += 1
        time.sleep(0.2)
        print colors.magenta('{}: magenta text bold'.format(number), bold=True)
        number += 1
        time.sleep(0.2)
        print colors.red('{}: red text bold'.format(number), bold=True)
        number += 1
        time.sleep(0.2)
        print colors.white('{}: white text bold'.format(number), bold=True)
        number += 1
        time.sleep(0.2)
        print colors.yellow('{}: yellow text bold'.format(number), bold=True)
        number += 1
        time.sleep(0.2)
        print

def sdk_release(is_upload_archives=True):
    """
    Build library into public/card.io-Android-SDK.
    """
    execute(sdk_setup)
    version_str = _get_release_version()
    _confirm_tag_overwrite(env.top_root, version_str)
    local("git tag -f {0}".format(version_str))

    with settings(hide(*env.to_hide)):
        print(colors.blue("building sdk {version_str} ".format(**locals())))
        build(is_upload_archives)

        print(colors.blue("extracting sdk {version_str} to public repo".format(**locals())))
        release_path = os.path.join(env.top_root, "card.io", "build",
                                    "outputs", "aar", "card.io-release.aar")
        dest_file_name = "card.io-{version_str}.aar".format(**locals())
        with lcd(env.public_repo_path):
            # remove old everything
            local("rm -rf *")
            local("mkdir aars")
            local("cp {release_path} aars/{dest_file_name}".format(**locals()))

            # update all sdk files
            local("cp -r " + os.path.join(env.top_root, "sdk") + "/* .")
            local("cp -r " + os.path.join(env.top_root, "sdk") + "/.[!.]* .")

            # update sample app
            local("cp -R " + os.path.join(env.top_root, "SampleApp") + " .")
            local("sed -i '' 's/io.card:android-sdk:REPLACE_VERSION/io.card:android-sdk:{version_str}/g' ./SampleApp/build.gradle".format(**locals()))
            local("sed -i '' 's/io.card:android-sdk:REPLACE_VERSION/io.card:android-sdk:{version_str}/g' ./README.md".format(**locals()))

            # add everything to git and commit
            local("git add .")
            local("git add -u .")
            local("git commit -am \"Update library to {version_str}\"".format(**locals()))

        _confirm_tag_overwrite(env.public_repo_path, version_str)
        with lcd(env.public_repo_path):
            local("git tag -f {0}".format(version_str))

    print
    print(colors.white("Success!"))
    print "The distribution files are now available in {public_repo_path}".format(**env)
    print
    if is_upload_archives == True:
        print "The aar file has been published to sonatype's mavenCentral staging repo. Promote it!"
        print
    print "Commit proguard-data"
    print "Verify and merge back to master"
    print

def version():
    """
    Display the current version of the package.
    """
    print " ___________________________________________________________"
    print "|                                                           |"
    print "|                     Current version:                      |"
    print "|  %s  |" % colors.white(prj.build_name.center(55), bold=True)
    print "|___________________________________________________________|"