def reset_cleansers(confirm=True):
    """destroys all cleanser slaves and their rollback snapshots, as well as the initial master snapshot - this allows re-running the jailhost deployment to recreate fresh cleansers."""
    # Refuse to proceed unless the operator explicitly confirms.
    if value_asbool(confirm) and not yesno("""\nObacht! This will destroy any existing and or currently running cleanser jails. Are you sure that you want to continue?"""):
        exit("Glad I asked...")
    get_vars()
    # number of cleanser slave jails configured for this host
    cleanser_count = AV['ploy_cleanser_count']
    # make sure no workers interfere:
    fab.run('ezjail-admin stop worker')
    # stop and nuke the cleanser slaves
    for cleanser_index in range(cleanser_count):
        # slave jails are numbered 01, 02, ... (1-based, zero-padded)
        cindex = '{:02d}'.format(cleanser_index + 1)
        fab.run('ezjail-admin stop cleanser_{cindex}'.format(cindex=cindex))
        # best-effort teardown: snapshot/jail/mount may already be gone
        with fab.warn_only():
            fab.run('zfs destroy tank/jails/cleanser_{cindex}@jdispatch_rollback'.format(cindex=cindex))
            fab.run('ezjail-admin delete -fw cleanser_{cindex}'.format(cindex=cindex))
            fab.run('umount -f /usr/jails/cleanser_{cindex}'.format(cindex=cindex))
            fab.run('rm -rf /usr/jails/cleanser_{cindex}'.format(cindex=cindex))
    with fab.warn_only():
        # remove master snapshot
        fab.run('zfs destroy -R tank/jails/cleanser@clonesource')
    # restart worker and cleanser to prepare for subsequent ansible configuration runs
    fab.run('ezjail-admin start worker')
    fab.run('ezjail-admin stop cleanser')
    fab.run('ezjail-admin start cleanser')
def server_setup():
    """Base configuration for a fresh CentOS server: clock/timezone,
    baseline packages, NTP time sync, and extra yum repositories from
    REPO_RPMS."""
    # Set kernel options
    with warn_only():
        sysctl('vm.swappiness', 0)
    # Basic system configuration
    file_update('/etc/sysconfig/clock', 'ZONE=', 'ZONE="America/Chicago"')
    sudo('cp -f /usr/share/zoneinfo/America/Chicago /etc/localtime')
    # Perform updates and installs
    yum.update()
    yum.install('yum-utils', 'ntp', 'ntpdate', 'uuid', 'zsh', 'screen',
                'tmux', 'vim-enhanced', 'git', 'subversion', 'bind-utils',
                'telnet', 'traceroute', 'curl', 'wget')
    # Performs a quick time sync so all logs are accurate
    with warn_only():
        service.enable('ntpd')
        service.stop('ntpd')
        sudo('ntpdate time.nist.gov')
        sudo('ntpdate tick.usno.navy.mil')
        service.start('ntpd')
    # Add custom repos for nginx, more up-to-date PHP and MySQL packages,
    # and a few additional PHP modules
    with warn_only():
        yum.add_epel_repo()
        # Python 2 dict iteration API (file uses Py2 elsewhere as well)
        for repo_name, repo_rpm_url in REPO_RPMS.iteritems():
            yum.add_repo_rpm(repo_rpm_url)
            yum.enable_repo(repo_name)
def cluster_down():
    """ Terminate ALL docker-machine machines """
    # Require interactive confirmation before destroying anything.
    if not confirm("Stop and delete all cluster machines?", default=False):
        return
    for machine in env.hostnames:
        print("Terminating {}".format(machine))
        # best-effort: one broken machine must not abort the rest
        with warn_only():
            local("docker-machine stop {}".format(machine))
        with warn_only():
            local("docker-machine rm -f {}".format(machine))
def _install_requirements():
    """Install git and dtach on Ubuntu or CentOS hosts, then kill any
    stray proxy.py process (best-effort)."""
    issue = run('cat /etc/issue')
    if re.search("Ubuntu", issue):
        with warn_only():
            run('sudo apt-get -y --no-upgrade install git')
            run('sudo apt-get -y --no-upgrade install dtach')
    elif re.search("CentOS", issue):
        # install only if not already present
        run('rpm -qa | grep -qw git || sudo yum install -y -q git')
        run('rpm -qa | grep -qw dtach || sudo yum install -y -q dtach')
    # the kill may find no process; tolerate failure
    with warn_only():
        run("kill `ps -ef | grep proxy.py | grep -v grep | awk '{print $2}'`")
def aws_init():
    """ One-off preparation for the project. """
    # key pair / security group may already exist: tolerate failures
    for best_effort_step in (aws_create_key_pair, aws_create_security_group):
        with warn_only():
            best_effort_step()
    aws_create_ec2_role()
    aws_docker_login()
    cfg = prod_cfg()
    if cfg['layout'] == 'atto':
        with warn_only():
            aws_create_repository('${project}_atto')
def supervisorconfig():
    """ Configure supervisor: append the local conf/supervisor.conf to the
    remote copy and (re)create the symlink in /etc/supervisor/conf.d.
    (translated from Russian) """
    with cd(env.project_root + '/conf'):
        with open('conf/supervisor.conf') as f:
            # append may duplicate-skip existing content; ignore failures
            with warn_only():
                append('supervisor.conf', f.read())
        # remove stale symlink if present, then link the project config in
        with warn_only():
            run('rm /etc/supervisor/conf.d/%s.conf' % env.project_name)
        run('ln -s %s/conf/supervisor.conf /etc/supervisor/conf.d/%s.conf' % (env.project_root, env.project_name))
def server_prep():
    """Allow members of the ``chsh`` group to change their own shell
    without a password (via PAM), and set vm.swappiness to 0."""
    with warn_only():
        sudo("groupadd chsh")  # group may already exist; ignore failure
    if not files.contains("/etc/pam.d/chsh", "trust group=chsh"):
        ## Terrible way to prepend to a file. DERP and MUR, accordingly.
        sudo(
            "sed -e '1i# This allows users to change their own shell' "
            " -e '1i# without being prompted for a password' "
            " -e '1iauth sufficient pam_wheel.so trust group=chsh\\n' "
            " -ri /etc/pam.d/chsh"
        )
    with warn_only():
        sysctl("vm.swappiness", 0)
def pull():
    """Update the boilerplate checkout and then the main repository.

    Bug fix: each ``local()`` call runs in its own shell, so the original
    bare ``local('cd boilerplate')`` had no effect on the following
    calls -- the git commands actually ran in the project root.  The
    commands that must run inside ``boilerplate`` are now chained in a
    single shell invocation.
    """
    with warn_only():
        # checkout/pull must share the shell in which we changed directory
        local('cd boilerplate && git checkout develop && git pull')
        # finally update the main repository itself
        local('git pull')
def update_app(env, branch='master', tag=''):
    """Update the application checkout on the target host.

    :param env: deployment environment; 'test' selects TEST_APP_PATH_SRC,
        anything else uses APP_PATH_SRC
    :param branch: git branch to check out and pull
    :param tag: optional tag to check out after the branch is updated
    """
    if env == 'test':
        app_dir = TEST_APP_PATH_SRC
    else:
        app_dir = APP_PATH_SRC
    with cd(app_dir):
        run('git config user.email "*****@*****.**"')
        run('git config user.name "Cottage Labs LLP"')
        # stash any local changes; output is inspected at the end
        stash = run('git stash')
        run('git checkout master')  # so that we have a branch we can definitely pull in
        # (in case we start from one that was set up for pushing only)
        run('git pull', pty=False)
        # NOTE(review): --set-upstream-to with no branch argument applies to
        # the *current* branch (master at this point), not {branch} --
        # confirm this is the intended upstream wiring.
        run('git branch --set-upstream-to origin/{branch}'.format(branch=branch))  # make sure we can pull here
        run('git checkout ' + branch)
        run('git pull', pty=False)
        if tag:
            run('git checkout {0}'.format(tag))
        run('git submodule update --init --recursive', pty=False)
        run('git submodule update --recursive', pty=False)
        # re-apply local changes if the earlier stash saved anything
        if not 'No local changes to save' in stash:
            with warn_only():
                run('git stash apply')
    install_dependencies(env=env)
def removewar():
    """Remove the deployed ABC.war artifact (file or exploded dir) and the
    server log from the JBoss install (Windows shell commands)."""
    dirjboss()
    cleanup_cmds = (
        "del %JBOSS_HOME%\\standalone\\deployments\\ABC.war",
        "rmdir %JBOSS_HOME%\\standalone\\deployments\\ABC.war /S /Q",
        "del %JBOSS_HOME%\\standalone\\log\\server.log",
    )
    # each target may not exist; keep going regardless
    with warn_only():
        for cmd in cleanup_cmds:
            local(cmd)
    dirjboss()
def safety():
    """ Server security settings (translated from Russian):
    - add persistent iptables rules from local conf files
    - append kernel hardening settings to /etc/sysctl.conf
    """
    run('apt-get -y install iptables-persistent')
    run('mkdir -p /etc/iptables')
    with cd('/etc/iptables'):
        # rules may already be partially present; tolerate append failures
        with warn_only():
            with open('conf/iptables.v4.conf', 'r') as f:
                append('rules.v4', f.read())
            with open('conf/iptables.v6.conf', 'r') as f:
                append('rules.v6', f.read())
    # Security settings (translated from Russian)
    lines = [
        'net.ipv4.conf.all.accept_redirects=0',
        'net.ipv6.conf.all.accept_redirects=0',
        'net.ipv4.tcp_syncookies=1',
        'net.ipv4.tcp_timestamps=0',
        'net.ipv4.conf.all.rp_filter=1',
        'net.ipv4.tcp_max_syn_backlog=1280',
        'kernel.core_uses_pid=1'
    ]
    for line in lines:
        # only append settings not already present verbatim
        if not contains('/etc/sysctl.conf', line, exact=True):
            run('echo %s >> /etc/sysctl.conf' % line)
def run(solard_context, cmd, **kwargs):
    """Execute *cmd* locally through fabric and return a plain dict with
    the interesting attributes of the result object."""
    executor = fabric_api.local
    managers = []
    cwd = kwargs.get('cwd')
    if cwd:
        managers.append(fabric_api.cd(kwargs['cwd']))
    env = kwargs.get('env')
    if env:
        managers.append(fabric_api.shell_env(**kwargs['env']))
    # we just warn, don't exit on solard
    # correct data is returned
    managers.append(fabric_api.warn_only())
    with nested(*managers):
        out = executor(cmd, capture=True)
    attrs = ('failed', 'return_code', 'stdout', 'stderr',
             'succeeded', 'command', 'real_command')
    return {name: getattr(out, name) for name in attrs}
def lint():
    """Run all linters on the whole project."""
    with warn_only(), section("linting project", green):
        results = [lint_app(), lint_frontend(), lint_backend()]
        if not all(res.succeeded for res in results):
            abort("Linting failed")
def coverage(): setup(quiet=True) # To test things in the CLI, coverage has to be started with, and all subprocesses within have to be started with # COVERAGE_PROCESS_START (http://coverage.readthedocs.io/en/coverage-4.2/subprocess.html). # A sitecustomize.py file must also be created to that when python subprocesses are fired up, they can recognize # that they're a part of a coverage run. coverage_file = os.path.join(PROJECT_ROOT_DIRECTORY, '.coveragerc') sitecustomize_path = os.path.join(PROJECT_ROOT_DIRECTORY, 'sitecustomize.py') sitecustomize_pyc_path = '{}c'.format(sitecustomize_path) sitecustomize_file_contents = 'import coverage\ncoverage.process_startup()\n' with open(sitecustomize_path, 'w') as f: f.write(sitecustomize_file_contents) with warn_only(): r1 = local('coverage erase') r2 = local( 'COVERAGE_PROCESS_START={} coverage run -m unittest discover -v -t . -s tests' .format(coverage_file)) r3 = local('coverage combine') r4 = local('coverage report -m') os.unlink(sitecustomize_path) # Avoid any residual effects of having a pyc file present. if os.path.exists(sitecustomize_pyc_path): os.unlink(sitecustomize_pyc_path) for result in (r for r in (r1, r2, r3, r4) if r.failed): sys.exit(result.return_code)
def update_app(env, branch='master', tag=''):
    """Update the application checkout on the target host.

    :param env: deployment environment; 'test' selects TEST_APP_PATH_SRC,
        anything else uses APP_PATH_SRC
    :param branch: git branch to check out and pull
    :param tag: optional tag to check out after the branch is updated
    """
    if env == 'test':
        app_dir = TEST_APP_PATH_SRC
    else:
        app_dir = APP_PATH_SRC
    with cd(app_dir):
        run('git config user.email "*****@*****.**"')
        run('git config user.name "Cottage Labs LLP"')
        # stash any local changes; output is inspected at the end
        stash = run('git stash')
        run('git checkout master'
            )  # so that we have a branch we can definitely pull in
        # (in case we start from one that was set up for pushing only)
        run('git pull', pty=False)
        run('git branch --set-upstream-to origin/{branch}'.format(
            branch=branch))  # make sure we can pull here
        run('git checkout ' + branch)
        run('git pull', pty=False)
        if tag:
            run('git checkout {0}'.format(tag))
        run('git submodule update --init --recursive', pty=False)
        run('git submodule update --recursive', pty=False)
        # re-apply local changes if the earlier stash saved anything
        if not 'No local changes to save' in stash:
            with warn_only():
                run('git stash apply')
    install_dependencies(env=env)
def create_user(username, sudo=False, password=False, group='webapps', groups=None, homeroot=APPS_ROOT):
    """ Create a system user and upload the local SSH key
    (translated from Russian).

    :param username: login name to create
    :param sudo: also add the user to the ``sudo`` group
    :param password: if True, interactively set a password after creation
    :param group: primary group
    :param groups: extra supplementary groups (list/tuple)
    :param homeroot: parent directory for the user's home
    """
    if not isinstance(groups, (list, tuple)):
        groups = []
    if sudo:
        # NOTE(review): this mutates a caller-supplied list in place --
        # pass a copy if the caller reuses its list.
        groups.append('sudo')
    if groups:
        groups = ' -G ' + ','.join(groups)
    else:
        groups = ''
    # user may already exist; ignore failure
    with warn_only():
        run(
            'useradd -m -d %(homeroot)s/%(user)s --shell /bin/bash -g %(group)s%(groups)s %(user)s'
            % {
                'homeroot': homeroot,
                'user': username,
                'group': group,
                'groups': groups
            })
    if password:
        run('passwd %s' % username)
    # Upload the ssh key right away (translated from Russian)
    with cd('%s/%s' % (homeroot, username)):
        run('mkdir -p .ssh')
        with cd('.ssh'):
            append('authorized_keys', LOCAL_SSH_KEY)
def push_key(custom_key=None, user_name=None):
    """Push a local public SSH key into the remote user's authorized_keys.

    :param custom_key: explicit key filename under ~/.ssh (overrides the
        default id_dsa.pub / id_rsa.pub search order)
    :param user_name: remote account to receive the key; empty/None
        targets the connecting user's own home directory
    """
    user_name = '' if user_name is None else user_name
    key_names = ['id_dsa.pub', 'id_rsa.pub']
    if custom_key:
        key_names = [custom_key]
    # pick the first local public key that exists
    local_key = None
    for key_file in key_names:
        key_path = expanduser('~/.ssh/%s' % key_file)
        if exists(key_path):
            local_key = key_path
            break
    if not local_key:
        print(red('No local keys found.'))
        return
    key = open(local_key).read().strip()
    with warn_only():
        run('mkdir -p ~%s/.ssh && chmod 700 ~%s/.ssh' % (user_name, user_name))
    # TODO: $HOME used with append due to a fabric bugfix pending release.
    # The ~ gets quoted as a literal when it performs an egrep.
    # Fix should be available in 1.6.1 or 1.7.0, whichever is next.
    home_dir = '/home/%s' % user_name if user_name else '$HOME'
    files.append('%s/.ssh/authorized_keys' % home_dir, key, partial=True)
    # NOTE(review): with an empty user_name this expands to
    # "chown -R . ~/.ssh" -- likely only correct when user_name is set;
    # confirm intended behavior for the self-user case.
    run('chown -R %s.%s ~%s/.ssh' % (user_name, user_name, user_name))
def update_app(branch='master', tag=''):
    """Update the production application checkout.

    Requires an explicit tag when deploying the master branch.

    :param branch: git branch to check out and pull
    :param tag: tag to deploy; mandatory when branch == 'master'
    """
    if not tag and branch == 'master':
        print 'Please specify a tag to deploy to production'
        sys.exit(1)
    with cd(APP_PATH_SRC):
        run('git config user.email "*****@*****.**"')
        run('git config user.name "Cottage Labs LLP"')
        # stash any local changes; output is inspected at the end
        stash = run('git stash')
        run('git checkout master'
            )  # so that we have a branch we can definitely pull in
        # (in case we start from one that was set up for pushing only)
        run('git pull', pty=False)
        # NOTE(review): this uses the deprecated two-argument
        # "--set-upstream <branch> origin/<branch>" form, which sets the
        # upstream of {branch} explicitly -- NOT equivalent to the
        # "--set-upstream-to" form used elsewhere in this file (which
        # applies to the *current* branch). Left unchanged deliberately.
        run('git branch --set-upstream {branch} origin/{branch}'.format(
            branch=branch))  # make sure we can pull here
        run('git checkout ' + branch)
        run('git pull', pty=False)
        if tag:
            run('git checkout {0}'.format(tag))
        run('git submodule update --init --recursive', pty=False)
        run('git submodule update --recursive', pty=False)
        # re-apply local changes if the earlier stash saved anything
        if not 'No local changes to save' in stash:
            with warn_only():
                run('git stash apply')
    install_dependencies()
def clear_nat_now(inface, outface):
    """Best-effort removal of the NAT masquerade and forwarding rules
    between *inface* and *outface*."""
    rules = (
        'iptables -t nat -D POSTROUTING -o ' + outface + ' -j MASQUERADE',
        'iptables -D FORWARD -i ' + outface + ' -o ' + inface + ' -m state --state RELATED,ESTABLISHED -j ACCEPT',
        'iptables -D FORWARD -i ' + inface + ' -o ' + outface + ' -j ACCEPT',
    )
    # rules may not exist; tolerate delete failures
    with warn_only():
        for rule in rules:
            sudo(rule)
def base_server_install():
    """ Base server setup (translated from Russian): install core
    packages, create the application user, install docker-compose and
    apply the security settings. """
    run('apt-get update -y')
    run('apt-get -y install locales')
    localesconfig()
    run('apt-get -y install sudo')
    run('apt-get -y install nginx')
    run('apt-get -y install git')
    run('apt-get -y install htop')
    with warn_only():
        run('addgroup user')  # group may already exist
    create_user(username=SERVER_USER,
                groups=['docker', 'sudo'],
                group='user',
                password=True,
                homeroot='/home')
    # Docker compose
    docker_compose_install()
    # Security settings (translated from Russian)
    safety()
def test():
    """Run all tests on the whole project."""
    with warn_only(), section("testing project", green):
        results = [test_backend()]
        if not all(res.succeeded for res in results):
            abort("Tests failed")
def test(srcdir, ignore_fail):
    """Run ``make test`` in *srcdir*; failures abort unless *ignore_fail*."""
    cmd = flo('cd {srcdir} && make test')
    if not ignore_fail:
        run(cmd)
    else:
        with warn_only():
            run(cmd)
def deploy(profile, branch):
    """Deploy specified branch

    Profile can be either "development" or "production"
    Branch is branch name as used by Git (ex. origin/feature-1)

    This command will do the following steps:
    * checkout branch
    * install required Python packages using pip
    * migrate database using South
    * run "manage.py collectstatic"
    * restart uwsgi daemon
    """
    # NOTE(review): assert-based validation disappears under `python -O`;
    # kept as-is since callers may rely on AssertionError here.
    assert exists(env.project_dir), ''' It seems that you did not do install on that host. Please use "install" command first'''
    assert profile in ('development', 'production')
    env.profile = profile
    assert branch, "Please specify branch to checkout on that host"
    checkout(branch)
    install_dependencies()
    migrate()
    with virtualenv():
        run('env DJANGO_SETTINGS_MODULE=keen.settings.%(profile)s ./manage.py collectstatic --noinput' % env)
        run('env DJANGO_SETTINGS_MODULE=keen.settings.%(profile)s ./manage.py compress' % env)
    # uwsgi may not be running yet on a first deploy
    with warn_only():
        uwsgi_stop()
    # giv it some time to release port
    local('sleep 3')
    uwsgi_start()
    nginx_restart()
def pack():
    """Build the wordnik AWS Lambda deployment zip on a remote builder
    host, copy it back locally, then trigger update()."""
    # Make sure machine and dev tools are up to date
    sudo('sudo yum -y update')
    sudo('yum -y upgrade')
    sudo('yum -y install python27-devel python27-pip')
    # sudo('pip install --upgrade pip')
    # create a new source distribution zipfile
    local('git archive --format=zip HEAD -o %s' % gitfile, capture=False)
    # upload the source zipfile to the temporary folder on the server
    deploy_filename = '/tmp/wordnik.%s.zip' % str(time.time())
    put(gitfile, deploy_filename)
    local('rm %s' % gitfile)
    # create a place where we can unzip the zipfile, then enter
    # that directory and unzip it
    with warn_only():
        run('rm -rf ~/lambda')  # may not exist yet
    run('mkdir ~/lambda')
    run('virtualenv venv')
    with cd('~/lambda'):
        run('unzip %s' % deploy_filename)
        run('source ../venv/bin/activate && pip install numpy==1.10.4 -t . && pip install -r requirements.txt -t .')
        run('zip -9r ../wordnik.zip .')
    # Get the file back onto our local machine
    local('scp %s@%s:~/wordnik.zip %s' % (env.user, env.hosts[0], lambdafile))
    update()
def try_repeatedly(method, max_retries=None, delay=None):
    """Execute the given Fabric call, retrying up to a certain number of times.

    *method* is a wrapper around a Fabric :func:`run`/:func:`sudo` call
    returning that call's result.  It is executed at least once and up to
    ``max_retries`` additional times while it keeps failing, sleeping
    ``delay`` seconds between attempts.  The final attempt runs outside
    the warn-only context so a last failure propagates normally.

    :param callable method: Wrapped Fabric method to execute
    :param int max_retries: Max retries after a failed call (default 1)
    :param float delay: Seconds between successive calls (default 0)
    :return: The result of running :code:`method`
    """
    attempts = 1 if max_retries is None else max_retries
    pause = 0 if delay is None else delay
    with warn_only():
        for _ in range(attempts):
            outcome = method()
            if not outcome.failed:
                return outcome
            time.sleep(pause)
    # final try outside the warn_only block so that if it
    # fails it'll just blow up or do whatever it was going to
    # do anyway.
    return method()
def run(self, resource, *args, **kwargs):
    """Execute *args* as one command over SSH on *resource*, honouring
    the use_sudo / cwd / env / warn_only keyword options."""
    log.debug('SSH: %s', args)
    executor = fabric_api.sudo if kwargs.get('use_sudo', False) else fabric_api.run
    managers = [fabric_api.settings(**self._fabric_settings(resource))]
    if kwargs.get('cwd'):
        managers.append(fabric_api.cd(kwargs['cwd']))
    if kwargs.get('env'):
        managers.append(fabric_api.shell_env(**kwargs['env']))
    if kwargs.get('warn_only', False):
        managers.append(fabric_api.warn_only())
    with nested(*managers):
        res = executor(' '.join(args))
    return self.get_result(res)
def collect_results_from_nodes(res_dir):
    """Gather YCSB run/load outputs, config, logs, git state, JMX
    parameter logs and dstat traces from the current node into
    ``res_dir/<host>/``. All transfers are best-effort."""
    node_dir = path.join(res_dir, env.host_string)
    local("mkdir -p {node_dir}".format(**locals()))
    with warn_only():
        with hide('everything'):
            # YCSB stdout/stderr from both run and load phases
            for i in [YCSB_RUN_OUT_FILE, YCSB_RUN_ERR_FILE, YCSB_LOAD_OUT_FILE, YCSB_LOAD_ERR_FILE]:
                print("getting {0} for node {1}".format(i, env.host_string))
                get(i, node_dir)
            print("getting {0} for node {1}".format('conf', env.host_string))
            get(path.join(CODE_DIR, 'conf'), node_dir)
            print("getting {0} for node {1}".format('log file', env.host_string))
            get(LOG_FILE, node_dir)
            # record the exact commit of both code trees
            git_status = {}
            for folder in [CODE_DIR, YCSB_CODE_DIR]:
                with cd(folder):
                    out = run("git log -n 1", pty=False)
                    git_status[folder] = out
            with open(path.join(node_dir, "git_status.out"), 'w') as f:
                print("getting {0} for node {1}".format('git', env.host_string))
                f.write(str(git_status))
                f.write('\n')
            # dump selected JMX counters into per-parameter logs
            for parameter in ['LargeReplSet', 'MyLargeReplSet', 'AllReads', 'AllWrites']:
                with open(path.join(node_dir, parameter + ".log"), 'a') as f:
                    print("getting {0} for node {1}".format(parameter, env.host_string))
                    f.writelines(jmx.get_value(parameter) or [])
            print("getting {0} for node {1}".format('dstat', env.host_string))
            get(DSTAT_SERVER, node_dir)
            get(DSTAT_YCSB, node_dir)
def lms(): '''Install and start a Logitech Media Server (lms). More infos: * http://wiki.slimdevices.com/index.php/Logitech_Media_Server * http://wiki.slimdevices.com/index.php/DebianPackage * http://www.mysqueezebox.com/download * XSqueeze on Kodi: * http://kodi.wiki/view/Add-on:XSqueeze * http://forum.kodi.tv/showthread.php?tid=122199 ''' # cf. http://wiki.slimdevices.com/index.php/DebianPackage#installing_7.9.0 cmds = '''\ url="http://www.mysqueezebox.com/update/?version=7.9.0&revision=1&geturl=1&os=deb" latest_lms=$(wget -q -O - "$url") mkdir -p ~/.logitech_media_server_sources cd ~/.logitech_media_server_sources wget $latest_lms lms_deb=${latest_lms##*/} sudo dpkg -i $lms_deb ''' run(cmds) run('sudo usermod -aG audio squeezeboxserver') with warn_only(): run('sudo addgroup lms') run('sudo usermod -aG lms squeezeboxserver') username = env.user run(flo('sudo usermod -aG audio {username}')) print('\n Set correct folder permissions manually, eg:') print(' > ' + cyan(flo('chown -R {username}.lms <path/to/your/media>'))) hostname = env.host print(flo('\n lms frontend available at http://{hostname}:9000'))
def runserver():
    """Run the project web server in docker, first checking the webpack
    image exists and starting the mongo container."""
    print(yellow('Running docker process webserver...'))
    with lcd('.'):
        # the webpack build must have produced a labelled container
        ret = local(
            'docker ps --quiet --filter "label={project_name}-webpack"'.format(
                project_name=project_name), capture=True)
        if len(ret) == 0:
            abort(
                red('Could not runserver. Have you run '
                    '\'fab development.webpack\'?'))
        # starting mongo may fail if it was never set up; checked below
        with warn_only():
            result = local('docker start {project_name}-mongo'.format(
                project_name=project_name))
        if result.failed:
            abort(
                red('Could not start mongodb. Have you run \'setup_mongodb\'?')
            )
        local(
            'docker run --tty --interactive --volume "{local_pwd}":/opt/project '
            '--entrypoint="/opt/project/run-vkt-voter" --publish=3000:3000 '
            '--network={project_name}-network '
            '--network-alias=webserver '
            #'--user=$(id -u):$(id -g) '
            '"{project_name}"'.format(local_pwd=local_pwd,
                                      project_name=project_name))
def compare_builds(url1, url2):
    """ Task to to compare packages in two different release engineering
    builds and verify rpm signature.

    Downloads every package listed under *url2*, verifies its rpm
    checksum and signature (against the SIGNATURE environment variable),
    then compares the sorted package lists of both builds name-by-name
    and prints a summary report.

    Bug fixes: the verification loop downloaded packages from ``list2``
    but indexed ``list1`` in the ``rpm -K`` check (now consistently
    ``list2``); the "versions are the same" check compared against
    ``len(list1) - 1`` (off by one, now ``len(list1)``); the comparison
    loop is bounded by the shorter list to avoid an IndexError when the
    builds have different package counts.

    :return: Check Package Versions in both builds are same and all
        packages under RCM_COMPOSE_URL are signed!
    """
    signature = os.getenv('SIGNATURE')
    flag = flag1 = flag2 = 0
    list1 = get_packages_name(urlopen(url1).read())
    list1.sort()
    list2 = get_packages_name(urlopen(url2).read())
    list2.sort()
    with warn_only():
        try:
            run('mkdir packages')
            for pkg in range(len(list2)):
                get_packages(url2, list2[pkg])
            for pkg in range(len(list2)):
                # check the package we actually downloaded (list2)
                if 'NOT OK' not in run('rpm -K packages/' + list2[pkg]):
                    flag1 = flag1 + 1
                    if signature in run('rpm -qpi packages/' + list2[pkg] + '| grep "Signature" '):
                        flag2 = flag2 + 1
                    else:
                        print('signature ' + signature + ' not matched for ' + list2[pkg])
                else:
                    print(list2[pkg] + 'package is not signed')
        finally:
            run('rm packages -rf')
    print("========================= Overall Report ======================")
    print("There are " + str(len(list1)) + " packages in " + url1 +
          " and " + str(len(list2)) + " packages in " + url2)
    # pairwise name comparison; bounded by the shorter list
    for pkg in range(min(len(list1), len(list2))):
        if list1[pkg] == list2[pkg]:
            flag = flag + 1
        else:
            print("The version of package " + list1[pkg] +
                  " from build1 is not similar to version of package " +
                  list2[pkg] + " from build2.")
    if flag == len(list1):
        print("Versions in both builds are same")
    else:
        print(str((len(list1)) - flag) + " packages version found mismatched!")
    if flag1 == len(list1):
        print("All packages are signed!")
    else:
        print(str(len(list1) - flag1) + 'packages are not signed!!')
    if flag2 == len(list1):
        print("Signature matched for all packages!!")
    else:
        print('Signature ' + signature + ' for ' + str(len(list1) - flag2) + ' packages not matched!!')
    print("================================================================")
def _ssh_command(resource, *args, **kwargs):
    """Execute *args* remotely (optionally via sudo) inside the fabric
    settings derived from *resource*; supports cwd/env/warn_only kwargs."""
    log.debug('SSH: %s', args)
    executor = fabric_api.sudo if kwargs.get('use_sudo', False) else fabric_api.run
    managers = [
        fabric_api.settings(**ResourceSSHMixin._fabric_settings(resource)),
    ]
    if 'cwd' in kwargs:
        managers.append(fabric_api.cd(kwargs['cwd']))
    if 'env' in kwargs:
        managers.append(fabric_api.shell_env(**kwargs['env']))
    if 'warn_only' in kwargs:
        managers.append(fabric_api.warn_only())
    with nested(*managers):
        return executor(' '.join(args))
def main():
    """Daemon loop: for each pending account request, ensure the local
    user exists (creating it with a skeleton home if necessary), push the
    account to the target host and clear the pending entry on success.

    Bug fix: the user-lookup used a bare ``except:`` which silently
    swallowed *any* error; ``pwd.getpwnam`` raises :class:`KeyError` for
    an unknown user, so only that is caught now.
    """
    while True:
        for item in get_pendings():
            try:
                uid = pwd.getpwnam(item['user']).pw_uid
            except KeyError:
                # unknown user: create it with a private skeleton home
                subprocess.call(['useradd', item['user']])
                home = join('/home', item['user'])
                uid = pwd.getpwnam(item['user']).pw_uid
                subprocess.call(['mkdir', home])
                subprocess.call(['cp', '-rT', join('/etc', 'skel'), home])
                subprocess.call([
                    'chown', '-R',
                    '{user}:{user}'.format(user=item['user']), home
                ])
                subprocess.call(['chmod', '700', home])
            # remote adduser is best-effort; result checked below
            with warn_only():
                result = execute(adduser,
                                 hosts=[item['host']],
                                 user=item['user'],
                                 password=item['password'],
                                 uid=uid)
            host, ret = result.popitem()
            if ret == '':
                delete_pending(item['_id'])
        sleep(300)
def server_rm():
    """Stop the stack, then remove its docker-compose containers and volumes."""
    setup_env()
    server_stop()
    # tolerate failure: containers may already be gone
    with api.warn_only(), api.cd(api.env.project_path), api.cd(api.env.sub_path):
        api.run('docker-compose -f "%(docker_compose_file)s" rm -f -v' % api.env)
def _upload_template(filename, destination, context=None, chown=True, user='******', **kwargs):
    """Render a jinja template from the project's templates directory and
    upload it to *destination*; optionally show a diff against the
    currently deployed file first (env.show_diff_when_upload holds the
    diff command)."""
    kwargs['use_jinja'] = True
    kwargs['template_dir'] = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), os.path.pardir,
        'templates')
    kwargs['context'] = context
    kwargs['mkdir'] = False
    kwargs['chown'] = chown
    kwargs['user'] = user
    kwargs['use_sudo'] = True
    kwargs['backup'] = env.backup_conf_files
    if env.show_diff_when_upload and exists(destination):
        # upload next to the target, diff against the live file,
        # then move the new version into place
        destination_temp = destination + '.temp'
        upload_template(filename, destination_temp, **kwargs)
        with warn_only():
            # diff exits non-zero when files differ; that's expected
            run('{} {} {}'.format(env.show_diff_when_upload, destination,
                                  destination_temp))
            run('rm {}'.format(destination))
            run('mv {} {}'.format(destination_temp, destination))
    else:
        upload_template(filename, destination, **kwargs)
def operation(self, target, symlink_name):
    """Create a symlink named *symlink_name* in the configured symlink
    directory pointing at *target*."""
    link_path = os.path.join(utils.setting('path', self.target),
                             self.symlink_directory, symlink_name)
    self.info("Symlinking {0} to {1}".format(target, link_path))
    with hide('everything'), warn_only():
        self.subtask(base.run_target, 'local',
                     "ln -s '{0}' '{1}'".format(target, link_path))
def install_listbot():
    """Install listbot for logstash.

    Downloads the listbot map generators, runs them, copies the results
    into the running elk container, registers daily cron jobs for both
    generators inside the container, and commits the container as
    sebp/elk:custom before restarting it.

    Bug fix: after reassigning ``test`` to the iprep cron line, the
    original code re-ran the stale ``test2`` command built for the cve
    job, so the iprep entry never reached the crontab; ``test2`` is now
    rebuilt after ``test`` changes.  (Also renamed the builtin-shadowing
    ``id`` local to ``cid``.)
    """
    sudo('apt -y install curl prips')
    sudo('wget -O /tmp/gen_iprep_map.sh https://raw.githubusercontent.com/dtag-dev-sec/listbot/master/src/gen_iprep_map.sh')
    sudo('wget -O /tmp/gen_cve_map.sh https://raw.githubusercontent.com/dtag-dev-sec/listbot/master/src/gen_cve_map.sh')
    sudo('chmod 777 /tmp/gen_cve_map.sh /tmp/gen_iprep_map.sh')
    sudo('cd /tmp; /tmp/gen_cve_map.sh')
    sudo('cd /tmp; /tmp/gen_iprep_map.sh')
    time.sleep(10)  # wait for reboot to finish
    # find the running elk container's id
    cid = sudo("docker ps | awk '{ print $1,$2 }' | grep elk | awk '{print $1 }'")
    with warn_only():
        sudo('docker exec %s mkdir /etc/listbot' % str(cid))  # may already exist
    sudo('docker cp /tmp/cve.yaml %s:/etc/listbot/cve.yaml' % str(cid))
    sudo('docker cp /tmp/iprep.yaml %s:/etc/listbot/iprep.yaml' % str(cid))
    sudo('docker cp /tmp/gen_cve_map.sh %s:/etc/listbot/gen_cve_map.sh' % str(cid))
    sudo('docker cp /tmp/gen_iprep_map.sh %s:/etc/listbot/gen_iprep_map.sh' % str(cid))
    # Two different types of quotes were not enough and escaping did not
    # work inside the sudo command, therefore this bypass.
    test = "\"0 0 * * * /etc/listbot/gen_cve_map.sh\""
    test2 = "docker exec %s bash -c 'echo %s >> /tmp/crony'" % (str(cid), str(test))
    sudo('%s' % str(test2))
    test = "\"0 0 * * * /etc/listbot/gen_iprep_map.sh\""
    # rebuild the command for the new cron line (previously the stale cve
    # command was executed a second time here)
    test2 = "docker exec %s bash -c 'echo %s >> /tmp/crony'" % (str(cid), str(test))
    sudo('%s' % str(test2))
    sudo('docker exec %s crontab /tmp/crony' % str(cid))
    # it does not matter if the version is the same, it will be overwritten
    sudo('docker commit %s sebp/elk:custom' % str(cid))
    sudo('docker stop %s' % str(cid))
    sudo('docker run -d -p 5601:5601 -p 9200:9200 -p 5044:5044 --net=host --restart unless-stopped -it sebp/elk:custom')
    print('Done with intall_listbot, server restarting now')
def configure():
    """ Copy pipeline makefile over, make directories etc... """
    sudo("gpasswd -a ubuntu docker")
    sudo("apt-get -qy install make")
    # Install aws cli
    sudo("apt-get -qy install python-minimal")
    sudo(
        "curl --silent --show-error --retry 5 https://bootstrap.pypa.io/get-pip.py | sudo python"
    )
    sudo("pip install awscli")
    with warn_only():
        # best-effort: local ~/.aws credentials may be absent
        put("~/.aws", "/home/ubuntu")
    # openstack doesn't format /mnt correctly...
    sudo("umount /mnt")
    sudo("parted -s /dev/vdb mklabel gpt")
    sudo("parted -s /dev/vdb mkpart primary 2048s 100%")
    sudo("mkfs -t ext4 /dev/vdb1")
    sudo("sed -i 's/auto/ext4/' /etc/fstab")
    sudo("sed -i 's/vdb/vdb1/' /etc/fstab")
    sudo("mount /mnt")
    sudo("chmod 1777 /mnt")
    sudo("chown ubuntu:ubuntu /mnt")
    """ Downgrade docker to version supported by toil """
    run("wget https://packages.docker.com/1.12/apt/repo/pool/main/d/docker-engine/docker-engine_1.12.6~cs8-0~ubuntu-xenial_amd64.deb"
        )  # NOQA
    sudo("apt-get -y remove docker docker-engine docker.io docker-ce")
    sudo("rm -rf /var/lib/docker")
    sudo("dpkg -i docker-engine_1.12.6~cs8-0~ubuntu-xenial_amd64.deb")
    put("{}/Makefile".format(os.path.dirname(env.real_fabfile)), "/mnt")
def pack():
    """Build the wordnik AWS Lambda deployment zip on a remote builder
    host, copy it back locally, then trigger update(). (Duplicate of the
    other pack() variant in this file.)"""
    # Make sure machine and dev tools are up to date
    sudo('sudo yum -y update')
    sudo('yum -y upgrade')
    sudo('yum -y install python27-devel python27-pip')
    # sudo('pip install --upgrade pip')
    # create a new source distribution zipfile
    local('git archive --format=zip HEAD -o %s' % gitfile, capture=False)
    # upload the source zipfile to the temporary folder on the server
    deploy_filename = '/tmp/wordnik.%s.zip' % str(time.time())
    put(gitfile, deploy_filename)
    local('rm %s' % gitfile)
    # create a place where we can unzip the zipfile, then enter
    # that directory and unzip it
    with warn_only():
        run('rm -rf ~/lambda')  # may not exist yet
    run('mkdir ~/lambda')
    run('virtualenv venv')
    with cd('~/lambda'):
        run('unzip %s' % deploy_filename)
        run('source ../venv/bin/activate && pip install numpy==1.10.4 -t . && pip install -r requirements.txt -t .'
            )
        run('zip -9r ../wordnik.zip .')
    # Get the file back onto our local machine
    local('scp %s@%s:~/wordnik.zip %s' % (env.user, env.hosts[0], lambdafile))
    update()
def run(self):
    """Ensure the named virtualenv exists (creating it via
    virtualenvwrapper when missing) and install the requirements file
    into it."""
    # probe for an existing virtualenv of this name
    found = False
    with warn_only():
        if run("test -d ~/.virtualenvs/{}".format(self._name)).failed:
            pass
        else:
            found = True
    if not exists(self._path):
        run("mkdir {}".format(self._path))
    if not found:
        # create the virtualenv through virtualenvwrapper
        with cd(self._path), \
                prefix('WORKON_HOME={}'.format(self._workon_home)), \
                prefix('source /usr/local/bin/virtualenvwrapper.sh'):
            run('mkvirtualenv --no-site-packages {}'.format(self._name))
            # with prefix('workon {}'.format(self._name)):
            #     run('setvirtualenvproject')
    # activate the env and install/upgrade requirements
    with cd(self._path), \
            prefix('WORKON_HOME={}'.format(self._workon_home)), \
            prefix('source /usr/local/bin/virtualenvwrapper.sh'), \
            prefix('workon {}'.format(self._name)):
        if self._requirements_filename:
            run('pip install -U -r {}'.format(self._requirements_filename),
                pty=False)
def do_pkg_rpm(self, destdir=None, targz_fp=None):
    """Build an RPM from the package tarball.

    Unpacks the source tarball, lays out an rpmbuild tree, clones and
    patches the skeleton spec file (metadata plus the file list scraped
    from the generated Makefile's install target), then invokes rpmbuild.

    :param destdir: working directory for the build tree
    :param targz_fp: path to an existing source tarball; built on demand
        when omitted
    """
    if not targz_fp:
        targz_fp = self.pkg_tarball(destdir=destdir)
    # untar tarball
    with archive.tarfile(targz_fp, 'r:gz') as tar:
        tar.extractall(destdir)
    # create skeleton for rpmbuild
    rpmbuild_dir = join(destdir, 'rpmbuild')
    for d in ['RPMS', 'SOURCES', 'SPECS', 'SRPMS', 'BUILD']:
        d = join(rpmbuild_dir, d)
        os.makedirs(d)
    # copy tarball to the right place
    shutil.copyfile(targz_fp, join(rpmbuild_dir, 'SOURCES', self.targz_name))
    # clone spec file
    specfile = '{0}.spec'.format(self.name)
    specfile_fp = join(destdir, specfile)
    shutil.copyfile(join(self.repo_basedir, 'etc', 'packaging', 'skel.spec'),
                    specfile_fp)
    # extract list of installed files from makefile
    makefile_fp = join(destdir, self.pkgname, 'Makefile')
    content = open(makefile_fp, 'rt').read()
    content = text.safefind('(?s)install:\s*.*', content)
    files = []
    lines = content.split('\n')
    for line in lines:
        # lines installing into ${DESTDIR} name the target path last
        if '${DESTDIR}' in line:
            parts = re.split('[ ]', line)
            path = parts[-1]
            path = re.sub('^' + re.escape('${DESTDIR}'), '', path)
            files.append(path)
    files.sort()
    # patch spec file
    context = {
        '(?m)^(Summary:\s*).*': '\g<1> {0}'.format(self.rpmmeta_summary),
        '(?m)^(Name:\s*).*': '\g<1> {0}'.format(self.name),
        '(?m)^(Version:\s*).*': '\g<1> {0}'.format(self.versiontag),
        '(?m)^(Release:\s*).*': '\g<1> {0}'.format(self.buildnum),
        '(?m)^(License:\s*).*': '\g<1> {0}'.format(self.license),
        '(?m)^(Group:\s*).*': '\g<1> {0}'.format(self.rpmmeta_group),
        '(?m)^(BuildArch:\s*).*': '\g<1> {0}'.format(self.rpmmeta_arch),
        '(?m)^(Source:\s*).*': '\g<1> {0}'.format(self.targz_name),
        # NOTE(review): {0} here is `d`, the leftover loop variable from
        # the skeleton-directory loop (i.e. the BUILD dir); possibly
        # rpmbuild_dir was intended -- confirm before changing.
        '(?m)^(BuildRoot:\s*).*': '\g<1> {0}/%{{name}}-buildroot'.format(d),
        '(?m)^(Requires:\s*).*': '\g<1> {0}'.format(', '.join(self.rpmmeta_requires)),
        '(?m)^.*[<]desc[>].*': self.rpmmeta_desc,
        '(?m)^.*[<]files[>].*': '\n'.join(files),
    }
    text.patch_file(context, specfile_fp, dest=specfile_fp, literal=False)
    with warn_only():
        run_cmd('rpmbuild -ba {0} --define "_topdir {1}"'.format(
            specfile_fp, rpmbuild_dir))
def _detect_pkg_manager():
    """Detect which package manager exists on the remote host and store
    its path in ``env.pkg_manager``.

    Bug fix: the original loop overwrote ``env.pkg_manager`` with the
    output of every ``which`` call regardless of success, so it always
    ended up holding the *last* candidate's (often empty) result even
    when apt-get or yum was present.  We now stop at the first manager
    actually found; if none is found the value is an empty string, as
    before.
    """
    print(yellow('Starting >> %s()' % _fn()))
    managers = ['apt-get', 'yum', 'zypper']
    env.pkg_manager = ''  # default when no manager is found
    with warn_only():
        for candidate in managers:
            result = run('which %s' % candidate)
            if result.succeeded and result.stdout:
                env.pkg_manager = result.stdout
                break
    print(green('Package manager is %(pkg_manager)s' % env))
    return
def bootstrap():
    """Provision a CentOS build instance: kernel/clock configuration,
    base packages and dev tools, time sync, and an unprivileged build
    user with an rpmbuild environment."""
    # Set kernel options
    print(blue('Setting sysctl options...'))
    with warn_only():
        sysctl('vm.swappiness', 0)
    # Basic system configuration
    print(blue('Tweaking configuration...'))
    file_update('/etc/sysconfig/clock', 'ZONE=', 'ZONE="America/Chicago"')
    run('cp -f /usr/share/zoneinfo/America/Chicago /etc/localtime')
    # Update existing packages
    print(blue('Updating installed packages...'))
    yum.update()
    # Install base packages and dev tools
    print(blue('Installing base packages and development tools...'))
    yum.install('yum-utils', 'ntp', 'ntpdate', 'vim-enhanced', 'git',
                'subversion', 'bind-utils', 'telnet', 'traceroute', 'curl',
                'wget', 'rpm-build', 'rpmdevtools', 'spectool')
    yum.install_group('Development Tools')
    # Perform a quick time sync -- particularly good for VMs
    print(blue('Synchronizing system time...'))
    with warn_only():
        service.stop('ntpd')
        run('ntpdate time.nist.gov')
        run('ntpdate tick.usno.navy.mil')
        service.start('ntpd')
        service.enable('ntpd')
    # Set up build user and their unprivileged environment
    print(blue('Creating %s user and pushing SSH key...' % build_user))
    with warn_only():
        sudo('useradd -m %s' % build_user)  # user may already exist
    keys.push_key(user_name=build_user)
    sudo('chown -R %s.%s ~%s/.ssh' % (build_user, build_user, build_user))
    print(blue('Setting up rpmbuild environment...'))
    with settings(user=build_user, warn_only=True):
        run('rpmdev-setuptree')
        run('git clone %s' % sysfm_repo)
    # Huzzah!
    print(green('CentOS build instance ready!'))
def flush_memcached():
    """ Delete all cached data. """
    host, port = settings.CACHES['default']['LOCATION'].split(':')
    # nc may exit non-zero; flushing is best-effort
    with warn_only():
        env.run('echo flush_all | nc {host} {port} -vv'.format(host=host, port=port))
def nginx_started():
    """ Returns true/false depending on nginx service is started """
    if not is_systemd():
        return fabtools.service.is_running('nginx')
    # return fabtools.systemd.is_running('nginx')
    with warn_only():
        status = fabric.api.sudo('systemctl is-active nginx.service')
    return 'inactive' not in status
def run_IReS_examples():
    """Start IReS quietly, then run each bundled example class; a failing
    example does not stop the rest."""
    example_classes = ("TestOperators", "TestWorkflows", "TestWorkflowsIMR")
    with quiet():
        start_IReS()
    with cd("%s/asap-platform/asap-client" % IRES_HOME):
        for eg in example_classes:
            with warn_only():
                run("mvn exec:java -Dexec.mainClass="
                    "\"gr.ntua.cslab.asap.examples.%s\"" % eg)
def create_loop_devices():
    """Create /dev/loop0..29 device nodes once per process (tracked via a
    module-level flag); individual mknod failures are ignored."""
    global created_loop_devices
    if created_loop_devices:
        return
    local("modprobe loop max_loop=64")
    for idx in range(30):
        # node may already exist; keep going
        with warn_only():
            local("sudo mknod -m 660 /dev/loop%i b 7 %i" % (idx, idx))
    created_loop_devices = True
def local(*args, **kwargs):
    """Run fabric's local() with output captured and warnings hidden;
    raise OperationFailure instead of aborting on error."""
    kwargs.setdefault('capture', True)
    with warn_only(), hide('warnings'):
        result = fabric_api.local(*args, **kwargs)
        if not result.succeeded:
            raise OperationFailure(result)
        return result
def download():
    """Download historical CSV data for HK stocks 0001-9999 from Yahoo
    Finance into data/<id>.csv; failed downloads (unlisted ids) are
    skipped.

    Improvement: the zero-padded stock id is produced with
    ``str.zfill(4)`` instead of slicing into a literal ``'0000'`` (which
    also shadowed the builtin ``id``); the resulting ids are identical.
    """
    for i in range(1, 10000):
        stock_id = str(i).zfill(4)
        # skip the stock ids that are not listed
        with warn_only():
            local('wget -O data/{}.csv "http://real-chart.finance.yahoo.com/table.csv?s={}.HK"'
                  .format(stock_id, stock_id))
def install_deap(version, branch):
    """Reinstall deap from the given git branch into the timeit virtualenv."""
    with lcd(timeit_dir):
        activate = ". {venv}{version}/bin/activate".format(venv=venv_dir,
                                                          version=version)
        with prefix(activate):
            # wipe any previously installed copy first
            with warn_only():
                local("pip uninstall -y deap")
                local("find $VIRTUAL_ENV -name deap -exec rm -rf {} \;")
            with lcd(deap_dir):
                local("git checkout origin/{0}".format(branch))
                local("python setup.py install")
def restart():
    """Reload gunicorn (or bounce the dexter service) and restart celery."""
    # on first deploy, dexter won't not be running
    reload_cmd = ("kill -HUP `cat %s/gunicorn.pid` || initctl restart dexter "
                  "|| (initctl stop dexter; initctl start dexter)"
                  % (env.repo_dir))
    sudo(reload_cmd)
    # restart dexter-celery
    with warn_only():
        sudo('initctl stop dexter-celery')
        sudo('initctl start dexter-celery')
def server_stop():
    """Stop the docker-compose stack, running the stop hooks around it."""
    setup_env()
    with api.warn_only(), api.cd(api.env.project_path), api.cd(api.env.sub_path):
        run_hook('before-stop')
        api.run('docker-compose -f "%(docker_compose_file)s" stop' % api.env)
        run_hook('after-stop')
def clean():
    """Remove build artifacts: C++ binaries/objects and the OCaml _build tree."""
    where = local
    cleanup_cmds = (
        "rm -f cpp/bin/*.out",
        "rm -f cpp/src/caas/*.o",
        "rm -f cpp/src/examples/*.o",
        "rm -f cpp/src/lib/*.o",
        "rm -f cpp/src/tests/*.o",
        "cd ocaml/ && rm -rf _build",
    )
    # artifacts may not exist; tolerate failures
    with warn_only():
        for cmd in cleanup_cmds:
            where(cmd)
def deploy():
    """Pull, collect static files, migrate, then restart apache for taskburn."""
    with cd('/home/mc706/webapps/taskburn/taskburn'):
        for step in ('git pull',
                     'python2.7 manage.py collectstatic --noinput',
                     'python2.7 manage.py migrate'):
            run(step)
    # local('python2.7 manage.py test --noinput') #no tests written yet
    with warn_only():
        with cd('/home/mc706/webapps/taskburn/apache2/'):
            run('bin/restart')