Exemplo n.º 1
0
def deploy_php(folder_info, site_info):
    """Upload the local deploy directory, then unpack each site package remotely."""
    # Sync the locally built artefacts into the remote upload folder.
    rsync_project(
        local_dir='../deploy/upload/',
        remote_dir=folder_info.upload(),
    )
    for pkg in site_info.packages():
        print(yellow(pkg['name']))
        subfolder = pkg.get('folder', None)
        if subfolder:
            # Package installs into its own subdirectory; create it on first deploy.
            target = os.path.join(folder_info.install(), subfolder)
            if not exists(target):
                print(yellow('  {}'.format(target)))
                run('mkdir -p {}'.format(target))
        else:
            target = folder_info.install()
        archive_name = pkg['archive']
        extra_opts = pkg.get('tar', '')
        with cd(target):
            print(yellow('  {}'.format(archive_name)))
            print(yellow('  {}'.format(extra_opts)))
            archive_path = os.path.join(folder_info.upload(), archive_name)
            run('tar {} -xzf {}'.format(extra_opts, archive_path))
Exemplo n.º 2
0
def XXXX_deploy ():
    """
    Deploy the packages in the deployment machines
    """
    # Announce the target host before asking for confirmation.
    print(green("Installing packages at %s" % str(env.host_string)))

    if confirm(red('Install the packages at the %s?' % (env.host_string)), default = False):
        # Stop and deregister the service before replacing its RPMs.
        print(yellow("... stopping XXXX"))
        if _exists('/etc/init.d/XXXX'):
            sudo('service XXXX stop')
            sudo('rm -f /etc/init.d/XXXX')

        # Ensure an empty remote staging directory under the admin prefix.
        with cd(env.admin.prefix):
            print(yellow("... cleaning up old RPMs"))
            if not _exists('tmp'): run('mkdir tmp')
            run('rm -rf tmp/*')

        directory = os.path.join(env.admin.prefix, 'tmp')
        with cd(directory):
            print(yellow("... uploading RPMs"))
            for f in env.packages.rpms:
                # NOTE(review): the LOCAL path given to put() is built from the
                # REMOTE staging directory; this only works if the same path
                # exists locally — confirm against the callers/config.
                put(os.path.join(directory, f), '.')

            print(yellow("... installing software"))
            # Non-interactive yum install of everything uploaded; GPG checks disabled.
            sudo('yum install -R 2 -q -y --nogpgcheck  *.rpm')

            # The service is intentionally left stopped after installation.
            print(red("... XXXX is STOPPED at %s!" % env.host_string))
Exemplo n.º 3
0
def import_file(params):
    """
    Given the database credentials and a import file it will import into a database
    """
    print yellow("Warning mysql.import_file is deprecated from version 1.0")
    params = utils.format_params(params)
     
     
    if not exists(params['import_file']):
        print(yellow("Mysql file `%s` does not exist, so no import is executed." % params['import_file']))    
    else:
        command = """
        mysql -h %(host)s -u %(user)s --password='******' %(database)s  < %(import_file)s
        """
            
        # Make params
        command_params = {'user': params['user'],
                          'password': params['password'],
                          'database': params['database'],
                          'host': params['host'],
                          'import_file':params['import_file']}
        
        run(command % command_params)
        
        print(green("Mysql file `%s` successfully imported." % command_params['import_file']))     
Exemplo n.º 4
0
def install_packages_in(env, directory, patterns, remote_upload_dir="/tmp/"):
    """
    Install all the packages in the `directory` that match `pattern` in a remote machine.

    :param env: fabric-style environment (kept for signature compatibility)
    :param directory: local directory tree scanned recursively for packages
    :param patterns: iterable of fnmatch patterns, e.g. ['*.deb']
    :param remote_upload_dir: remote staging directory for the upload
    """
    SUBDIR = "inst"

    # Collect EVERY file matching any pattern. A stray `break` previously
    # stopped after the first match per pattern in each directory, which
    # contradicted the "all the packages" contract above.
    matches = set()
    for root, dirnames, filenames in os.walk(directory):
        for pattern in patterns:
            for filename in fnmatch.filter(filenames, pattern):
                matches.add(os.path.join(root, filename))

    if len(matches) == 0:
        print(red("no packages to install in %s" % directory))
        return

    print(green("... the following packages will be installed"))
    print(green("...... %s" % ", ".join(matches)))

    # Recreate an empty staging subdirectory remotely.
    with cd(remote_upload_dir):
        print(yellow("... cleaning up old packages"))
        if exists(SUBDIR):
            run("rm -rf {subdir}".format(subdir=SUBDIR))
        run("mkdir {subdir}".format(subdir=SUBDIR))

    with cd(os.path.join(remote_upload_dir, SUBDIR)):
        print(yellow("... uploading packages"))
        for f in matches:
            put(f, ".")

        print(yellow("... installing software"))
        sudo("dpkg --install  *.deb")
Exemplo n.º 5
0
def _vagrant_run_ansible():
    """ Start vagrant and run top.yml. """
    print colors.yellow("[ Starting vagrant... ]")
    local('vagrant up')
    print colors.yellow("[ Running ansible playbooks... ]")
    with lcd('deploy/ansible'):
        local('ansible-playbook -i hosts/vagrant top.yml -v')
Exemplo n.º 6
0
def _check_pyenv(py_versions):
    """
    Check that pyenv and pyenv-virtualenv are installed and set up the
    compilers/virtual envs in case they do not exist
    """

    if os.system('which pyenv'):
        print red("Can't find pyenv!")
        print yellow("Are you sure you have installed it?")
        sys.exit(-2)
    elif os.system('which pyenv-virtualenv'):
        print red("Can't find pyenv-virtualenv!")
        print yellow("Are you sure you have installed it?")
        sys.exit(-2)

    # list available pyenv versions
    av_versions = os.listdir(os.path.join(env.pyenv_dir, 'versions'))

    for py_version in py_versions:
        if py_version not in av_versions:
            print green('Installing Python {0}'.format(py_version))
            pyenv_cmd('install {0}'.format(py_version), capture=True)

        local("echo \'y\' | pyenv virtualenv {0} indico-build-{0}".format(py_version))

        with pyenv_env(py_version):
            local("pip install -r requirements.dev.txt")
Exemplo n.º 7
0
def cleanup_db_backups(params):
    """
    Cleanup sql backup files from folder
    """
    print yellow("Warning mysql.cleanup_db_backups is deprecated from version 1.0")
    params = utils.format_params(params)
    
    if not 'path' in params:
        abort(red("No path param set!"))
        
    if not 'max_backup_history' in params:
        params['max_backup_history'] = 5
            
    with cd(params['path']):
        folder_result = run("ls -tr1 | grep '\.tar.gz$'")
        if len(folder_result) > 0:
            files = folder_result.split('\n')
            
            current_file_count = len(files)
            print("%s backup files found..." % current_file_count)
            
            if len(files) > params['max_backup_history']:
                total_to_remove = len(files) - params['max_backup_history']
                print("Going to remove `%s` files" % total_to_remove)
                for file in files[0:total_to_remove]:
                    file_path = "%s/%s" % (params['path'], file.strip())
                    print("- %s" % file_path)
                    run("rm %s" % (file_path))
                    
            else:
                print("No sql backup files to remove... limit is set to `%s`" % params['max_backup_history'])
        else:
            print(green("No sql backup files available..."))
Exemplo n.º 8
0
def setup_virtualenv():
    """
    Initially creates the virtualenv in the correct places (creating directory structures as necessary) on the
    remote host.
    If necessary, installs setup_tools, then pip, then virtualenv (packages)
    """
    print green('In packages module.  Installing VirtualEnv on host machine...')
    require('virtualenv_root', provided_by=('setup_env'))

    with cd('/tmp'):
        # Bootstrap setuptools via the distro's package manager.
        if env.os == 'ubuntu':
            sudo('apt-get install -y python-setuptools python-setuptools-devel')
        elif env.os == 'redhat':
            sudo('yum install -y python-setuptools python-setuptools-devel')
        else:
            utils.abort('Unrecognized OS %s!' % env.os)
        # setuptools -> pip -> virtualenv, in that order.
        sudo('easy_install pip')
        sudo('pip install virtualenv', pty=True, shell=True)

        print yellow('Require user:%(sudo_user)s password!' % env)
        # NOTE(review): the next line is corrupted — a string literal was
        # redacted to '******' and swallowed part of the statement, so this
        # is not valid Python as written. Recover the original
        # fab_settings(...) call and the mkdir command from version control.
        with fab_settings(user=env.sudo_user, sudo_prompt='ARemind sudo password: '******'mkdir -p %(www_root)s' % env)
            sudo('chown -R %(www_root)s %(virtualenv_root)s' % env)
            sudo('chgrp -R %(www_root)s %(virtualenv_root)s' % env)
            # --clear wipes any existing env; --distribute selects distribute over setuptools.
            args = '--clear --distribute'
            sudo('virtualenv %s %s' % (args, env.virtualenv_root), user=env.sudo_user)
    print green('In packages module. Done installing VirtualEnv...')
Exemplo n.º 9
0
def restart_kraken(instance, wait='serial'):
    """ Restart all krakens of an instance (using pool), serially or in parallel,
        then test them. Testing serially assures that krakens are restarted serially.
        :param wait: string.
               Possible values=False or None: restart in parallel, no test
               'serial': restart serially and test
               'parallel': restart in parallel and test
               'no_test': explicitely skip tests (faster but dangerous)
        The default value is 'serial' because it is the safest scenario
        to restart the krakens of an instance in production.
    """
    # NOTE(review): the docstring mentions False/None values, but this guard
    # rejects anything except the three strings — confirm which is intended.
    if wait not in ('serial', 'parallel', 'no_test'):
        abort(yellow("Error: wait parameter must be 'serial', 'parallel' or 'no_test', found '{}'".format(wait)))
    instance = get_real_instance(instance)
    # Excluded instances get restarted but are never tested.
    excluded = instance.name in env.excluded_instances
    # restart krakens of this instance that are also in the eng role,
    # this works with the "pool" switch mechanism used in upgrade_all()
    for host in set(instance.kraken_engines).intersection(env.roledefs['eng']):
        restart_kraken_on_host(instance, host)
        # Serial mode: test each host immediately after its restart.
        if wait == 'serial' and not excluded:
            test_kraken(instance, fail_if_error=False, wait=True, hosts=[host])
    # Parallel mode: one test pass after every host has been restarted.
    if wait == 'parallel' and not excluded:
        test_kraken(instance, fail_if_error=False, wait=True)
    if wait != 'no_test' and excluded:
        print(yellow("Coverage '{}' has no data, not testing it".format(instance.name)))
    if wait == 'no_test':
        print(yellow("Warning Coverage '{}' not tested: parameter wait='no_test'".format(instance.name)))
Exemplo n.º 10
0
def verify_prerequisites():
    """
    Checks to make sure you have curl (with ssh) and git-ftp installed, Attempts installation via brew if you do not.
    """
    with settings(warn_only=True):

        # curl must have been built with sftp protocol support.
        print(colors.cyan("Verifying your installation of curl supports sftp..."))
        curl_check = local('curl -V | grep sftp', capture=True)
        if curl_check.return_code != 1:
            print(colors.green('Your installation of curl supports sftp!'))
        else:
            print(colors.yellow(
                'Your version of curl does not support sftp. Attempting installation of curl with sftp support via brew...'))
            local('brew update')
            local('brew install curl --with-ssh')
            local('brew link --force curl')

        # git-ftp is a separate brew package.
        print(colors.cyan('Ensuring you have git-ftp installed...'))
        git_ftp_check = local('git ftp --version', capture=True)
        if git_ftp_check.return_code != 1:
            print(colors.green('You have git-ftp installed!'))
        else:
            print(colors.yellow(
                'You do not have git-ftp installed. Attempting installation via brew...'))
            local('brew update')
            local('brew install git-ftp')

        print(colors.green('Your system is ready to deploy code!'))
Exemplo n.º 11
0
def manifest():
    """print manifest file(s) for packages
    """
    for package, egg in eggs():
        print yellow("building manifest for %s" % package)
        mf = make_manifest_file(package, egg)
        print red("manifest: " + mf)
Exemplo n.º 12
0
def build_new_server():
    """ Build a brand new server from scratch. """
    require('hosts', provided_by=['dev', 'staging', 'production'])
    ans = prompt('This will completely wipe out the server. Are you sure (YES/no)?')
    if ans != 'YES':
        print yellow('Glad you were just kidding.')
        return

    ans = prompt(yellow('%s' % env.hosts[0]) + ' will be wiped and rebuilt. Are you sure (YES/no)?')
    if ans != 'YES':
        print "Didn't think so."
        return

    env.keepalive = 30

    install_root_key()
    add_deploy_user()
    common_install()
    setup_environment()
    setup_solr()
    setup_mongo()
    setup_postgres()
    setup_supervisor()
    build_api_app()
    build_web_app()
    setup_nginx()
    full_restart()
Exemplo n.º 13
0
def _no_file_or_backed_up(target_file):
    if files.exists(target_file):
        print yellow("Already exists: %s" % target_file)
        if not console.confirm("Generate a new one? (the old one will be backed up)", False):
            return True
        return False
    return True
Exemplo n.º 14
0
def common_install():
    """Basic one-time setup for common packages for ubuntu servers.

    Currently DB stuff and web stuff are listed together. Could split
    them if we run separate servers.
    """
    print yellow('common_install...')
    sudo('apt-get update')

    # user console tools
    sudo("apt-get -y -q install emacs23-nox unzip lsof byobu httpie")

    # application libraries
    sudo("apt-get -y -q  install python-pip git-core")
    sudo('apt-get -y -q install build-essential python-dev')
    sudo("apt-get -y -q  install libpq-dev")

    sudo("apt-get -y -q install supervisor")
    sudo("pip install virtualenv")

    # web
    # memcached
    sudo('apt-get -y -q install nginx')

    # database
    # if you don't want database, at least install the client
    # sudo("apt-get -y -q install postgresql-client")
    sudo("apt-get -y -q install postgresql postgresql-contrib")
Exemplo n.º 15
0
def setup_solr():
    """ Setup solr server """
    # Solr 4.x runs on the JVM; install OpenJDK 7 from the distro.
    sudo('apt-get -y -q install openjdk-7-jdk')

    ## no need?
    ## sudo('mkdir /usr/java')
    ## sudo('ln -s /usr/lib/jvm/java-7-openjdk-amd64 /usr/java/default')

    # Fetch Solr 4.9.1 and use its bundled "example" app as /opt/solr,
    # owned by the deploy user.
    with cd('/opt'):
        sudo('wget http://archive.apache.org/dist/lucene/solr/4.9.1/solr-4.9.1.tgz')
        sudo('tar -xvf solr-4.9.1.tgz')
        sudo('cp -R solr-4.9.1/example /opt/solr')
        sudo('chown -R deploy:deploy /opt/solr')

    # NOTE(review): the user below was redacted to '******' in this copy;
    # restore the real account name from version control.
    with settings(user='******'):
        # Install the jetty defaults and init script, then make it executable.
        put(_conf_path('default.jetty'), '/etc/default/jetty')
        put(_conf_path('jetty'), '/etc/init.d')
        run('chmod a+x /etc/init.d/jetty')

    # Dedicated log directory owned by the deploy user.
    if not files.exists('/var/log/solr'):
        sudo('mkdir /var/log/solr')
        sudo('chown deploy:deploy /var/log/solr')

    with cd('/opt/deploy/'):
        print yellow('creating project root for %s' % 'solr')
        run('mkdir %s' % 'solr')
Exemplo n.º 16
0
def multi_instance_launch(image_id, connection, min_count, max_count, key_name, instance_type, sec_group):
    print green("creating %s instances from...... %s" % (str(max_count), image_id))
    ress = connection.run_instances(image_id,
                                    min_count=min_count,
                                    max_count=max_count,
                                    key_name=key_name,
                                    instance_type=instance_type,
                                    security_groups=[sec_group])

    count = 1
    iid_list = []
    for instance in ress.instances:
        instance.add_tag("Name", "Instance %s - %s" % (count, strftime("%m-%d-%Y %H:%M:%S")))
        iid_list.append(instance)
        count += 1

    for instance in ress.instances:
        while True:
            if instance.state == 'pending':
                instance.update()
                print yellow("%s's status is still %s. wait for the boot!" % (instance.id, instance.state))
                sleep(2)
            elif instance.state == 'running':
                print green("Instance %s started.... and its current state is %s" % (instance.id, instance.state))
                break

    print red("Ready to rock and roll, pwn them all!")
    return iid_list
Exemplo n.º 17
0
Arquivo: exabgp.py Projeto: a16/gobgp
    def create_config(self):
        # Render an exabgp configuration covering every known peer and write
        # it to <config_dir>/exabgpd.conf.
        # Manpage of exabgp.conf(5):
        # https://github.com/Exa-Networks/exabgp/blob/master/doc/man/exabgp.conf.5
        cmd = CmdBuffer('\n')
        for peer, info in self.peers.iteritems():
            # Addresses are stored in CIDR form; strip the prefix length.
            cmd << 'neighbor {0} {{'.format(info['neigh_addr'].split('/')[0])
            cmd << '    router-id {0};'.format(self.router_id)
            cmd << '    local-address {0};'.format(info['local_addr'].split('/')[0])
            cmd << '    local-as {0};'.format(self.asn)
            cmd << '    peer-as {0};'.format(peer.asn)

            # Optional capability block: disable 4-byte ASNs and/or enable add-path.
            caps = []
            if info['as2']:
                caps.append('        asn4 disable;')
            if info['addpath']:
                caps.append('        add-path send/receive;')
            if caps:
                cmd << '    capability {'
                for cap in caps:
                    cmd << cap
                cmd << '    }'

            # MD5 session password, when configured for this peer.
            if info['passwd']:
                cmd << '    md5-password "{0}";'.format(info['passwd'])

            if info['passive']:
                cmd << '    passive;'
            cmd << '}'

        with open('{0}/exabgpd.conf'.format(self.config_dir), 'w') as f:
            # Echo the generated config for debugging, then persist it.
            print colors.yellow('[{0}\'s new exabgpd.conf]'.format(self.name))
            print colors.yellow(str(cmd))
            f.write(str(cmd))
Exemplo n.º 18
0
def functional():
    """Run every module-level `test*` task in random order and report a summary.

    Each task is executed with fabric output suppressed; any Exception or
    SystemExit raised by a task counts as a failure and does not stop the run.
    """
    tests = [f for f in globals().iteritems() if f[0].startswith("test")]
    random.shuffle(tests)
    total = 0
    failed = 0
    success = 0

    for (name, thing) in tests:
        with settings(hide("status", "running", "warnings",
                           "aborts", "stdout", "stderr")):
            # Counting happens outside the try: the increment cannot raise.
            total += 1
            try:
                execute(thing)
            except (Exception, SystemExit):
                # Previously two identical except blocks with unused bindings;
                # SystemExit is not an Exception subclass, so both are needed.
                puts(colors.red(name))
                failed += 1
            else:
                puts(colors.green(name))
                success += 1

    puts(colors.yellow("Ran {} tests".format(total)))

    if failed:
        puts(colors.yellow("FAILED: (failures={})".format(failed)))
    else:
        puts(colors.yellow("SUCCESS"))
Exemplo n.º 19
0
def get_optimizable_extensions():
    """Return a mapping of asset-optimizer configs for the tools available
    on this machine; warn for each missing optimizer."""
    optimizers = dict()

    # CSS/JS minification via yui-compressor (writes to a separate target).
    if exec_exists('yui-compressor'):
        optimizers['cssjs'] = {
            'extensions': ['.css', '.js'],
            'overwrites': False,
            'exec': 'yui-compressor -o "%(target)s" "%(source)s"',
            'file_type': 'CSS or JS',
        }
    else:
        warn(yellow('>>> Unable to optimize css/js files: yui-compressor not found!'))

    if env.optimize_images:
        # Image optimizers rewrite the file in place.
        if exec_exists('optipng'):
            optimizers['png'] = {
                'extensions': ['.png'],
                'overwrites': True,
                'exec': 'optipng -quiet -preserve -- "%s"',
                'file_type': 'PNG',
            }
        else:
            warn(yellow('>>> Unable to optimize png images: optipng not found!'))

        if exec_exists('jpegoptim'):
            optimizers['jpeg'] = {
                'extensions': ['.jpg', '.jpeg'],
                'overwrites': True,
                'exec': 'jpegoptim --totals --strip-all  -- "%s"',
                'file_type': 'JPEG',
            }
        else:
            warn(yellow('>>> Unable to optimize jpeg images: jpegoptim not found!'))

    return optimizers
Exemplo n.º 20
0
def deploy(branch=None):
    branch_to_push = GIT_BRANCH if branch == None else branch

    if not _scary_confirm('You\'re pushing to the production server. Set condition one throughout the ship!'):
        return
    print ''

    # Compile assets locally
    print cyan('Compiling assets...', bold=True)
    local('rm -rf static/.webassets-cache')
    local('rm -rf static/.generated')
    local('. venv/bin/activate && python build_assets.py')
    print ''

    # Push the latest code up
    _git_push(GIT_PATH, branch_to_push)
    print ''

    # Check out the latest code
    _git_checkout_to_dir(GIT_PATH, APP_PATH, branch_to_push)

    with cd(APP_PATH):
        print yellow('Installing requirements...', bold=True)
        run('source venv/bin/activate && pip install -r requirements.txt')

    print cyan('Uploading assets...', bold=True)
    put('static/.webassets-cache/', APP_PATH + '/static/')
    put('static/.generated/', APP_PATH + '/static/')
    print ''

    _restart_uwsgi()
    print ''

    print green('Deploy to %s OK.' % env.host, bold=True)
Exemplo n.º 21
0
def setup():
    """
        Installs flask, mako and sqlalchemy using pip
    """

    ## installing flask
    print yellow("Installing latest Flask using pip... ")
    local("pip install flask")
    print blue("Finished installing flask")

    ## download Flask-Mako zip file from github first
    # Flask-Mako is not installed from PyPI here; it is built from a checkout.
    print yellow("Downloading Mako templates for Flask from github... ")
    local("git clone https://github.com/tzellman/flask-mako.git")
    print blue("Finished downloading Mako templates for Flask")

    ## installing Mako templates for flask
    print yellow("Installing Mako templates for Flask using tzellman's project on github... ")
    with lcd("flask-mako"):
        local("python setup.py install")

    # Remove the checkout once the package is installed.
    print yellow("Clearing setup files for Mako-Flask... ")
    local("rm -rf flask-mako")
    print blue("Finished installing Mako templates for Flask")

    ##installing SQLAlchemy for Flask
    print yellow("Installing Flask-SQLAlchemy... ")
    local("pip install flask-sqlalchemy")
    print blue("Finished installing Flask-SQLAlchemy")

    print blue("Finished setup")
Exemplo n.º 22
0
def print_env(full=False):
    if full:
        for key in env.keys():
            print yellow(key), green(getattr(env, key))
    else:
        for key in "projectname package_version build_dir dist_dir version_file".split():
            print yellow(key), green(getattr(env, key))
Exemplo n.º 23
0
def _get_proxy_group():
    print yellow('Retrieving security group.')
    groups = conn.get_all_security_groups()
    for group in groups:
        if group.name == SECURITY_GROUP:
            return group.name
    return _create_proxy_group()
Exemplo n.º 24
0
def submit(remote='origin', skip_tests=False):
    '''Push the current feature branch and create/update pull request.'''
    if not skip_tests:
        # Run the test suite first; failures require explicit confirmation.
        with settings(warn_only=True):
            if not test.run():
                if confirm(yellow('Tests failed. Continue anyway?')):
                    print yellow('Ignoring failed tests. Be careful.')
                else:
                    print red('Terminating due to failed tests.')
                    return
            else:
                print green('Tests OK!')

    # Record whether the branch exists remotely BEFORE pushing: this decides
    # between "update existing PR" and "create new PR" messaging below.
    first_submission = not git.remote_branch_exists(remote=remote)
    git.pull()
    authors()
    git.push()

    if not first_submission:
        print green('Pull request sucessfully updated.')
    elif git.hub_installed():
        # `hub` can open the pull request straight from the command line.
        current_branch = git.current_branch()
        local('hub pull-request -b bmun:master -h %s -f' % current_branch)
        print green('Pull request successfully issued.')
    else:
        print green('Branch successfully pushed. Go to GitHub to issue a pull request.')
Exemplo n.º 25
0
def configure_statsd():
    """Configure statsd server"""
    print(green("Configuring statsd"))

    print(yellow("Install statsd prerequisites"))
    # statsd runs on node.js; git is needed to clone its repository.
    apt_get("install nodejs git-core")

    print(yellow("Configuring statsd service account"))
    # System account without a password; SSH access reuses ubuntu's key.
    if not exists("/home/statsd"):
        sudo("adduser --system --disabled-password --shell /bin/bash --group statsd")
        sudo("install -m 755 -o statsd -g statsd -d /home/statsd/.ssh")
        sudo("install -m 600 -o statsd -g statsd /home/ubuntu/.ssh/authorized_keys /home/statsd/.ssh/authorized_keys")

    if not exists("/opt/statsd"):
        sudo("install -d -o statsd -g statsd -m 700 /opt/statsd")

    # NOTE(review): the user below was redacted to '******' in this copy —
    # presumably the statsd account; restore from version control.
    with settings(user="******"):
        append("/home/statsd/.gitignore", ['config.js'])

        print(yellow("Configuring /opt/statsd"))
        with cd("/opt/statsd"):
            if not exists("/opt/statsd/.git"):
                run("git clone --depth 50 https://github.com/etsy/statsd.git /opt/statsd/")
            # Force the checkout back to a pristine upstream state.
            run("git clean -fd")
            run("git pull --force")
            run("git reset --hard")

        put("deploy/statsd/localConfig.js", "/opt/statsd/localConfig.js")

    # Init job definition under /etc/init requires root, hence use_sudo.
    put("deploy/etc/init/statsd.conf", "/etc/init/statsd.conf",
        use_sudo=True)
Exemplo n.º 26
0
def install_gems():
    # Run everything as USERNAME with rbenv's shims on PATH.
    with shell_env(HOME='/home/' + USERNAME, PATH="/home/" + USERNAME + "/.rbenv/bin:$PATH"):
        # Disable gem documentation generation, once, via ~/.gemrc.
        if sudo('test -f ~/.gemrc && grep "gem:" ~/.gemrc > /dev/null').failed:
            sudo('echo "gem: --no-ri --no-rdoc -V" >> ~/.gemrc')
        else:
            print green('".gemrc" already exists')

        # Gems that should be available on every host.
        gems = [
            'bundler',
            'pry',
            'rails',
            'rbenv-rehash',
            'rspec',
            'spring',
        ]

        # Partition the list into already-present and still-missing gems.
        not_installed = []
        installed = []
        for g in gems:
            if sudo("eval \"$(rbenv init -)\" && gem list | awk '{print $1}' | egrep '^" + g + "$' > /dev/null").failed:
                not_installed.append(g)
            else:
                installed.append(g)
        if len(installed) > 0:
            # Note: `%` binds tighter than the py2 print statement, so the
            # join is substituted into the colored "%s" template as intended.
            print green('"%s" is already installed') % ', '.join(installed)
        if len(not_installed) > 0:
            print yellow('"%s" is not installed') % ', '.join(not_installed)
            # Install all missing gems in a single `gem install` invocation.
            sudo('eval "$(rbenv init -)" && gem install ' + ' '.join(not_installed))

        # Refresh rbenv shims so the new gem executables are found.
        sudo('rbenv rehash')
Exemplo n.º 27
0
def bootem(servers=None):
    """
    Boot all `openstack` servers as per the config.

    Usage:
        fab boot
        fab provider.bootem

    Boots each configured server, then polls until none reports a
    transitional ("BUILD"/"UNKNOWN") status anymore.
    """
    if servers is None:
        servers = settings.SERVERS

    for server in servers:
        boot(server["servername"], server["image"], server["flavor"], env.bootmachine_servers)

    print(yellow("... verifying that all servers are ``ACTIVE``."))

    env.bootmachine_servers = list_servers(as_list=True)

    slept = 0
    sleep_interval = 10
    status = None
    while status is None:
        statuses = [n.status for n in env.bootmachine_servers]
        # Bug fix: `("BUILD" or "UNKNOWN") not in statuses` evaluated the
        # parenthesized expression to "BUILD" and never checked "UNKNOWN".
        if "BUILD" not in statuses and "UNKNOWN" not in statuses:
            status = "ACTIVE"
            print(green("all servers are currently ``ACTIVE``!"))
        else:
            time.sleep(sleep_interval)
            slept += sleep_interval
            if slept <= 60:
                print(yellow("... waited {0} seconds.".format(slept)))
            else:
                minutes, seconds = divmod(slept, 60)
                print(yellow("... waited {0} min and {1} sec".format(minutes, seconds)))
            # Refresh the server list before the next status check.
            env.bootmachine_servers = list_servers(as_list=True)
Exemplo n.º 28
0
def clean_plugins():
    """
    Checks for installed plugins and removes the unused.
    """
    require('public_dir')
    # Ask wp-cli for the currently installed plugins as JSON.
    installed_plugins = json.loads(
        run('wp plugin list --format=json --path={0}'.  format(env.public_dir))
    )
    # Anything installed but absent from settings.json becomes a removal candidate.
    plugins_to_delete = []
    print "Verifying installed plugins..."
    for installed_plugin in installed_plugins:
        if not search_plugin(installed_plugin['name']):
            plugins_to_delete.append(installed_plugin['name'])
    if plugins_to_delete:
        print yellow(
            u'There are plugins installed on wordpress '
            u'that are not specified in settings.json. '
            u'These plugins must be uninstalled before installing, '
            u'updating, or syncing new plugins.\n'
            u'The following list shows plugins that are not specified '
            u'in settings.json:'
        )
        # Print a numbered list of the offending plugins.
        count = 1
        for plugin in plugins_to_delete:
            print yellow(str(count) + ".- " + plugin)
            count = count + 1
        if confirm(yellow('Do you want to delete these plugins?')):
            for plugin in plugins_to_delete:
                # Deactivate each plugin before uninstalling it.
                run('wp plugin deactivate {0} --path={1}'.
                    format(plugin, env.public_dir))
                run('wp plugin uninstall {0} --path={1}'.
                    format(plugin, env.public_dir))
        else:
            # User declined: abort the whole task.
            sys.exit(0)
Exemplo n.º 29
0
def install_package_in(env, deb, remote_upload_dir="/tmp/"):
    """
    Upload a single .deb package and install it on the remote machine.

    :param env: fabric-style environment (kept for signature compatibility)
    :param deb: local path of the package file to upload and install
    :param remote_upload_dir: remote staging directory for the upload
    """
    SUBDIR = "inst"

    # Bug fix: the check was inverted (it bailed out when the file DID
    # exist), and the message never received its % argument.
    if not os.path.exists(deb):
        print(red("%s packages not found" % deb))
        return

    print(green("... the following packages will be installed"))
    print(green("...... %s" % deb))

    # Recreate an empty staging subdirectory remotely.
    with cd(remote_upload_dir):
        print(yellow("... cleaning up old packages"))
        if exists(SUBDIR):
            run("rm -rf {subdir}".format(subdir=SUBDIR))
        run("mkdir {subdir}".format(subdir=SUBDIR))

    with cd(os.path.join(remote_upload_dir, SUBDIR)):
        print(yellow("... uploading packages"))
        put(deb, ".")

        print(yellow("... installing software"))
        sudo("dpkg --install  *.deb")
Exemplo n.º 30
0
def runNodes(num=0):
    """
    Start container as nodes
    """
    global ContainerList
    #  Small check to be sure that the container are available
    centos7_check = run(
        "docker images |grep \"matthdan/docker-centos7-evo\" |awk '{print $3}'",
        quiet=True)
    if centos7_check is None:
        print(red('The docker image is not present \n' +
                  ' Please solve this trouble before retrying... \n'))
        sys.exit(1)

    print(yellow('Starting the master node ...'))
    rhel7_master_CID = run('docker run -d --name master \
                           --dns 172.17.42.1 matthdan/docker-centos7-evo')
    ContainerList.append(rhel7_master_CID)

    print(yellow('Starting the compute node ...'))
    for x in range(0, int(num)):
        rhel7_compute_CID = run('docker run -d --name node' + str(x)
                                + ' --dns  172.17.42.1 matthdan/docker-centos7-evo')
        ContainerList.append(rhel7_compute_CID)

    print ContainerList
Exemplo n.º 31
0
def app_deploy_ready():

    config = _find_opsworks_stack()

    deploy = OpsWorksDeployment()

    # find stack matching the requested environment
    stack = deploy.get_stack(config['stack_id'])
    print "OpsWorks Stack: %(Name)s" % stack

    # find the application
    app = deploy.get_app(config['app_id'])
    print "OpsWorks Application: %(Name)s" % app

    print "--> Checking for incomplete application deployments..."
    deployments = deploy.get_app_deployments(config['app_id'])

    failed = False

    if len(deployments) > 0:
        running = True
        while running:
            incomplete_deployments = deploy.get_incomplete_deployments(
                deployments)

            # stop if we have completed all deployments
            if len(incomplete_deployments) == 0:
                running = False
                break

            running = True
            for deployment in incomplete_deployments:
                print yellow(
                    "* %(CreatedAt)s | %(CommandName)s | %(Status)s | %(DeploymentId)s"
                    % {
                        'DeploymentId': deployment['DeploymentId'],
                        'CreatedAt': deployment['CreatedAt'],
                        'Status': deployment['Status'],
                        'CommandName': deployment['Command']['Name'],
                    })

            print "--> Waiting for application deployments to complete..."
            sleep(5)  # delays for 5 seconds

        complete_deployments = deploy.get_deployments(deployments)
        for deployment in complete_deployments:
            if deployment['Status'] == "failed":
                print red(
                    "* %(CreatedAt)s | %(CommandName)s | %(Status)s | %(DeploymentId)s"
                    % {
                        'DeploymentId': deployment['DeploymentId'],
                        'CreatedAt': deployment['CreatedAt'],
                        'Status': deployment['Status'],
                        'CommandName': deployment['Command']['Name'],
                    })
                failed = True

    result = OpsWorksResult()
    result.failed = False

    if failed:
        result.failed = True
        print red("--> Failed deployments found")
    else:
        print green("--> All application deployments complete")

    result.succeeded = not result.failed
    return result
Exemplo n.º 32
0
    def _create_config_bgp(self):
        config = {
            'global': {
                'config': {
                    'as': self.asn,
                    'router-id': self.router_id,
                },
                'route-selection-options': {
                    'config': {
                        'external-compare-router-id': True,
                    },
                },
            },
            'neighbors': [],
        }

        if self.zebra and self.zapi_version == 2:
            config['global']['use-multiple-paths'] = {
                'config': {
                    'enabled': True
                }
            }

        for peer, info in self.peers.iteritems():
            afi_safi_list = []
            if info['interface'] != '':
                afi_safi_list.append(
                    {'config': {
                        'afi-safi-name': 'ipv4-unicast'
                    }})
                afi_safi_list.append(
                    {'config': {
                        'afi-safi-name': 'ipv6-unicast'
                    }})
            else:
                version = netaddr.IPNetwork(info['neigh_addr']).version
                if version == 4:
                    afi_safi_list.append(
                        {'config': {
                            'afi-safi-name': 'ipv4-unicast'
                        }})
                elif version == 6:
                    afi_safi_list.append(
                        {'config': {
                            'afi-safi-name': 'ipv6-unicast'
                        }})
                else:
                    Exception(
                        'invalid ip address version. {0}'.format(version))

            if info['vpn']:
                afi_safi_list.append(
                    {'config': {
                        'afi-safi-name': 'l3vpn-ipv4-unicast'
                    }})
                afi_safi_list.append(
                    {'config': {
                        'afi-safi-name': 'l3vpn-ipv6-unicast'
                    }})
                afi_safi_list.append(
                    {'config': {
                        'afi-safi-name': 'l2vpn-evpn'
                    }})
                afi_safi_list.append({
                    'config': {
                        'afi-safi-name': 'rtc'
                    },
                    'route-target-membership': {
                        'config': {
                            'deferral-time': 10
                        }
                    }
                })

            if info['flowspec']:
                afi_safi_list.append(
                    {'config': {
                        'afi-safi-name': 'ipv4-flowspec'
                    }})
                afi_safi_list.append(
                    {'config': {
                        'afi-safi-name': 'l3vpn-ipv4-flowspec'
                    }})
                afi_safi_list.append(
                    {'config': {
                        'afi-safi-name': 'ipv6-flowspec'
                    }})
                afi_safi_list.append(
                    {'config': {
                        'afi-safi-name': 'l3vpn-ipv6-flowspec'
                    }})

            neigh_addr = None
            interface = None
            if info['interface'] == '':
                neigh_addr = info['neigh_addr'].split('/')[0]
            else:
                interface = info['interface']
            n = {
                'config': {
                    'neighbor-address': neigh_addr,
                    'neighbor-interface': interface,
                    'peer-as': peer.asn,
                    'auth-password': info['passwd'],
                    'vrf': info['vrf'],
                    'remove-private-as': info['remove_private_as'],
                },
                'afi-safis': afi_safi_list,
                'timers': {
                    'config': {
                        'connect-retry': 10,
                    },
                },
                'transport': {
                    'config': {},
                },
            }

            n['as-path-options'] = {'config': {}}
            if info['allow_as_in'] > 0:
                n['as-path-options']['config']['allow-own-as'] = info[
                    'allow_as_in']
            if info['replace_peer_as']:
                n['as-path-options']['config']['replace-peer-as'] = info[
                    'replace_peer_as']

            if ':' in info['local_addr']:
                n['transport']['config']['local-address'] = info[
                    'local_addr'].split('/')[0]

            if info['passive']:
                n['transport']['config']['passive-mode'] = True

            if info['is_rs_client']:
                n['route-server'] = {'config': {'route-server-client': True}}

            if info['local_as']:
                n['config']['local-as'] = info['local_as']

            if info['prefix_limit']:
                for v in afi_safi_list:
                    v['prefix-limit'] = {
                        'config': {
                            'max-prefixes': info['prefix_limit'],
                            'shutdown-threshold-pct': 80
                        }
                    }

            if info['graceful_restart'] is not None:
                n['graceful-restart'] = {
                    'config': {
                        'enabled': True,
                        'restart-time': 20
                    }
                }
                for afi_safi in afi_safi_list:
                    afi_safi['mp-graceful-restart'] = {
                        'config': {
                            'enabled': True
                        }
                    }

                if info['llgr'] is not None:
                    n['graceful-restart']['config']['restart-time'] = 1
                    n['graceful-restart']['config'][
                        'long-lived-enabled'] = True
                    for afi_safi in afi_safi_list:
                        afi_safi['long-lived-graceful-restart'] = {
                            'config': {
                                'enabled': True,
                                'restart-time': 30
                            }
                        }

            if info['is_rr_client']:
                cluster_id = self.router_id
                if 'cluster_id' in info and info['cluster_id'] is not None:
                    cluster_id = info['cluster_id']
                n['route-reflector'] = {
                    'config': {
                        'route-reflector-client': True,
                        'route-reflector-cluster-id': cluster_id
                    }
                }

            if info['addpath']:
                n['add-paths'] = {'config': {'receive': True, 'send-max': 16}}

            if len(info.get('default-policy', [])) + len(
                    info.get('policies', [])) > 0:
                n['apply-policy'] = {'config': {}}

            for typ, p in info.get('policies', {}).iteritems():
                n['apply-policy']['config']['{0}-policy-list'.format(typ)] = [
                    p['name']
                ]

            def _f(v):
                if v == 'reject':
                    return 'reject-route'
                elif v == 'accept':
                    return 'accept-route'
                raise Exception('invalid default policy type {0}'.format(v))

            for typ, d in info.get('default-policy', {}).iteritems():
                n['apply-policy']['config']['default-{0}-policy'.format(
                    typ)] = _f(d)

            config['neighbors'].append(n)

        config['defined-sets'] = {}
        if self.prefix_set:
            config['defined-sets']['prefix-sets'] = self.prefix_set

        if self.neighbor_set:
            config['defined-sets']['neighbor-sets'] = self.neighbor_set

        if self.bgp_set:
            config['defined-sets']['bgp-defined-sets'] = self.bgp_set

        policy_list = []
        for p in self.policies.itervalues():
            policy = {'name': p['name']}
            if 'statements' in p:
                policy['statements'] = p['statements']
            policy_list.append(policy)

        if len(policy_list) > 0:
            config['policy-definitions'] = policy_list

        if self.zebra:
            config['zebra'] = {
                'config': {
                    'enabled': True,
                    'redistribute-route-type-list': ['connect'],
                    'version': self.zapi_version
                }
            }

        with open('{0}/gobgpd.conf'.format(self.config_dir), 'w') as f:
            print colors.yellow('[{0}\'s new gobgpd.conf]'.format(self.name))
            if self.config_format is 'toml':
                raw = toml.dumps(config)
            elif self.config_format is 'yaml':
                raw = yaml.dump(config)
            elif self.config_format is 'json':
                raw = json.dumps(config)
            else:
                raise Exception('invalid config_format {0}'.format(
                    self.config_format))
            print colors.yellow(indent(raw))
            f.write(raw)
Exemplo n.º 33
0
def stage10_vtep():
    """Provision an Open vSwitch VTEP (hardware_vtep emulator) on the host.

    Skips entirely if the stage lock file already exists. Everything else
    is a sequence of remote shell commands executed via fabric's run().
    """
    metadata = Config(os.environ["CONFIGFILE"])

    # Idempotency guard: lock file named after this function marks the
    # stage as already applied on this host.
    if cuisine.file_exists("/tmp/.%s.lck" % sys._getframe().f_code.co_name):
        return

    puts(yellow("bumping to utopic"))
    # Rewrite apt sources to utopic and refresh the index only when the
    # openvswitch-vtep package is not already known to dpkg.
    run("""
sed -i 's,%s,utopic,g;' /etc/apt/sources.list
dpkg -l | grep openvswitch-vtep || apt-get update
""" % metadata.config["os_release_codename"])

    # set up ovsdb-server in the vtep sandbox
    #
    cuisine.package_ensure(["openvswitch-switch", "openvswitch-vtep", "vlan"])

    # Overlay address for this host: vpn_base prefix + per-host index.
    compute_ip = "%s.%s" % (metadata.config["vpn_base"],
                            metadata.config["idx"][env.host_string])

    # Single large remote script: patches the vtep init script to also
    # listen on ptcp:6262, resets the OVS databases, restarts both OVS
    # services, creates the "vtep" bridge/physical switch, attaches the
    # configured port plus veth pairs, and launches the ovs-vtep agent.
    # NOTE(review): the script bytes are behavior — do not reformat.
    run("""
IP="%s"
PORT="%s"

#
# do we need to patch the init script?
#
INITSCRIPT="/etc/init.d/openvswitch-vtep"
if [[ "$(grep -- '--remote=ptcp:6262' ${INITSCRIPT})" == "" ]]; then
    TEMPFILE="$(mktemp)"

    grep -B9999 -- '--remote=db:hardware_vtep,Global,managers' ${INITSCRIPT} > ${TEMPFILE}
    echo '        --remote=ptcp:6262 \\' >> ${TEMPFILE}
    grep -A9999 -- '--private-key=/etc/openvswitch/ovsclient-privkey.pem' ${INITSCRIPT} >> ${TEMPFILE}

    mv ${TEMPFILE} ${INITSCRIPT}
fi

chmod 0755 ${INITSCRIPT}

rm /etc/openvswitch/vtep.db
rm /etc/openvswitch/conf.db

cat >/etc/default/openvswitch-vtep<<EOF
ENABLE_OVS_VTEP="true"
EOF

/etc/init.d/openvswitch-switch stop
/etc/init.d/openvswitch-vtep stop

sleep 2

ps axufwwwwwwww | grep -v grep | grep ovs | awk '{print $2;}' | xargs -n1 --no-run-if-empty kill -9

/etc/init.d/openvswitch-switch start
/etc/init.d/openvswitch-vtep start

ovs-vsctl add-br vtep
vtep-ctl add-ps vtep

vtep-ctl set Physical_Switch vtep tunnel_ips=${IP}
vtep-ctl set Physical_Switch vtep management_ips=${IP}

ovs-vsctl add-port vtep ${PORT}
vtep-ctl add-port vtep ${PORT}

ifconfig ${PORT} up

for P in 1 3 5 7 9; do
    ip a | grep veth${P} || ip link add type veth

    ifconfig veth${P} 192.168.77.20${P}/24
    ifconfig veth$(( ${P} - 1 )) up
    ifconfig veth${P} up

    ovs-vsctl add-port vtep veth$(( ${P} - 1 ))
    vtep-ctl add-port vtep veth$(( ${P} - 1 ))
done

screen -d -m -- /usr/share/openvswitch/scripts/ovs-vtep --log-file=/var/log/openvswitch/ovs-vtep.log --pidfile=/var/run/openvswitch/ovs-vtep.pid vtep

exit 0

""" % (compute_ip, metadata.config["vtep_port"]))
Exemplo n.º 34
0
def yellow_alert(msg, bold=True):
    print yellow('===>', bold=bold), white(msg, bold=bold)
Exemplo n.º 35
0
 def get_branch():
     # NOTE(review): scraped fragment — it references the free variables
     # `line`, `commit_hash` and `self` from an enclosing scope that is
     # not visible here, so it cannot run standalone as written.
     print(colors.yellow(u'Figuring out branch for commit: {}'.format(line.replace('{}', '-'))))
     return self.get_branch(commit_hash, ambiguous=True)
Exemplo n.º 36
0
def print_command(command):
    """Echo *command* with a shell-style colored prompt and arrow."""
    prompt = blue("$ ", bold=True)
    arrow = red(" ->", bold=True)
    _print(prompt + yellow(command, bold=True) + arrow)
Exemplo n.º 37
0
def package_release(py_versions=None,
                    build_dir=None,
                    system_node=False,
                    indico_version=None,
                    upstream=None,
                    tag_name=None,
                    github_auth=None,
                    overwrite=None,
                    ssh_server_host=None,
                    ssh_server_port=None,
                    ssh_user=None,
                    ssh_key=None,
                    ssh_dest_dir=None,
                    no_clean=False,
                    force_clean=False,
                    upload_to=None,
                    build_here=False):
    """
    Create an Indico release - source and binary distributions

    Falsy arguments fall back to the corresponding ``env`` settings.
    ``py_versions`` and ``upload_to`` are '/'-separated strings;
    ``upload_to`` may contain 'github' and/or 'ssh'. With ``build_here``
    the build runs in the current checkout instead of a clone under
    ``build_dir``.
    """

    # '/'-separated CLI strings -> lists, with env-based defaults.
    py_versions = py_versions.split('/') if py_versions else env.py_versions
    upload_to = upload_to.split('/') if upload_to else []

    build_dir = build_dir or env.build_dir
    upstream = upstream or env.github['upstream']

    ssh_server_host = ssh_server_host or env.ssh['host']
    ssh_server_port = ssh_server_port or env.ssh['port']
    ssh_user = ssh_user or env.ssh['user']
    ssh_key = ssh_key or env.ssh['key']
    ssh_dest_dir = ssh_dest_dir or env.ssh['dest_dir']

    indico_version = indico_version or 'master'

    local('mkdir -p {0}'.format(build_dir))

    _check_pyenv(py_versions)

    if build_here:
        _package_release(os.path.dirname(__file__), py_versions, system_node)
    else:
        with lcd(build_dir):
            if os.path.exists(os.path.join(build_dir, 'indico')):
                print yellow("Repository seems to already exist.")
                with lcd('indico'):
                    # Hard-reset the existing clone onto the upstream head;
                    # `no_clean` preserves untracked files.
                    local('git fetch {0}'.format(upstream))
                    local('git reset --hard FETCH_HEAD')
                    if not no_clean:
                        local('git clean -df')
            else:
                local('git clone {0}'.format(upstream))
            with lcd('indico'):
                print green(
                    "Checking out branch \'{0}\'".format(indico_version))
                local('git checkout {0}'.format(indico_version))

                _package_release(os.path.join(build_dir, 'indico'),
                                 py_versions, system_node)

    for u in upload_to:
        if u == 'github':
            upload_github(build_dir, tag_name, github_auth, overwrite,
                          indico_version)
        elif u == 'ssh':
            upload_ssh(build_dir, ssh_server_host, ssh_server_port, ssh_user,
                       ssh_key, ssh_dest_dir)

    if not build_here and force_clean:
        cleanup(build_dir, force=True)
Exemplo n.º 38
0
try:
    # Only works in newer versions of fabric
    env.colorize_errors = True
except AttributeError:
    pass

# List committers in first-commit (topological) order; the awk filter
# keeps only the first occurrence of each "Name <email>" line.
git_command = """git log --topo-order --reverse --format="%aN <%aE>" | awk ' !x[$0]++'"""

# Python 2: decode git's raw byte output to unicode before splitting.
git_people = unicode(local(git_command, capture=True),
                     'utf-8').strip().split("\n")

from distutils.version import LooseVersion

# Strip the leading "git version " prefix (12 characters).
git_ver = local('git --version', capture=True)[12:]
if LooseVersion(git_ver) < LooseVersion('1.8.4.2'):
    print(yellow("Please use a newer git version >= 1.8.4.2"))

def move(l, i1, i2):
    """Relocate the element at index *i1* of list *l* to index *i2*, in place."""
    l.insert(i2, l.pop(i1))


# Do the few changes necessary in order to reproduce AUTHORS:

move(git_people, 2, 0)  # Ondřej Čertík
move(git_people, 42, 1)  # Fabian Pedregosa
move(git_people, 22, 2)  # Jurjen N.E. Bos
# Entries inserted manually because they do not appear (or appear with a
# different identity) in the git log output.
git_people.insert(4, "*Marc-Etienne M.Leveille <*****@*****.**>")
move(git_people, 10, 5)  # Brian Jorgensen
git_people.insert(11, "*Ulrich Hecht <*****@*****.**>")
Exemplo n.º 39
0
try:
    # Only works in newer versions of fabric
    env.colorize_errors = True
except AttributeError:
    pass

# Unique committers, sorted, one "Name <email>" per line.
git_command = 'git log --format="%aN <%aE>" | sort -u'

# Python 2: decode git's raw byte output to unicode before splitting.
git_people = unicode(local(git_command, capture=True),
                     'utf-8').strip().split("\n")

from distutils.version import LooseVersion

# Strip the leading "git version " prefix (12 characters).
git_ver = local('git --version', capture=True)[12:]
if LooseVersion(git_ver) < LooseVersion('1.8.4.2'):
    print(yellow("Please use a newer git version >= 1.8.4.2"))

# AUTHORS lives two directories above this file.
with open(
        os.path.realpath(
            os.path.join(__file__, os.path.pardir, os.path.pardir,
                         "AUTHORS"))) as fd:
    AUTHORS = unicode(fd.read(), 'utf-8')

firstauthor = "Ondřej Čertík"

# Skip the prose header of AUTHORS: names start at the first author.
authors = AUTHORS[AUTHORS.find(firstauthor):].strip().split('\n')

# People who don't want to be listed in AUTHORS
authors_skip = ["Kirill Smelkov <*****@*****.**>"]

# presumably a counter of authors predating the git history — confirm
# against the code that uses it (not visible in this chunk).
predate_git = 0
Exemplo n.º 40
0
def really_run():
    """Deprecated no-op kept for backward compatibility.

    Historically this set dry_run to False; dry_run now always defaults
    to False, so calling this only prints a deprecation warning.
    """
    # BUG FIX: the implicit string concatenation was missing a space,
    # printing "...dry_run is alwaysset to 'False'...".
    print(
        yellow("WARNING: really_run() is now deprecated and dry_run is always "
               "set to 'False'. Use dry_run() to set it to 'True'"))
Exemplo n.º 41
0
def build():
    """Build the project's docker image, tagged with the project name."""
    print(yellow('Building docker image...'))
    cmd = 'docker build --tag="{0}" .'.format(project_name)
    with lcd('.'):
        local(cmd)
def build(outdir=None, device_sdk=None, simulator_sdk=None, **kwargs):
    """
    Build card.io SDK.

    Extra keyword arguments become Xcode GCC preprocessor definitions
    (e.g. SCAN_EXPIRY=0) and are encoded into the output directory name.
    ``outdir`` is required; ``device_sdk``/``simulator_sdk`` default to
    iphoneos/iphonesimulator.
    """
    print(colors.white("Setup", bold=True))

    # Hide fabric's command echo/output unless verbose mode is on.
    to_hide = [] if env.verbose else ["stdout", "stderr", "running"]

    xcode_preprocessor_flags = {}

    if not outdir:
        message = """
                     You must provide outdir=<sdk output parent dir>
                     Example usage:
                       `fab build:outdir=~` - normal build
                       `fab build:outdir=~,SCAN_EXPIRY=0` - to disable the experimental expiry-scan feature
                  """
        abort(textwrap.dedent(message).format(**locals()))

    # NOTE(review): a truthy return aborts the build — confirm that
    # _confirm_ready_for_release returns truthy on *failure*.
    if _confirm_ready_for_release("assets/strings"):
        sys.exit(1)

    outdir = os.path.abspath(os.path.expanduser(outdir))
    print colors.yellow(
        "Will save release sdk to {outdir}".format(outdir=outdir))
    out_subdir = "card.io_ios_sdk_{0}".format(_version_str(show_dirty=True))

    xcode_preprocessor_flags.update(kwargs)
    formatted_xcode_preprocessor_flags = " ".join(
        "{k}={v}".format(k=k, v=v)
        for k, v in xcode_preprocessor_flags.iteritems())
    extra_xcodebuild_settings = "GCC_PREPROCESSOR_DEFINITIONS='$(value) {formatted_xcode_preprocessor_flags}'".format(
        **locals())

    device_sdk = device_sdk or "iphoneos"
    simulator_sdk = simulator_sdk or "iphonesimulator"

    # One xcodebuild pass per (architecture, sdk) pair: device archs on
    # the device SDK, Intel archs on the simulator SDK.
    arch_to_sdk = (("armv7", device_sdk), ("armv7s", device_sdk), ("arm64",
                                                                   device_sdk),
                   ("i386", simulator_sdk), ("x86_64", simulator_sdk))

    with settings(hide(*to_hide)):
        icc_root = local("git rev-parse --show-toplevel", capture=True)

    # Scratch dir for per-arch build products; removed on interpreter exit.
    temp_dir = tempfile.mkdtemp() + os.sep
    atexit.register(shutil.rmtree, temp_dir, True)

    print(colors.white("Preparing dmz", bold=True))
    with settings(hide(*to_hide)):
        with lcd(os.path.join(icc_root, "dmz")):
            # Regenerate the concatenated dmz source and warn if the
            # checked-in copy was stale.
            dmz_all_filename = os.path.join("dmz", "dmz_all.cpp")
            with open(dmz_all_filename) as f:
                old_dmz_all = f.read()
            local("fab {verbose} concat".format(
                verbose="verbose" if env.verbose else ""))
            with open(dmz_all_filename) as f:
                new_dmz_all = f.read()
            if old_dmz_all != new_dmz_all:
                print(
                    colors.red("WARNING: dmz_all.h was not up to date!",
                               bold=True))

    print(colors.white("Building", bold=True))
    print(colors.white("Using temp dir {temp_dir}".format(**locals())))
    print(
        colors.white(
            "Using extra Xcode flags: {formatted_xcode_preprocessor_flags}".
            format(**locals())))

    with lcd(icc_root):

        with settings(hide(*to_hide)):
            lipo_build_dirs = {}
            build_config = "Release"
            arch_build_dirs = {}
            for arch, sdk in arch_to_sdk:
                print(
                    colors.blue(
                        "({build_config}) Building {arch}".format(**locals())))

                base_xcodebuild_command = "xcrun xcodebuild -target CardIO -arch {arch} -sdk {sdk} -configuration {build_config}".format(
                    **locals())

                clean_cmd = "{base_xcodebuild_command} clean".format(
                    **locals())
                local(clean_cmd)

                build_dir = os.path.join(temp_dir, build_config, arch)
                arch_build_dirs[arch] = build_dir
                os.makedirs(build_dir)
                parallelize = "" if env.verbose else "-parallelizeTargets"  # don't parallelize verbose builds, it's hard to read the output
                build_cmd = "{base_xcodebuild_command} {parallelize} CONFIGURATION_BUILD_DIR={build_dir}  {extra_xcodebuild_settings}".format(
                    **locals())
                local(build_cmd)

            # Merge the per-arch static libs into one universal binary.
            print(colors.blue("({build_config}) Lipoing".format(**locals())))
            lipo_dir = os.path.join(temp_dir, build_config, "universal")
            lipo_build_dirs[build_config] = lipo_dir
            os.makedirs(lipo_dir)
            arch_build_dirs["universal"] = lipo_dir
            # in Xcode 4.5 GM, xcrun selects the wrong lipo to use, so circumventing xcrun for now :(
            lipo_cmd = "`xcode-select -print-path`/Platforms/iPhoneOS.platform/Developer/usr/bin/lipo " \
                       "           {armv7}/{libname}" \
                       "           -arch armv7s {armv7s}/{libname}" \
                       "           -arch arm64 {arm64}/{libname}" \
                       "           -arch i386 {i386}/{libname}" \
                       "           -arch x86_64 {x86_64}/{libname}" \
                       "           -create" \
                       "           -output {universal}/{libname}".format(libname=env.libname, **arch_build_dirs)
            local(lipo_cmd)

            print(
                colors.blue("({build_config}) Stripping debug symbols".format(
                    **locals())))
            strip_cmd = "xcrun strip -S {universal}/{libname}".format(
                libname=env.libname, **arch_build_dirs)
            local(strip_cmd)

            # Encode any extra preprocessor flags into the output dir name
            # so differently-configured SDKs don't collide.
            out_subdir_suffix = "_".join("{k}-{v}".format(k=k, v=v)
                                         for k, v in kwargs.iteritems())
            if out_subdir_suffix:
                out_subdir_suffix = "_" + out_subdir_suffix
            out_subdir += out_subdir_suffix
            sdk_dir = os.path.join(outdir, out_subdir)

            print(
                colors.white("Assembling release SDK in {sdk_dir}".format(
                    sdk_dir=sdk_dir),
                             bold=True))
            if os.path.isdir(sdk_dir):
                shutil.rmtree(sdk_dir)
            cardio_dir = os.path.join(sdk_dir, "CardIO")
            os.makedirs(cardio_dir)

            # The SDK payload: public headers + the universal static lib,
            # plus release docs and the sample apps.
            header_files = glob.glob(os.path.join("CardIO_Public_API", "*.h"))
            _copy(header_files, cardio_dir)

            libfile = os.path.join(lipo_build_dirs["Release"], env.libname)
            shutil.copy2(libfile, cardio_dir)

            release_dir = os.path.join(icc_root, "Release")
            shutil.copy2(os.path.join(release_dir, "release_notes.txt"),
                         sdk_dir)
            shutil.copy2(os.path.join(release_dir, "CardIO.podspec"), sdk_dir)
            shutil.copy2(os.path.join(release_dir, "acknowledgments.md"),
                         sdk_dir)
            shutil.copy2(os.path.join(release_dir, "LICENSE.md"), sdk_dir)
            shutil.copy2(os.path.join(release_dir, "README.md"), sdk_dir)
            shutil.copytree(os.path.join(release_dir, "SampleApp"),
                            os.path.join(sdk_dir, "SampleApp"),
                            ignore=shutil.ignore_patterns(".DS_Store"))
            shutil.copytree(os.path.join(release_dir, "SampleApp-Swift"),
                            os.path.join(sdk_dir, "SampleApp-Swift"),
                            ignore=shutil.ignore_patterns(".DS_Store"))
Exemplo n.º 43
0
    def _create_config_bgp(self):
        """Render a Quagga bgpd.conf from self.peers / self.routes /
        self.policies and write it to ``<config_dir>/bgpd.conf``."""

        c = CmdBuffer()
        c << 'hostname bgpd'
        c << 'password zebra'
        c << 'router bgp {0}'.format(self.asn)
        c << 'bgp router-id {0}'.format(self.router_id)
        # Enable graceful restart globally if any peer requests it.
        if any(info['graceful_restart'] for info in self.peers.itervalues()):
            c << 'bgp graceful-restart'

        version = 4
        for peer, info in self.peers.iteritems():
            version = netaddr.IPNetwork(info['neigh_addr']).version
            n_addr = info['neigh_addr'].split('/')[0]
            if version == 6:
                c << 'no bgp default ipv4-unicast'

            c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
            if info['is_rs_client']:
                c << 'neighbor {0} route-server-client'.format(n_addr)
            for typ, p in info['policies'].iteritems():
                c << 'neighbor {0} route-map {1} {2}'.format(
                    n_addr, p['name'], typ)
            if info['passwd']:
                c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
            if info['passive']:
                c << 'neighbor {0} passive'.format(n_addr)
            # IPv6 peers must be explicitly activated in their address family.
            if version == 6:
                c << 'address-family ipv6 unicast'
                c << 'neighbor {0} activate'.format(n_addr)
                c << 'exit-address-family'

        for route in chain.from_iterable(self.routes.itervalues()):
            if route['rf'] == 'ipv4':
                c << 'network {0}'.format(route['prefix'])
            elif route['rf'] == 'ipv6':
                c << 'address-family ipv6 unicast'
                c << 'network {0}'.format(route['prefix'])
                c << 'exit-address-family'
            else:
                raise Exception('unsupported route faily: {0}'.format(
                    route['rf']))

        # NOTE(review): `version` here is left over from the LAST peer
        # iterated above (or 4 when there are no peers), so redistribute
        # placement depends on peer iteration order — confirm intentional.
        if self.zebra:
            if version == 6:
                c << 'address-family ipv6 unicast'
                c << 'redistribute connected'
                c << 'exit-address-family'
            else:
                c << 'redistribute connected'

        # Each policy becomes an access-list plus a route-map setting MED.
        for name, policy in self.policies.iteritems():
            c << 'access-list {0} {1} {2}'.format(name, policy['type'],
                                                  policy['match'])
            c << 'route-map {0} permit 10'.format(name)
            c << 'match ip address {0}'.format(name)
            c << 'set metric {0}'.format(policy['med'])

        c << 'debug bgp as4'
        c << 'debug bgp fsm'
        c << 'debug bgp updates'
        c << 'debug bgp events'
        c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)

        with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
            print colors.yellow('[{0}\'s new bgpd.conf]'.format(self.name))
            print colors.yellow(indent(str(c)))
            f.writelines(str(c))
Exemplo n.º 44
0
def setup_network():
    """Create the project's bridge network for docker containers."""
    print(yellow('Launching docker network...'))
    command = ('docker network create --driver bridge {project_name}-network'
               ''.format(project_name=project_name))
    with lcd('.'):
        local(command)
Exemplo n.º 45
0
def deploy(id=None, silent=False, force=False, auto_nginx=True):
    """ Perform an automatic deploy to the target requested.

    Inspects the incoming changeset to decide which steps (requirements,
    npm build, migrations, cron, letsencrypt, nginx) are needed. `force`
    runs them all regardless; `silent` skips the confirmation prompt;
    `auto_nginx` controls whether nginx config changes are applied
    automatically.
    """
    require('hosts')
    require('code_dir')

    if force:
        force = colors.blue('FORCED DEPLOY')

        print '-' * 40
        print force
        print '-' * 40

    # Ask for sudo at the beginning so we don't fail during deployment because of wrong pass
    if not sudo('whoami'):
        abort('Failed to elevate to root')
        return

    # Show log of changes, return if nothing to do
    revset = show_log(id)
    if not revset and not force:
        return

    # See if we have any requirements changes
    requirements_changes = force or vcs.changed_files(revset, r' requirements/')
    if requirements_changes:
        print colors.yellow("Will update requirements (and do migrations)")

    # See if we have changes in app source or static files
    app_patterns = [r' eline/app', r' eline/static',
                    r' eline/settings', r' eline/package.json']
    app_changed = force or vcs.changed_files(revset, app_patterns)
    if app_changed:
        print colors.yellow("Will run npm build")

    # See if we have any changes to migrations between the revisions we're applying
    migrations = force or migrate_diff(revset=revset, silent=True)
    if migrations:
        print colors.yellow("Will apply %d migrations:" % len(migrations))
        print indent(migrations)

    # See if we have any changes to crontab config
    crontab_changed = force or vcs.changed_files(revset, r'deploy/crontab.conf')
    if crontab_changed:
        print colors.yellow("Will update cron entries")

    # See if we have any changes to letsencrypt configurations
    letsencrypt_changed = force or vcs.changed_files(revset, get_config_modified_patterns('letsencrypt'))
    if letsencrypt_changed:
        print colors.yellow("Will update letsencrypt configurations")

    # see if nginx conf has changed
    nginx_changed = vcs.changed_files(revset, get_config_modified_patterns('nginx'))

    if nginx_changed:
        if auto_nginx:
            print colors.yellow("Nginx configuration change detected, updating automatically")

        else:
            print colors.red("Warning: Nginx configuration change detected, also run: `fab %target% nginx_update`")

    elif force:
        print colors.yellow("Updating nginx config")

    # Last chance to abort before any state is mutated.
    if not silent:
        request_confirm("deploy")

    vcs.update(id)

    ensure_docker_networks()
    docker_compose('build')

    collectstatic(npm_build=app_changed)

    if crontab_changed:
        with cd(env.code_dir):
            sudo('cp deploy/crontab.conf /etc/cron.d/eline')

    if migrations or requirements_changes:
        migrate(silent=True)

    # Run deploy systemchecks
    check()

    docker_up(silent=True)

    # Update nginx after bringing up container
    if force or (nginx_changed and auto_nginx):
        nginx_update()

    if force or letsencrypt_changed:
        letsencrypt_update()
Exemplo n.º 46
0
 def create_config(self):
     with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
         print colors.yellow('[{0}\'s new bgpd.conf]'.format(self.name))
         print colors.yellow(indent(self.config))
         f.writelines(self.config)
Exemplo n.º 47
0
def update():
    """Run the full site-update pipeline.

    Syncs/migrates the database, rebuilds the Butter assets, and then
    refreshes the search index, in that order.
    """
    print(yellow('Updating the site:'))
    syncdb()
    compile_butter()
    update_index()
Exemplo n.º 48
0
def letsencrypt_configure(reconfigure_nginx=True):
    """Obtain letsencrypt certificates for every configured domain.

    Collects the ``domains`` entries from all letsencrypt config files in the
    repo, uploads a temporary nginx config that serves only the ACME
    http-01 challenge directory, requests the certificates via
    ``letsencrypt_update``, and finally restores the real nginx config
    (unless ``reconfigure_nginx`` is False).

    Requires ``env.code_dir``; prompts the operator to confirm that DNS for
    all domains already points at the remote host, and aborts otherwise.
    """
    require('code_dir')

    domains = set()

    # Collect all the domains that need a certificate
    with cd(env.code_dir):
        # construct a configparser object
        config = ConfigParser.ConfigParser()

        for filename in get_config_repo_paths('letsencrypt'):
            buf = StringIO()

            # Add the actual config file data to the buffer
            get(filename, buf)

            # Here we prepend a section header to the in-memory buffer. This
            #  allows us to easily read the letsencrypt config file using stdlib configparser
            #
            # see: http://stackoverflow.com/questions/2819696/parsing-properties-file-in-python/25493615#25493615
            buf = StringIO('[DEFAULT]\n' + buf.getvalue())

            # read config from buf
            config.readfp(buf)

            # get domains from the config file (comma-separated list)
            for domain in config.get('DEFAULT', 'domains').split(','):
                domains.add(domain.strip())

    # Create a temporary nginx config file that only answers the ACME
    # challenge over plain http on port 80 for all collected domains.
    temporary_nginx_conf = """
        server {
            listen 80;
            server_name %(domains)s;
            location /.well-known/acme-challenge/ {
                root /etc/letsencrypt/www;
                break;
            }
        }
    """ % {
        "domains": " ".join(domains),
    }

    # Notify the user that the dns MUST be configured for all the domains as of this point
    print(" ")
    print(colors.blue('Preparing to request certificate using letsencrypt. The DNS for '
                      'following domains MUST be configured to point to the remote host: %s' % " ".join(domains)))

    if not confirm(colors.yellow("Is the dns configured? (see above)")):
        abort('Deployment aborted.')

    # Upload it to the app nginx config path
    put(local_path=StringIO(temporary_nginx_conf), remote_path=get_nginx_app_target_path(), use_sudo=True)

    # Reload nginx so the temporary challenge config takes effect
    sudo('docker exec nginx nginx -s reload')

    # use letsencrypt_update to obtain the certificate
    letsencrypt_update(dry_run=True)

    # restore nginx config if requested
    if reconfigure_nginx:
        nginx_update()
Exemplo n.º 49
0
def copy_butter():
    """Copy the compiled Butter assets into the project asset directories.

    Copies the whole butter/dist tree into assets/dist/, and additionally
    mirrors the CSS and JS files into assets/css/ and assets/js/.
    """
    # Fix: message typo 'Coping' -> 'Copying'
    print(yellow('Copying Butter styling'))
    with lcd(settings.PROJECT_ROOT):
        local('cp -r butter/dist/* assets/dist/')
        local('cp butter/dist/*.css assets/css/')
        local('cp butter/dist/*.js assets/js/')
Exemplo n.º 50
0
def collectstatic():
    """Rebuild Butter assets, then collect Django static files."""
    compile_butter()
    print(yellow('Collecting static files.'))
    project_root = settings.PROJECT_ROOT
    with lcd(project_root):
        local('python manage.py collectstatic --noinput')
Exemplo n.º 51
0
def compile_butter():
    """Build the Butter release bundle, then copy the artefacts into assets."""
    print(yellow('Compiling Butter files.'))
    butter_dir = os.path.join(settings.PROJECT_ROOT, 'butter')
    with lcd(butter_dir):
        local('VERSION=0.5 node make release')
    copy_butter()
Exemplo n.º 52
0
def update_index():
    """Rebuild the search index via manage.py update_index."""
    print(yellow('Updating search index'))
    project_root = settings.PROJECT_ROOT
    with lcd(project_root):
        local('python manage.py update_index')
Exemplo n.º 53
0
def deprecated(**kwargs):
    """Deprecated alias for apply_pr: warn the user, then delegate unchanged."""
    warning = colors.red("WARNING: 'apply_pr' command has been deprecated and\n"
                         "  it will be deleted in future versions")
    print(warning)
    print(colors.yellow("> Use 'sastre deploy' instead"))
    return apply_pr(**kwargs)
Exemplo n.º 54
0
def syncdb():
    """Synchronise the database schema and apply pending migrations."""
    print(yellow('Syncing the database'))
    with lcd(settings.PROJECT_ROOT):
        # syncdb first, then migrate -- both non-interactive
        for subcommand in ('syncdb', 'migrate'):
            local('python manage.py %s --noinput' % subcommand)
Exemplo n.º 55
0
def test_integration():
    """Run the local integration/fuzz/signing test scripts.

    Assumes a server is already running (see the reminder it prints).
    """
    print(yellow('Be sure to run: "fab compile_js configure_prod run" first'))
    scripts = (
        'test.py',
        'fuzztest.py',
        'signed_testing.py',
        'signed_testing_sha512.py',
    )
    for script in scripts:
        local('python ./assets/integration_test/%s' % script)
Exemplo n.º 56
0
def update_npm():
    """Install and then update npm packages inside the butter/ subproject."""
    print(yellow('Updating npm'))
    butter_dir = os.path.join(settings.PROJECT_ROOT, 'butter')
    with lcd(butter_dir):
        for npm_command in ('npm install', 'npm update'):
            local(npm_command)
Exemplo n.º 57
0
# Print a human-readable summary of the cluster layout at import time.
print(blue('= Summary of cluster information ='))
print('== NODE ==')
pprint(NODE)
print('== Fabric roles ==')
pprint(env.roledefs)
print('== ROLE ==')
pprint(ROLE)
print(blue('= End of summary ='))

#
# Debugging mode
#
# To enable it use: export NOOP=1
if os.environ.get('NOOP'):

    print(yellow('\n\n== Running in NOOP mode ==\n\n'))

    # NOTE: these definitions shadow fabric's run/put for the rest of this
    # module, so remote commands are only echoed, not executed (dry run).
    def run(name):
        print('[{0}] run: {1}'.format(env.host, name))

    def put(source, destination):
        print('[{0}] put: {1} {2}'.format(env.host, source, destination))

    @task
    @parallel
    def hostname():
        """Print the hostnames: mainly used for testing purposes"""
        run('/bin/hostname')

#
Exemplo n.º 58
0
def server_add_production_user(user):
    """Create a password-less production user and chown their home directory."""
    print(yellow('\nAdding production user'))
    add_user_cmd = 'adduser --disabled-password --gecos "" %s' % user
    chown_cmd = 'chown -R %s:%s /home/%s' % (user, user, user)
    sudo(add_user_cmd)
    sudo(chown_cmd)
Exemplo n.º 59
0
def warn(msg, error_code=1):
    """Print a yellow 'warning: <msg>' line.

    NOTE(review): ``error_code`` is accepted but never used -- the function
    neither exits nor returns it. Confirm whether callers expect an exit with
    that code, or whether the parameter should be removed.
    """
    print("{}: {}".format(yellow("warning"), msg))
Exemplo n.º 60
0
def create_target_directories(dirs, perms, user):
    """Create every directory in *dirs* that does not already exist.

    Missing directories are created with the given *perms*, owned by
    *user*:*user*; existing ones are left untouched.
    """
    for directory in dirs:
        if dir_exists(directory):
            continue
        show(colors.yellow("Creating missing directory: %s"), directory)
        create_dir_with_perms(directory, perms, user, user)