Example #1
def put_rc_files():
    # upload each rc file via the vagrant home, then copy it into the user's home
    for rc in ['.vimrc', '.gitconfig', '.inputrc']:
        if run('test -f /home/' + USERNAME + '/' + rc).failed:
            put('~/' + rc, '~/' + rc)

            with shell_env(HOME='/home/' + USERNAME):
                sudo('cp ~vagrant/' + rc + ' ~')
                run('rm -f ~vagrant/' + rc)
        else:
            print green('"%s" already exists' % rc)
            run('rm -f ~vagrant/' + rc)
Example #2
def initial_setup():
    """ Initial database creation and fixtures creation """
    with shell_env(DJANGO_CONFIGURATION='Production'):
        try:
            choice = int(prompt('What database engine do you plan to use? \n' +
                                'If you choose MySQL or Postgres, you have to make sure it is' +
                                ' installed on your computer before proceeding further \n' +
                                '(1) Sqlite3 \n' +
                                '(2) MySQL \n' +
                                '(3) Postgres (recommended) \n' +
                                ': '))

            if choice == 1:
                _sync_db()
            elif choice == 2:

                response = _db_questions(0, '3306')

                with shell_env(DATABASE_URL=response):
                    _install_mysql()
                    _sync_db()
            elif choice == 3:
                response = _db_questions(1, '5432')

                with shell_env(DATABASE_URL=response):
                    _install_postgres()
                    _sync_db()

        except ValueError:
            print 'Try again! You should enter a number.'

        local('python pillbox-engine/manage.py collectstatic --noinput')
Example #3
def rls():
    with lcd("app"):
        # note: nested shell_env calls replace, rather than merge, the outer
        # environment in Fabric 1.x, so both variables go in a single call
        with shell_env(DJANGO_SETTINGS_MODULE="website.settings.test",
                       DJANGO_SECRET_KEY='l2m12=spld!a^m30@%gkvk*)f*x(wh18v70ch04mpnnt%!-h7t'):
            local("python manage.py collectstatic --noinput")
            local("python manage.py migrate")
            local("python manage.py runserver 0.0.0.0:8002")
Example #4
def deploy():
    """ Deploy the application """
    print_env_and_user()
    pull()
    install_requirements()
    with shell_env(WORKON_HOME='~/venvs'):
        run(django_manage(command='syncdb --noinput'))
        run(django_manage(command='migrate --all'))
    collectstatic()
    restart()
Example #5
def sync_remote_to_local(force="no"):
    """
    Sync your remote postgres database with the local one

    Example:
        fabrik prod sync_remote_to_local
    """

    _check_requirements()

    if force != "yes":
        message = "This will replace your local database '%s' with the "\
            "remote '%s', are you sure [y/n]" % (env.local_psql_db, env.psql_db)
        answer = prompt(message, "y")

        if answer != "y":
            logger.info("Sync stopped")
            return

    init_tasks()  # Bootstrap fabrik

    # Create database dump
    remote_file = "postgresql/sync_%s.sql.tar.gz" % int(time.time()*1000)
    remote_path = paths.get_backup_path(remote_file)

    env.run("mkdir -p %s" % paths.get_backup_path("postgresql"))

    with context_managers.shell_env(PGPASSWORD=env.psql_password):
        env.run("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % (
            remote_path, env.psql_user, env.psql_db
        ))

    local_path = "/tmp/%s" % remote_file

    # Download sync file
    get(remote_path, local_path)

    # Import sync file by performing the following task (drop, create, import)
    with context_managers.shell_env(PGPASSWORD=env.local_psql_password):
        elocal("pg_restore --clean -h localhost -d %s -U %s '%s'" % (
            env.local_psql_db,
            env.local_psql_user,
            local_path)
        )

    # Cleanup
    env.run("rm %s" % remote_path)
    elocal("rm %s" % local_path)

    # Trigger hook
    run_hook("postgres.after_sync_remote_to_local")

    logger.info("Sync complete")
Example #6
def compile_prod_css():
    log.warning('Calling killall, all processes will be killed!')
    killall()

    with shell_env(APP_ENV=PROD_ENV):
        with cd("/var/www"):
            settings = parse_vars(run("./manage.py settings_vars STATIC_URL"))
            if 'STATIC_URL' not in settings:
                log.error('STATIC_URL lookup failed!!!')
                abort("STATIC_URL lookup failed!!!")

    with shell_env(APP_ENV=PROD_ENV, STATIC_URL=settings['STATIC_URL']):
        css_compile()

    log.warning('Note: killall was called, so the vagrant server/compass will be down')
Example #7
    def run(self):
        with settings(warn_only=True):
            if self.extra_path() != "":
                with path(self.extra_path()):
                    with shell_env(**self.shell_env()):
                        self.pre_action()
                        result = self.run_command()
                        self.post_action(result)
            else:
                with shell_env(**self.env):
                    self.pre_action()
                    result = self.run_command()
                    self.post_action(result)

        return result
Example #8
def deploy():
    with prefix('source $(which virtualenvwrapper.sh) && workon remote'):
        settings_file = '--settings=haxclub.settings.base'
        env_vars = config.get('env_vars')
        if not exists('~/haxclub'):
            with cd('~/'):
                run('git clone https://github.com/jsalva/haxclub')
        with cd('~/haxclub/haxclub'):
            if not exists('logs'):
                run('mkdir logs')
            run('git pull origin master')
            with shell_env(**env_vars):
                prompts = []
                prompts += expect("Type 'yes' to continue","yes")
                with expecting(prompts):
                    erun('python manage.py collectstatic %s' % settings_file)
                    erun('python manage.py migrate %s' % settings_file)
                    erun('python manage.py syncdb %s' % settings_file)
                    if exists('supervisord.pid'):
                        erun('python manage.py supervisor reload %s' % settings_file)
                    else:
                        erun('python manage.py supervisor --daemonize %s' % settings_file)

    if not exists('/tmp/nginx'):
        run('mkdir /tmp/nginx')

    put('nginx.conf','/etc/nginx/nginx.conf',use_sudo=True)
    put('nginx_haxclub.conf','/etc/nginx/conf.d/nginx_haxclub.conf',use_sudo=True)
    put('ssl/haxclub.key.nopass','/etc/ssl/certs/haxclub.key.nopass',use_sudo=True)
    put('ssl/haxclub.crt','/etc/ssl/certs/haxclub.crt',use_sudo=True)
    sudo('service nginx stop; service nginx start;')
Example #9
def manage(command):
    default_settings = '{{ project_name }}.settings.{0}'.format(env.environment)
    django_settings = env.get('django_settings', default_settings)

    with shell_env(DJANGO_SETTINGS_MODULE=django_settings):
        with cd(MANAGE_PATH), prefix(WORKON_ENV):
            run('python manage.py {}'.format(command))
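A calling task elsewhere in the fabfile would pass just the bare management command to this helper. A minimal usage sketch (the task names are illustrative, not from the original):

def migrate():
    manage('migrate --noinput')

def collectstatic():
    manage('collectstatic --noinput')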
Example #10
def create_server(
        keypair_name,
        # m1.large
        flavor=u'4',
        # SC_Centos7
        image=u'ab32525b-f565-49ca-9595-48cdb5eaa794',
        # tmz-mdl-net1
        net_id=u'74632532-1629-44b4-a464-dd31657f46a3',
):
    """
    Run ``nova boot`` to create a new server on which to run the
    redhat-openstack build slave.

    :param str keypair_name: The name of an SSH keypair that has been
        registered on the redhat-openstack nova tenant.
    """
    with shell_env(OS_TENANT_NAME=TENANT_NAME):
        commandline = cmd(
            'nova', 'boot',
            '--image', image,
            '--flavor', flavor,
            '--nic', 'net-id=' + net_id,
            '--key-name', keypair_name,
            # SSH authentication fails unless this is included.
            '--config-drive', 'true',
            # Wait for the machine to become active.
            '--poll',
            BUILDSLAVE_NODENAME
        )

        run(commandline)
        run('nova list | grep {!r}'.format(BUILDSLAVE_NODENAME))
Example #11
def deploy():

    target = "/tmp/%s.tar.gz" % uuid.uuid4()
    run("wget %s -O %s" % (TARBALL, target))

    run("mkdir -p %s" % PATH)

    with cd(PATH):

        with warn_only():
            if files.exists("uwsgi.pid"):
                run("%s --stop uwsgi.pid" % ENV_UWSGI)

        run("rm vmprof -rf ")
        run("mkdir vmprof")

        run("tar -xf %s -C vmprof --strip-components=1" % target)

        run("rm %s" % target)

        if not files.exists("virtualenv"):
            run("virtualenv virtualenv")

        run("%s install -r vmprof/requirements/production.txt" % ENV_PIP)

        run("mkdir -p vmprof/static")

        with shell_env(DJANGO_SETTINGS_MODULE='settings.production'):
            run("%s vmprof/manage.py collectstatic -c --noinput" % ENV_PYTHON)
            run("%s vmprof/manage.py migrate" % ENV_PYTHON)

        run("%s --ini vmprof/uwsgi.ini" % ENV_UWSGI)
Example #12
def reset_database():
    with shell_env(SUPERLISTS_DB=environ['STAGING_DB'],
                   SUPERLISTS_DB_USERNAME=environ['STAGING_DB_USERNAME'],
                   SUPERLISTS_DB_PASSWORD=environ['STAGING_DB_PASSWORD']):
        run('{manage_py} flush --noinput'.format(
            manage_py=_get_manage_dot_py(env.host)
        ))
Example #13
    def _sudo(self):
        fabric_env_vars = self.env_vars
        fabric_settings = self._get_settings()

        try:
            with shell_env(**fabric_env_vars), settings(**fabric_settings):
                output = sudo(self.command, combine_stderr=False, pty=True, quiet=True)
        except Exception:
            LOG.exception('Failed executing remote action.')
            result = self._get_error_result()
        else:
            result = {
                'stdout': output.stdout,
                'stderr': output.stderr,
                'return_code': output.return_code,
                'succeeded': output.succeeded,
                'failed': output.failed
            }
        finally:
            self._cleanup(settings=fabric_settings)

        # XXX: For sudo, fabric requires pty=True to be set. This basically combines stdout and
        # stderr into a single stdout stream. So if the command fails, we explicitly set stderr
        # to stdout and stdout to ''.
        if result['failed'] and result.get('stdout', None):
            result['stderr'] = result['stdout']
            result['stdout'] = ''

        return jsonify.json_loads(result, FabricRemoteAction.KEYS_TO_TRANSFORM)
Example #14
def sys_firewall(config=None, **kwargs):
    # we need a default ``INPUT ACCEPT`` policy first; otherwise the input
    # rules for ssh are not yet in place and we get locked out
    sudo("""
        iptables -P INPUT ACCEPT; ip6tables -P INPUT ACCEPT
        iptables -P OUTPUT ACCEPT; ip6tables -P OUTPUT ACCEPT
        iptables -P FORWARD DROP; ip6tables -P FORWARD DROP
        iptables -F; ip6tables -F
        iptables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
        ip6tables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
        iptables -A INPUT -p tcp -m tcp --dport 22 -m conntrack --ctstate NEW -j ACCEPT
        iptables -A INPUT -i lo -j ACCEPT; ip6tables -A INPUT -i lo -j ACCEPT
        # Now replace the default input policy
        iptables -P INPUT DROP; ip6tables -P INPUT DROP
        """
    )
    for rule in getattr(config, 'IPTABLES_RULES', []):
        sudo('iptables %s' % rule)
        sudo('ip6tables %s' % rule)
    for port in getattr(config, 'OPEN_TCP_PORTS', []):
        sudo('ip6tables -A INPUT -p tcp -m tcp --dport %d -m conntrack --ctstate NEW -j ACCEPT' % int(port))
        sudo('iptables -A INPUT -p tcp -m tcp --dport %d -m conntrack --ctstate NEW -j ACCEPT' % int(port))
    sudo('iptables-save')
    sudo('ip6tables-save')
    with shell_env(DEBIAN_FRONTEND='noninteractive'):
        sudo('echo iptables-persistent iptables-persistent/autosave_v4 boolean true | debconf-set-selections')
        sudo('echo iptables-persistent iptables-persistent/autosave_v6 boolean true | debconf-set-selections')
        install_packages('iptables-persistent')
Example #15
 def execute(self, cmd, *args, **kwargs):
     """
     Executes a fabric-navitia command
     :param cmd: the fabric command
     :param *args are passed to api.execute
     :param **kwargs are passed to api.execute, except 'let' which is
            passed to settings
     """
     let = kwargs.pop('let', {})
     shell_env = kwargs.pop('shell_env', None)
     self.log.info("Container '{}' exec fabric command {}({}, {})".format(
         self.container.container_name, cmd, args, kwargs))
     if '.' in cmd:
         command = fabfile
         for compo in cmd.split('.'):
             command = getattr(command, compo, command)
     else:
         command = getattr(fabfile, cmd, None)
     if not isinstance(command, tasks.WrappedCallableTask):
         raise RuntimeError("Unknown Fabric command %s" % cmd)
     cm = [context_managers.hide('stdout')]
     if shell_env:
         cm.append(context_managers.shell_env(**shell_env))
     with context_managers.settings(*cm, **let):
         api.execute(command, *args, **kwargs)
     return self
Example #16
def solr_query():
    """ query solr """
    with shell_env(DOCKER_HOST=get_swarm_url()):
        print "demonstrate you can query either server and get a response:"
        response = run("docker exec -it --user=solr solr1 "
            "curl 'http://localhost:8983/solr/{}/select?q=maxtor&indent=true' | tr -d '\r' | grep -v '^$'".format(SOLR_COLLECTION))
        if 'numFound="1"' in response:
            print "got one found, as expected"
        else:
            print "none found!"
        run("docker exec -it --user=solr solr2 "
            "curl 'http://localhost:8983/solr/{}/select?q=maxtor&indent=true' | tr -d '\r' | grep -v '^$'".format(SOLR_COLLECTION))
        if 'numFound="1"' in response:
            print "got one found, as expected"
        else:
            print "none found!"

        print "demonstrate the response only comes from a single shard:"
        response1 = run("docker exec -it --user=solr solr1 "
            "curl 'http://localhost:8983/solr/{}/select?q=maxtor&indent=true&shards=localhost:8983/solr/{}_shard1_replica1' | tr -d '\r' | grep -v '^$'".format(SOLR_COLLECTION, SOLR_COLLECTION))
        response2 = run("docker exec -it --user=solr solr1 "
            "curl 'http://localhost:8983/solr/{}/select?q=maxtor&indent=true&shards=localhost:8983/solr/{}_shard2_replica1' | tr -d '\r' | grep -v '^$' ".format(SOLR_COLLECTION, SOLR_COLLECTION))
        if (('numFound="1"' in response1) or ('numFound="1"' in response2)) and not ('numFound="1"' in response1 and 'numFound="1"' in response2):
            print "found only in one shard, as expected"
        else:
            print "ehr?!"
Example #17
def install_pmxbot():
	"Install pmxbot into a PEP-370 env at install_root"
	tmpl = 'python3 -m pip install --user {packages}'
	with shell_env(**install_env):
		usp = run('python3 -c "import site; print(site.getusersitepackages())"')
		sudo("mkdir -p {usp}".format(**locals()))
		sudo(tmpl.format_map(globals()))
Example #18
def create_networks():
    """ create two example networks """
    etcd_address = env.cluster_address[env.roledefs['etcd'][0]]
    with shell_env(ETCD_AUTHORITY='{}:{}'.format(etcd_address, env.etcd_client_port)):
        run("docker network create --driver=overlay --subnet 192.168.91.0/24 " + NET_ALPHA_BETA)
        run("docker network create --driver=overlay --subnet 192.168.89.0/24 " + NET_SOLR)
        run("docker network ls")
Example #19
def ping_test_containers():
    """ see if containers A and B can ping eachother """
    alpha_name = 'c-' + TEST_ALPHA
    beta_name = 'c-' + TEST_BETA
    with shell_env(DOCKER_HOST=get_swarm_url()):
        run("docker exec -i {} ping -c 1 {}.{}".format(alpha_name, beta_name, NET_ALPHA_BETA))
        run("docker exec -i {} ping -c 1 {}.{}".format(beta_name, alpha_name, NET_ALPHA_BETA))
Example #20
def create_virtualenv(environment):
    with settings(warn_only=True):
        venv_location = os.path.join("/home", environment.USER_NAME, virtualenv_location, environment.VENV)
        result = run("test -d {0}".format(venv_location))
    if result.failed:
        with shell_env(WORKON_HOME=virtualenv_location):
            run("source /usr/local/bin/virtualenvwrapper.sh && mkvirtualenv %s" % environment.VENV)
Example #21
def deploy(name):
    """
    Pull the latest code from remote
    """
    project_root, env = _app_paths(name)
    with cd(project_root):
        run('git pull', pty=False)

    environ = supervisor._get_environment(name)

    with cd(project_root), prefix('source %s/bin/activate' % env), hide('running'), shell_env(**environ):
        install_requirements(name)

        # initialize the database
        _info("./manage.py syncdb ... \n")
        run('python manage.py syncdb')

        # run south migrations
        _info("./manage.py migrate ... \n")
        run('python manage.py migrate', quiet=True)

        # collect static files
        _info("./manage.py collectstatic --noinput ... \n")
        run('python manage.py collectstatic --noinput')
        supervisor.restart(name)
Example #22
def install_python():
    with shell_env(HOME='/home/' + USERNAME, PATH="/home/" + USERNAME + "/.pyenv/bin:$PATH"):
        # Install pyenv
        if sudo('test -d ~/.pyenv').failed:
            sudo('git clone https://github.com/yyuu/pyenv.git ~/.pyenv')
        else:
            print green('"pyenv" is already installed')

        if sudo('grep ".pyenv" ~/.bashrc > /dev/null').failed:
            pyenv_root = 'PYENV_ROOT="$HOME\/.pyenv"'
            path_str = 'PATH="$PYENV_ROOT\/bin:$PATH"'
            sudo('echo -e "\n# pyenv" >> ~/.bashrc')
            sudo('echo "export %s" >> ~/.bashrc')
            sudo("sed -i -e 's/%s/" + pyenv_root + "/g' ~/.bashrc")
            sudo('echo "export %s" >> ~/.bashrc')
            sudo("sed -i -e 's/%s/" + path_str + "/g' ~/.bashrc")
        else:
            print green('"pyenv PATH" is already written')

        if sudo('grep "pyenv init" ~/.bashrc > /dev/null').failed:
            py_str = '"$(pyenv init -)"'
            sudo('echo "eval %s" >> ~/.bashrc')
            sudo("sed -i -e 's/%s/" + py_str + "/g' ~/.bashrc")
        else:
            print green('"pyenv" init is already written')

        # Install Python
        python_ver = sudo("pyenv install -l | awk '{print $1}' | egrep --color=never '^2\.7\.[0-9.]+' | tail -1")    # 2.7.x
        if sudo('pyenv versions | grep --color=never "' + python_ver + '" > /dev/null').failed:
            sudo('pyenv install ' + python_ver)
        else:
            print green('"python %s" is already installed' % python_ver)
        sudo('pyenv global ' + python_ver)
        sudo('pyenv rehash')
Example #23
def install_gems():
    with shell_env(HOME='/home/' + USERNAME, PATH="/home/" + USERNAME + "/.rbenv/bin:$PATH"):
        if sudo('test -f ~/.gemrc && grep "gem:" ~/.gemrc > /dev/null').failed:
            sudo('echo "gem: --no-ri --no-rdoc -V" >> ~/.gemrc')
        else:
            print green('".gemrc" already exists')

        gems = [
            'bundler',
            'pry',
            'rails',
            'rbenv-rehash',
            'rspec',
            'spring',
        ]

        not_installed = []
        installed = []
        for g in gems:
            if sudo("eval \"$(rbenv init -)\" && gem list | awk '{print $1}' | egrep '^" + g + "$' > /dev/null").failed:
                not_installed.append(g)
            else:
                installed.append(g)
        if len(installed) > 0:
            print green('"%s" is already installed' % ', '.join(installed))
        if len(not_installed) > 0:
            print yellow('"%s" is not installed' % ', '.join(not_installed))
            sudo('eval "$(rbenv init -)" && gem install ' + ' '.join(not_installed))

        sudo('rbenv rehash')
Example #24
 def _run_server(self):
     #try:
         #pid = local('ps -C node | grep log.io-server | cut -d" " -f6', capture=True)
     #except:
     if not is_running(local, 'log.io-server'):
         with shell_env(NODE_PATH=self.node_path):
             local('log.io-server')
Example #25
 def deploy():
     docker_host = 'tcp://%s:%d' % (env['host'], options.port)
     with shell_env(DOCKER_TLS_VERIFY="1",
                    DOCKER_HOST=docker_host):
         local('docker-compose %s' % options.extra)
Example #26
def delete_server():
    """
    Call ``nova delete`` to delete the server on which the redhat-openstack
    build slave is running.
    """
    with shell_env(OS_TENANT_NAME=TENANT_NAME):
        run('nova delete ' + BUILDSLAVE_NODENAME)
Example #27
def install_pip_requirements():
    with cd(env.code_path):
        with shell_env(PIP_DOWNLOAD_CACHE='~/.pip_download_cache'):
            with prefix(env.venv_prefix):
                print(colors.green("Installing dependencies"))
                run('{proxy_command} pip install -r '
                    'setup/requirements/production.txt'.format(**env))
Example #28
def install():
    # Could, in theory, rebuild virtual env here as well. Currently just update
    # pip & migrate
    with prefix('source ~/.virtualenvs/tracker/bin/activate'), cd('~/tracker/'):
        run('pip install -r requirements.txt')
        with shell_env(DJANGO_SETTINGS_MODULE='mood_tracker.settings.base'):
            run('python manage.py migrate')
Example #29
def update_refinery():
    """Perform full update of a Refinery Platform instance"""
    puts("Updating Refinery")
    with cd(env.refinery_project_dir):
        # if in Vagrant update current branch, otherwise checkout custom branch
        if env.project_user != 'vagrant':
            run("git checkout {branch}".format(**env))
        # avoid explaining automatic merge commits with both new and old git
        # versions running on different VMs
        # https://raw.githubusercontent.com/gitster/git/master/Documentation/RelNotes/1.7.10.txt
        with shell_env(GIT_MERGE_AUTOEDIT='no'):
            run("git pull".format(**env))
    with cd(env.refinery_ui_dir):
        run("npm prune --progress false")
        run("npm update --progress false")
        run("rm -rf bower_components")
        run("bower update --config.interactive=false")
        run("grunt make")
    with prefix("workon {refinery_virtualenv_name}".format(**env)):
        run("pip install -r {refinery_project_dir}/requirements.txt"
            .format(**env))
        run("find . -name '*.pyc' -delete")
        run("{refinery_app_dir}/manage.py migrate --noinput --fake-initial"
            .format(**env))
        run("{refinery_app_dir}/manage.py collectstatic --clear --noinput"
            .format(**env))
        run("supervisorctl reload")
    with cd(env.refinery_project_dir):
        run("touch {refinery_app_dir}/config/wsgi_*.py".format(**env))
Example #30
def test_python(apps=_default_tests, pytest=True):
    """ Run Python tests. """

    # .pyc files can contain filepaths; this permits easy switching
    # between a Vagrant- and Docker-based dev environment
    local("find . -name '*.pyc' -delete")

    # In order to run functional_tests, we have to run collectstatic, since functional tests use DEBUG=False
    # For speed we use the default Django STATICFILES_STORAGE setting here, which also has to be set in settings_testing.py
    if "functional_tests" in apps and not os.environ.get('SERVER_URL'):
        local("DJANGO__STATICFILES_STORAGE=django.contrib.staticfiles.storage.StaticFilesStorage python manage.py collectstatic --noinput")

    # temporarily set MEDIA_ROOT to a tmp directory, in a way that lets us clean up after ourselves
    tmp = tempfile.mkdtemp()
    try:
        shell_envs = {
            'DJANGO__MEDIA_ROOT': tmp
        }
        with shell_env(**shell_envs):
            # all arguments to Fabric tasks are interpreted as strings
            if pytest == 'False':
                local("coverage run manage.py test --settings perma.settings.deployments.settings_testing %s" % (apps))
            else:
                local("pytest %s --ds=perma.settings.deployments.settings_testing --cov --cov-report= " % (apps))
    finally:
        # clean up after ourselves
        shutil.rmtree(tmp)
Example #31
def migrate(mode=DEFAULT_MODE,
            deploy_to=DEFAULT_DEPLOY_TO,
            env_variables=None,
            setup=False,
            reset_db=False,
            generate_dummy_data=True,
            create_super_user=True):
    """
    Perform migrations.

    Options
    -------
        mode [DEFAULT_MODE]
        deploy_to [DEFAULT_DEPLOY_TO]
        reset_db [False]
            If True, delete the database.
        generate_dummy_data [True]
            Generate dummy data (see app/management/)
        create_super_user [True]
            If True and reset_db is True, create admin super user.
    """
    if not env_variables:
        env_variables = _get_env_variables(mode=mode)

    print('\nMigrating database as user django')

    with shell_env(**env_variables):
        with cd(DJANGO_PROJECT_PATH):
            if reset_db:
                with settings(warn_only=True):
                    run('rm -rf app/migrations')
                with settings(warn_only=True):
                    sudo("chmod -R 700 app"
                         )  # rwx --- --- : django can write new migrations

            env.user = '******'
            env.password = DJANGO_PASS
            # get the django database password; this is kind of hacky, but it works
            private_settings = _get_private_settings(deploy_to=deploy_to)
            django_db_pass = private_settings.DB_PASS

            print('> Checking database backend')
            with settings(
                    prompts={"Login password for 'django': ": django_db_pass}):
                run('echo "from django.db import connection; connection.vendor" | python manage.py shell'
                    )

            with settings(
                    prompts={"Login password for 'django': ": django_db_pass}):
                if reset_db:
                    print('> Deleting database')
                    if mode == 'dev':
                        run('rm -rf %s/db.sqlite3' % (DJANGO_PROJECT_NAME))
                    else:
                        with settings(warn_only=True):
                            run('rm -rf app/migrations')
                        run('python manage.py sqlclear app | python manage.py dbshell')

                run('python manage.py makemigrations')

                if setup:
                    run('python manage.py migrate --fake-initial')
                    run('python manage.py makemigrations app')
                    run('python manage.py migrate app')
                elif reset_db:
                    run('python manage.py migrate --fake')
                    run('python manage.py makemigrations app')
                    run('python manage.py migrate --fake-initial')
                    run('python manage.py migrate')
                else:
                    run('python manage.py migrate')
            if mode == 'dev' or reset_db:
                if generate_dummy_data or reset_db:
                    run('python manage.py generate_models 3 --reset')
            env.user = '******'

            if mode == 'prod':
                print('> Checking postgresql status')
                run('service postgresql status')
                run('sudo netstat -nl | grep postgres')

            print('> Creating super user with login admin/pass')
            with settings(warn_only=True):
                with hide('stderr', 'stdout', 'warnings'):
                    sudo('chmod u+x scripts/createsuperuser.sh')
                    run('./scripts/createsuperuser.sh')
Example #32
def update_gunicorn_config():
    with cd(DEPLOY_DIR.format(**env)):
        with shell_env(SITENAME=env.host):
            run("sed -e s/'$SITENAME'/$SITENAME/g gunicorn_start_template.sh > gunicorn_start.sh"
                )
Example #33
def migrate():
    with shell_env(DATABASE_PASSWORD='******'):
        local('python {}/manage.py migrate'.format(env.project_name))
Example #34
def deploy_container():
    with cd("infrastructure"):
        with shell_env(POSTGRES_PASSWORD=os.environ.get("POSTGRES_PASSWORD")):
            run("docker-compose stop sblog")
            run("docker-compose pull sblog")
            run("docker-compose up -d sblog")
Example #35
def shell():
    kwarg = _check_env()
    with shell_env(**kwarg):
        local('python pillbox-engine/manage.py shell')
Example #36
def pip_env():
    return shell_env(
        PYTHON_EGG_CACHE='/home/{0}/.python-eggs'.format(config.user))
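Unlike the other examples, this helper returns the context manager rather than entering it, so the caller composes it. A minimal usage sketch, assuming a hypothetical surrounding task:

def install_requirements():
    # reuses the shared PYTHON_EGG_CACHE setting wherever pip runs
    with pip_env():
        run('pip install -r requirements.txt')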
Example #37
 def run_cmd(cmd):
     with shell_env(OS_AUTH_URL=lab.cloud.end_point,
                    OS_USERNAME=lab.cloud.user,
                    OS_PASSWORD=lab.cloud.password,
                    OS_TENANT_NAME=lab.cloud.tenant):
         return server.exe(cmd).stdout
Example #38
def build(aptitude_package_recipient_ansible_role_version,
          automater_ansible_role_version,
          automation_agent_ansible_role_version,
          automation_server_ansible_role_version,
          aws_api_client_ansible_role_version,
          aws_ssh_server_ansible_role_version, deployer_ansible_role_version,
          container_factory_ansible_role_version,
          frontend_project_builder_ansible_role_version,
          ssrn_system_simulator_ansible_role_version,
          java_project_builder_ansible_role_version,
          long_running_host_ansible_role_version,
          browser_test_runner_ansible_role_version,
          clock_synchronization_host_ansible_role_version):
    with lcd(get_fabric_file_directory_path()):
        with shell_env(
                APTITUDE_PACKAGE_RECIPIENT_ANSIBLE_ROLE_VERSION=
                aptitude_package_recipient_ansible_role_version,
                AUTOMATER_ANSIBLE_ROLE_VERSION=automater_ansible_role_version,
                AUTOMATION_AGENT_ANSIBLE_ROLE_VERSION=
                automation_agent_ansible_role_version,
                AUTOMATION_SERVER_ANSIBLE_ROLE_VERSION=
                automation_server_ansible_role_version,
                AWS_API_CLIENT_ANSIBLE_ROLE_VERSION=
                aws_api_client_ansible_role_version,
                AWS_SSH_SERVER_ANSIBLE_ROLE_VERSION=
                aws_ssh_server_ansible_role_version,
                DEPLOYER_ANSIBLE_ROLE_VERSION=deployer_ansible_role_version,
                CONTAINER_FACTORY_ANSIBLE_ROLE_VERSION=
                container_factory_ansible_role_version,
                FRONTEND_PROJECT_BUILDER_ANSIBLE_ROLE_VERSION=
                frontend_project_builder_ansible_role_version,
                SSRN_SYSTEM_SIMULATOR_ANSIBLE_ROLE_VERSION=
                ssrn_system_simulator_ansible_role_version,
                JAVA_PROJECT_BUILDER_ANSIBLE_ROLE_VERSION=
                java_project_builder_ansible_role_version,
                LONG_RUNNING_HOST_ANSIBLE_ROLE_VERSION=
                long_running_host_ansible_role_version,
                BROWSER_TEST_RUNNER_ANSIBLE_ROLE_VERSION=
                browser_test_runner_ansible_role_version,
                CLOCK_SYNCHRONIZATION_HOST_ANSIBLE_ROLE_VERSION=
                clock_synchronization_host_ansible_role_version):
            local('envsubst '
                  '\${APTITUDE_PACKAGE_RECIPIENT_ANSIBLE_ROLE_VERSION},'
                  '\${AUTOMATER_ANSIBLE_ROLE_VERSION},'
                  '\${AUTOMATION_AGENT_ANSIBLE_ROLE_VERSION},'
                  '\${AUTOMATION_SERVER_ANSIBLE_ROLE_VERSION},'
                  '\${AWS_API_CLIENT_ANSIBLE_ROLE_VERSION},'
                  '\${AWS_SSH_SERVER_ANSIBLE_ROLE_VERSION},'
                  '\${DEPLOYER_ANSIBLE_ROLE_VERSION},'
                  '\${CONTAINER_FACTORY_ANSIBLE_ROLE_VERSION},'
                  '\${FRONTEND_PROJECT_BUILDER_ANSIBLE_ROLE_VERSION},'
                  '\${SSRN_SYSTEM_SIMULATOR_ANSIBLE_ROLE_VERSION},'
                  '\${JAVA_PROJECT_BUILDER_ANSIBLE_ROLE_VERSION},'
                  '\${LONG_RUNNING_HOST_ANSIBLE_ROLE_VERSION},'
                  '\${BROWSER_TEST_RUNNER_ANSIBLE_ROLE_VERSION},'
                  '\${CLOCK_SYNCHRONIZATION_HOST_ANSIBLE_ROLE_VERSION} '
                  '< fabfile-deployment.py > fabfile-deployment.py.rendered '
                  '&& mv fabfile-deployment.py.rendered fabfile-deployment.py')

        local('rm -rf build')
        local('mkdir -p build')
        local(
            'tar --create --gzip --dereference --verbose --file build/deployment.tgz '
            '--transform "s|^fabfile-deployment.py$|fabfile.py|" '
            'fabfile-deployment.py infrastructure')
Example #39
def serve():
    with shell_env(DATABASE_PASSWORD='******'):
        local('python {}/manage.py runserver'.format(env.project_name))
Example #40
def create_logs_folder(project_name):
    home_folder = '/home/{project_name}'.format(project_name=project_name)
    with cd(home_folder), settings(sudo_user=project_name), shell_env(
            HOME=home_folder):
        if not exists('logs'):
            sudo('mkdir logs')
Example #41
def format_namenode():
    '''Formats namenode on node1'''
    with shell_env(JAVA_HOME='/usr/java/default'):
        sudo('/opt/hadoop/bin/hdfs namenode -format vagrant -nonInteractive',
             warn_only=True)
Example #42
def build(stage_name=None):
    ''' Build the application. '''
    with shell_env(STAGE=(stage_name or stage)):
        npm.run('build')
Example #43
def test():
    with shell_env(TOKEN='123:df', LOGLEVEL='DEBUG',
                   DEFAULT_CHANNEL='-123456'):
        local('PYTHONPATH="$(pwd):$PYTHONPATH" python -m unittest discover')
Example #44
def deploy_aips():
    """
    Construct a minimal AIPS installation that can run the Obit pipeline.
    This code is stolen from AIPSLite and made to work with fabric.
    """

    #Delete old aips installation as this seems to conflict when updating
    remove_dir(AIPS_DIR)

    aips_server = 'ftp.aoc.nrao.edu'
    # Minimum files required:
    intel_libs = [
        AIPS_VERSION + '/LNX64/LIBR/INTELCMP/libimf.so',
        AIPS_VERSION + '/LNX64/LIBR/INTELCMP/libsvml.so'
    ]
    popsdat_files = [AIPS_VERSION + '/HELP/POPSDAT.HLP']
    binary_files = [AIPS_VERSION + '/LNX64/LOAD/FILAIP.EXE']

    make_directory(AIPS_DIR)
    # rsync the basic AIPS files
    rsync(aips_server,
          intel_libs + popsdat_files + binary_files,
          output_base=AIPS_DIR + '/' + AIPS_VERSION)
    #Sort out FILAIP
    data_dir = AIPS_DIR + '/' + AIPS_VERSION + '/DATA'
    mem_dir = AIPS_DIR + '/' + AIPS_VERSION + '/LNX64/MEMORY'
    template_dir = AIPS_DIR + '/' + AIPS_VERSION + '/LNX64/TEMPLATE'
    for temp_dir in [data_dir, mem_dir, template_dir]:
        make_directory(temp_dir)
    #Run FILAIP
    env = {
        'DA00': template_dir,
        'NET0': template_dir,
        'DA01': data_dir,
        'NVOL': '1',
        'NEWMEM': mem_dir,
        'LD_LIBRARY_PATH':
        AIPS_DIR + '/' + AIPS_VERSION + '/LNX64/LIBR/INTELCMP/',
        'AIPS_VERSION': AIPS_DIR + '/' + AIPS_VERSION,
        'AIPS_ROOT': AIPS_DIR,
        'VERSION': 'NEW',
        'NEW': AIPS_DIR + '/' + AIPS_VERSION
    }
    with shell_env(**env):
        run('echo 8 2 | ' + AIPS_DIR + '/' + AIPS_VERSION +
            '/LNX64/LOAD/FILAIP.EXE')
    # Download Tasks
    exe_files = [
        AIPS_VERSION + '/LNX64/LOAD/' + taskname + '.EXE'
        for taskname in AIPS_TASKS
    ]
    hlp_files = [
        AIPS_VERSION + '/HELP/' + taskname + '.HLP' for taskname in AIPS_TASKS
    ]
    rsync(aips_server,
          exe_files + hlp_files,
          output_base=AIPS_DIR + '/' + AIPS_VERSION)

    # AIPS needs environment variables set up in katimrc
    files.sed('/var/kat/k7contpipe/katimrc',
              'aips_dir = *',
              'aips_dir = ' + AIPS_DIR,
              use_sudo=True)
    files.sed('/var/kat/k7contpipe/katimrc',
              'aips_version = *',
              'aips_version = ' + AIPS_VERSION,
              use_sudo=True)
Example #45
def _virtualenv():
    with cd(env.directory + "/tests"):
        with prefix(env.activate):
            with shell_env(**REMOTE_ENV_VARS):
                yield
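The bare yield only works as a with-block if the function is wrapped in contextlib.contextmanager, presumably applied in the original source. A sketch of the decorated form and a caller (the decorator placement and the run_tests task are assumptions, not shown above):

from contextlib import contextmanager

@contextmanager
def _virtualenv():
    with cd(env.directory + "/tests"):
        with prefix(env.activate):
            with shell_env(**REMOTE_ENV_VARS):
                yield  # the caller's with-block body runs here

def run_tests():
    with _virtualenv():
        run('pytest')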
Example #46
data = {
    'instance.gopath': '/opt/go_workspace',
    'instance.goroot': '/opt/go'
}

redis = j.atyourservice.new(name='go', parent=vmMaster, args=data)
redis.consume('node', vmMaster.instance)
redis.install(deps=True)

# if this is not a clean machine make sure to stop agentcontroller8

cl.run('ays stop -n agentcontroller8')

# running tests.
with shell_env(GOROOT='/opt/go', GOPATH='/opt/go_workspace', PATH='$PATH:/opt/go/bin'):
    cl.run('go get -u -t -f github.com/Jumpscale/agent8')
    cl.run('go get -u -t -f github.com/Jumpscale/agentcontroller8')

    cl.run('go test -v github.com/Jumpscale/agent8/tests')


# installing controller and client from @ys
data = {
    'instance.param.redis.host': 'localhost:6379',
    'instance.param.redis.password': '',
    'instance.param.webservice.host': ':8966'
}

controller = j.atyourservice.new(name='agentcontroller8', parent=vmMaster, args=data)
controller.consume('node', vmMaster.instance)
Example #47
def test():
    """Run unittest in local"""

    with shell_env(FLASK_APP='wsgi.py', FLASK_DEBUG="1"):
        local("python tests.py")
Example #48
def workon_dezede(settings_module='dezede.settings.prod'):
    set_env()
    with cd(f'{env.project_path}'):
        with path(f"{env.virtual_env / 'bin'}", behavior='prepend'):
            with shell_env(DJANGO_SETTINGS_MODULE=settings_module):
                yield
Example #49
    def _run_script_with_settings(self):
        fabric_env_vars = self.env_vars
        fabric_settings = self._get_settings()

        with shell_env(**fabric_env_vars), settings(**fabric_settings):
            return self._run_script()
Example #50
def debug():
    """Run in debug mode in local"""

    with shell_env(FLASK_APP='wsgi.py', FLASK_DEBUG="1"):
        local("flask run")
Example #51
def prepare_frontend():
    """Prepare and build the frontend."""
    with lcd(LOCAL_FRONTEND), shell_env(
            NODE_ENV="production"), section("Preparing frontend"):
        local("npm run -s clean:dist")
        local("npm run -s build")
            if len(line) > 44:
                r2 = tmpfile
            i = i + 1

        cipher_suite = Fernet(r1)
        AWSSID = cipher_suite.decrypt(
            exec_remote_cmd('cat %s' % (r2)).split()[0])
        AWSSECRET = cipher_suite.decrypt(
            exec_remote_cmd('cat %s' % (r2)).split()[1])
        AWSACCOUNT = cipher_suite.decrypt(
            exec_remote_cmd('cat %s' % (r2)).split()[2])
        AWSREGION = cipher_suite.decrypt(
            exec_remote_cmd('cat %s' % (r2)).split()[3])

        with shell_env(AWS_ACCESS_KEY_ID=AWSSID,
                       AWS_SECRET_ACCESS_KEY=AWSSECRET,
                       AWS_ACCOUNT_ID=AWSACCOUNT,
                       AWS_DEFAULT_REGION=AWSREGION):
            cp_result = exec_remote_cmd(
                'aws s3 cp s3://%s/%s_%i.txt ~/' %
                (params['s3'], instanceListFileBasename, instanceCounter))
            if cp_result.failed:
                print 'Error: movie list copy failed.'
            print 'aws s3 cp s3://%s/%s_%i.txt ~/' % (
                params['s3'], instanceListFileBasename, instanceCounter)
        instanceCounter = instanceCounter + 1
'''
    #Fabric copying: http://stackoverflow.com/questions/5314711/how-do-i-copy-a-directory-to-a-remote-machine-using-fabric

    ###########
    #Cleanup###
    ###########
Example #53
def client_exec(cmd, **kwargs):
    with shell_env():
        return run('sudo su root -c "mysql -e \\"{}\\""'.format(
            cmd.format(**kwargs)),
                   shell=False)
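The empty shell_env() adds no variables here; the notable part is the quoting, where kwargs are interpolated into the SQL first and the escaped \" pair keeps the statement intact inside the su -c shell. A usage sketch (the database name is illustrative):

# runs: sudo su root -c "mysql -e \"CREATE DATABASE IF NOT EXISTS appdb\""
client_exec('CREATE DATABASE IF NOT EXISTS {db}', db='appdb')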
Example #54
 def remote_host(self):
     with nested(self.remote_tunnel(), shell_env(**self.env)):
         yield
Example #55
 def execute_cmd_with_proxy(self, cmd):
     if self.inputs.http_proxy:
         with shell_env(http_proxy=self.inputs.http_proxy):
             sudo(cmd)
     else:
         sudo(cmd)
Example #56
    def _run_tests_(self, should_email=False):
        aws_src = os.getenv('OCGIS_SIMPLEAWS_SRC')
        aws_conf = os.getenv('OCGIS_CONF_PATH')
        aws_testing_section = 'aws-testing'

        ebs_volumesize = int(parser.get(aws_testing_section, 'ebs_volumesize'))
        ebs_snapshot = parser.get(aws_testing_section, 'ebs_snapshot')
        ebs_mount_name = parser.get(aws_testing_section, 'ebs_mount_name')
        ebs_placement = parser.get(aws_testing_section, 'ebs_placement')
        test_results_path = parser.get(aws_testing_section,
                                       'test_results_path')
        test_instance_name = parser.get(aws_testing_section,
                                        'test_instance_name')
        test_instance_type = parser.get(aws_testing_section,
                                        'test_instance_type')
        test_image_id = parser.get(aws_testing_section, 'test_image_id')
        dest_email = parser.get(aws_testing_section, 'dest_email')
        dir_clone = parser.get('server', 'dir_clone')
        key_name = parser.get('simple-aws', 'key_name')

        import sys
        sys.path.append(aws_src)
        import saws
        import ipdb

        am = saws.AwsManager(aws_conf)

        self.log.info('launching instance')
        instance = am.launch_new_instance(test_instance_name,
                                          image_id=test_image_id,
                                          instance_type=test_instance_type,
                                          placement=ebs_placement)

        with settings(host_string=instance.ip_address,
                      disable_known_hosts=True,
                      connection_attempts=10):
            try:
                self.log.info('creating volume')
                volume = am.conn.create_volume(ebs_volumesize,
                                               ebs_placement,
                                               snapshot=ebs_snapshot)
                am.wait_for_status(volume, 'available')
                try:
                    self.log.info('attaching volume')
                    am.conn.attach_volume(volume.id,
                                          instance.id,
                                          ebs_mount_name,
                                          dry_run=False)
                    am.wait_for_status(volume, 'in-use')

                    ebs_mount()

                    if self.launch_pause == 'true':
                        self.log.info(
                            'pausing. continue to terminate instance...')
                        msg = 'ssh -i ~/.ssh/{0}.pem ubuntu@{1}'.format(
                            key_name, instance.public_dns_name)
                        self.log.info(msg)
                        ipdb.set_trace()
                    else:
                        path = os.path.join(dir_clone,
                                            parser.get('git', 'name'))
                        test_target = os.path.join(path, 'src', 'ocgis',
                                                   'test')
                        # test_target = os.path.join(path, 'src', 'ocgis', 'test', 'test_simple')
                        nose_runner = os.path.join(path, 'fabfile',
                                                   'nose_runner.py')
                        path_src = os.path.join(path, 'src')
                        with cd(path):
                            fcmd(run, ['git', 'pull'])
                            fcmd(run, ['git', 'checkout', self.branch])
                            fcmd(run, ['git', 'pull'])
                        with cd(path_src):
                            with shell_env(OCGIS_TEST_TARGET=test_target):
                                fcmd(run, ['python', nose_runner])
                                if self.path_local_log is not None:
                                    get(test_results_path,
                                        local_path=self.path_local_log)

                    ebs_umount()

                finally:
                    self.log.info('detaching volume')
                    volume.detach(force=True)
                    am.wait_for_status(volume, 'available')
                    self.log.info('deleting volume')
                    volume.delete()
            finally:
                self.log.info('terminating instance')
                instance.terminate()

        if should_email and self.launch_pause == 'false' and self.path_local_log is not None:
            self.log.info('sending email')
            with open(self.path_local_log, 'r') as f:
                content = f.read()
            am.send_email(dest_email, dest_email, 'OCGIS_AWS', content)

        self.log.info('success')
Example #57
def build(outdir=None, device_sdk=None, simulator_sdk=None, **kwargs):
    """
    Build card.io SDK.
    """
    print(colors.white("Setup", bold=True))

    to_hide = [] if env.verbose else ["stdout", "stderr", "running"]

    xcode_preprocessor_flags = {}

    if not outdir:
        message = """
                     You must provide outdir=<sdk output parent dir>
                     Example usage:
                       `fab build:outdir=~` - normal build
                       `fab build:outdir=~,SCAN_EXPIRY=0` - to disable the experimental expiry-scan feature
                  """
        abort(textwrap.dedent(message).format(**locals()))

    if _confirm_ready_for_release("assets/strings"):
        sys.exit(1)

    outdir = os.path.abspath(os.path.expanduser(outdir))
    print colors.yellow(
        "Will save release sdk to {outdir}".format(outdir=outdir))
    out_subdir = "card.io_ios_sdk_{0}".format(_version_str(show_dirty=True))

    xcode_preprocessor_flags.update(kwargs)
    formatted_xcode_preprocessor_flags = " ".join(
        "{k}={v}".format(k=k, v=v)
        for k, v in xcode_preprocessor_flags.iteritems())
    extra_xcodebuild_settings = "GCC_PREPROCESSOR_DEFINITIONS='$(value) {formatted_xcode_preprocessor_flags}'".format(
        **locals())

    device_sdk = device_sdk or "iphoneos"
    simulator_sdk = simulator_sdk or "iphonesimulator"

    arch_to_sdk = (("armv7", device_sdk), ("armv7s", device_sdk), ("arm64",
                                                                   device_sdk),
                   ("i386", simulator_sdk), ("x86_64", simulator_sdk))

    with settings(hide(*to_hide)):
        icc_root = local("git rev-parse --show-toplevel", capture=True)

    temp_dir = tempfile.mkdtemp() + os.sep
    atexit.register(shutil.rmtree, temp_dir, True)

    print(colors.white("Preparing dmz", bold=True))
    with settings(hide(*to_hide)):
        with lcd(os.path.join(icc_root, "dmz")):
            dmz_all_filename = os.path.join("dmz", "dmz_all.cpp")
            with open(dmz_all_filename) as f:
                old_dmz_all = f.read()
            local("fab concat")
            with open(dmz_all_filename) as f:
                new_dmz_all = f.read()
            if old_dmz_all != new_dmz_all:
                print(
                    colors.red("WARNING: dmz_all.h was not up to date!",
                               bold=True))

    print(colors.white("Building", bold=True))
    print(colors.white("Using temp dir {temp_dir}".format(**locals())))
    print(
        colors.white(
            "Using extra Xcode flags: {formatted_xcode_preprocessor_flags}".
            format(**locals())))
    print(
        colors.white("Using developer directory: {}".format(
            env.developer_dir)))

    with lcd(icc_root):
        with shell_env(DEVELOPER_DIR=env.developer_dir):
            with settings(hide(*to_hide)):
                lipo_build_dirs = {}
                build_config = "Release"
                arch_build_dirs = {}
                for arch, sdk in arch_to_sdk:
                    print(
                        colors.blue("({build_config}) Building {arch}".format(
                            **locals())))

                    base_xcodebuild_command = "xcrun xcodebuild OTHER_CFLAGS='-fembed-bitcode' -target CardIO -arch {arch} -sdk {sdk} -configuration {build_config}".format(
                        **locals())

                    clean_cmd = "{base_xcodebuild_command} clean".format(
                        **locals())
                    local(clean_cmd)

                    build_dir = os.path.join(temp_dir, build_config, arch)
                    arch_build_dirs[arch] = build_dir
                    os.makedirs(build_dir)
                    parallelize = "" if env.verbose else "-parallelizeTargets"  # don't parallelize verbose builds, it's hard to read the output
                    build_cmd = "{base_xcodebuild_command} {parallelize} CONFIGURATION_BUILD_DIR={build_dir}  {extra_xcodebuild_settings}".format(
                        **locals())
                    local(build_cmd)

                print(
                    colors.blue("({build_config}) Lipoing".format(**locals())))
                lipo_dir = os.path.join(temp_dir, build_config, "universal")
                lipo_build_dirs[build_config] = lipo_dir
                os.makedirs(lipo_dir)
                arch_build_dirs["universal"] = lipo_dir
                # in Xcode 4.5 GM, xcrun selects the wrong lipo to use, so circumventing xcrun for now :(
                lipo_cmd = "`xcode-select -print-path`/Toolchains/XcodeDefault.xctoolchain/usr/bin/lipo " \
                           "           {armv7}/{libname}" \
                           "           -arch armv7s {armv7s}/{libname}" \
                           "           -arch arm64 {arm64}/{libname}" \
                           "           -arch i386 {i386}/{libname}" \
                           "           -arch x86_64 {x86_64}/{libname}" \
                           "           -create" \
                           "           -output {universal}/{libname}".format(libname=env.libname, **arch_build_dirs)
                local(lipo_cmd)

                print(
                    colors.blue(
                        "({build_config}) Stripping debug symbols".format(
                            **locals())))
                strip_cmd = "xcrun strip -S {universal}/{libname}".format(
                    libname=env.libname, **arch_build_dirs)
                local(strip_cmd)

                out_subdir_suffix = "_".join("{k}-{v}".format(k=k, v=v)
                                             for k, v in kwargs.iteritems())
                if out_subdir_suffix:
                    out_subdir_suffix = "_" + out_subdir_suffix
                out_subdir += out_subdir_suffix
                sdk_dir = os.path.join(outdir, out_subdir)

                print(
                    colors.white("Assembling release SDK in {sdk_dir}".format(
                        sdk_dir=sdk_dir),
                                 bold=True))
                if os.path.isdir(sdk_dir):
                    shutil.rmtree(sdk_dir)
                cardio_dir = os.path.join(sdk_dir, "CardIO")
                os.makedirs(cardio_dir)

                header_files = glob.glob(
                    os.path.join("CardIO_Public_API", "*.h"))
                _copy(header_files, cardio_dir)

                libfile = os.path.join(lipo_build_dirs["Release"], env.libname)

                shutil.copy2("{libfile}".format(libfile=libfile), ".")
                zip_cmd = "zip {libname}.zip {libname}".format(
                    libname=env.libname)
                local(zip_cmd)
                os.remove("{libname}".format(libname=env.libname))
                shutil.move("{libname}.zip".format(libname=env.libname),
                            cardio_dir)

                release_dir = os.path.join(icc_root, "Release")
                shutil.copy2(os.path.join(release_dir, "release_notes.txt"),
                             sdk_dir)
                shutil.copy2(os.path.join(release_dir, "CardIO.podspec"),
                             sdk_dir)
                shutil.copy2(os.path.join(release_dir, "acknowledgments.md"),
                             sdk_dir)
                shutil.copy2(os.path.join(release_dir, "LICENSE.md"), sdk_dir)
                shutil.copy2(os.path.join(release_dir, "README.md"), sdk_dir)
                shutil.copytree(os.path.join(release_dir, "SampleApp"),
                                os.path.join(sdk_dir, "SampleApp"),
                                ignore=shutil.ignore_patterns(".DS_Store"))
                shutil.copytree(os.path.join(release_dir, "SampleApp-Swift"),
                                os.path.join(sdk_dir, "SampleApp-Swift"),
                                ignore=shutil.ignore_patterns(".DS_Store"))
Example #58
def develop():
    env_vars = settings.get('env', {})
    if settings.get('targets') and settings['targets'].get('local'):
        env_vars.update(settings['targets']['local'].get('env', {}))
    with shell_env(**env_vars):
        local('meteor')
Example #59
 def execute_cmd_with_proxy(self, cmd, do_local=False):
     if self.inputs.http_proxy:
         with shell_env(http_proxy=self.inputs.http_proxy):
             local(cmd) if do_local else sudo(cmd)
     else:
         local(cmd) if do_local else sudo(cmd)
Example #60
def deploy(schema_path, nids, exps, runfiles, fmt):
    nid = iter(nids[env.host])
    exp = iter(exps[env.host])
    runfile = iter(runfiles[env.host])
    succeeded = True
    with shell_env(SCHEMA_PATH=schema_path):
        with settings(warn_only=True, command_timeout=MAX_TIME_PER_EXP):
            #            if env.same_node:
            cmd = ''
            for r in env.roledefs["servers"]:
                if r == env.host:
                    nn = nid.next()
                    rfile = runfile.next()
                    args = get_args(fmt, exp.next())
                    if env.shmem:
                        cmd += "(/dev/shm/{}rundb -nid{} {}>> /dev/shm/results{}.out 2>&1 &);".format(
                            rfile, nn, args, nn)
#                        cmd += "(/dev/shm/rundb -nid{} >> /dev/shm/results{}.out 2>&1 &);".format(nn,nn)
                    else:
                        cmd += "(./{}rundb -nid{} {}>> results{}.out 2>&1 &);".format(
                            rfile, nn, args, nn)
            for r in env.roledefs["clients"]:
                if r == env.host:
                    nn = nid.next()
                    rfile = runfile.next()
                    args = get_args(fmt, exp.next())
                    if env.shmem:
                        cmd += "(/dev/shm/{}runcl -nid{} {}>> /dev/shm/results{}.out 2>&1 &);".format(
                            rfile, nn, args, nn)
                    else:
                        cmd += "(./{}runcl -nid{} {}>> results{}.out 2>&1 &);".format(
                            rfile, nn, args, nn)
#            for r in env.roledefs["sequencer"]:
#                if r == env.host:
#                    nn = nid.next()
#                    args = get_args(fmt,exp.next())
#                    if env.shmem:
#                        cmd += "(/dev/shm/runsq -nid{} {}>> /dev/shm/results{}.out 2>&1 &);".format(nn,args,nn)
#                    else:
#                        cmd += "(./runsq -nid{} {}>> results{}.out 2>&1 &);".format(nn,args,nn)

            cmd = cmd[:-3]
            cmd += ")"
            try:
                res = run("echo $SCHEMA_PATH")
                if not env.dry_run:
                    run(cmd)
                else:
                    print(cmd)
            except CommandTimeout:
                pass
            except NetworkError:
                pass


#            else:
#                if env.host in env.roledefs["servers"]:
#                    nn = nid.next();
#                    cmd = "./rundb -nid{} >> results{}.out 2>&1".format(nn,nn)
#                elif env.host in env.roledefs["clients"]:
#                    nn = nid.next();
#                    cmd = "./runcl -nid{} >> results{}.out 2>&1".format(nn,nn)
#                elif "sequencer" in env.roledefs and env.host in env.roledefs["sequencer"]:
#                    nn = nid.next();
#                    cmd = "./runsq -nid{} >> results{}.out 2>&1".format(nn,nn)
#                else:
#                    with color('error'):
#                        puts("host does not belong to any roles",show_prefix=True)
#                        puts("current roles:",show_prefix=True)
#                        puts(pprint.pformat(env.roledefs,depth=3),show_prefix=False)
#
#                try:
#                    res = run("echo $SCHEMA_PATH")
#                    if not env.dry_run:
#                        run(cmd)
#                except CommandTimeout:
#                    pass
#                except NetworkError:
#                    pass
    return True