Example #1
def delete():
    if not confirm("Are you sure you want to delete the bucket %r?"
            % bucket_name):
        abort('Aborting at user request.')
    conn = connect_s3()
    conn.delete_bucket(bucket_name)
    print 'Bucket %r deleted.' % bucket_name
def deploy_code():

    def check_if_code_exists():
        """ Check if a branch or tag exists in the user's local repo. """
        # Check if the branch exists
        with settings(warn_only=True):
            if local('git show-ref --heads --tags %s' % env.code).failed:
                return False
            else:
                return True

    def get_checked_out_branch_or_tag():
        return run('git name-rev --name-only HEAD')

    # Deploy the code
    with cd(env.code_dir):
        if not check_if_code_exists():
            abort('The "%s" %s does not exist in your repo!'
                  % (env.code, env.code_type))
        if not confirm('Current checked out %s is "%s", proceed with \
deploying "%s"?' % (env.code_type, get_checked_out_branch_or_tag(), env.code)):
            abort('Canceled deployment')
        run('git fetch')
        run('git fetch --tags')
        run('git checkout %s' % env.code)
        if env.code_type == 'branch':
            run('git pull origin %s' % env.code)
Example #3
def dump_database(database, path='/var/backups/postgres', filename='', format='plain', port=None):
    """
    Dump a database to a remote destination path
    Example::

        import fabtools

        fabtools.postgres.dump_database('myapp', path='/var/backups/postgres', filename='myapp-backup.sql')
        # If no filename is specified, the dump is saved with a date-stamped
        # name, e.g. database-201312010000.sql
        fabtools.postgres.dump_database('myapp', path='/var/backups/postgres')
        # If no path is specified, dumps are saved under '/var/backups/postgres'
        fabtools.postgres.dump_database('myapp')
        # You can specify pg_dump's custom format (restorable with pg_restore)
        fabtools.postgres.dump_database('myapp', format='custom')

    """
    command_options = []
    port_option = _port_option(port)

    if port_option is not None:
        command_options.append(port_option)

    command_options = ' '.join(command_options)

    if fabtools.files.is_dir(path):
        if database_exists(database):
            date = _date.today().strftime("%Y%m%d%H%M")
            if not filename:
                filename = '%(database)s-%(date)s.sql' % locals()
            dest = quote(posixpath.join(path, filename))
            _run_as_pg('pg_dump %(command_options)s %(database)s --format=%(format)s --blobs --file=%(dest)s' % locals())
        else:
            abort('''Database does not exist: %(database)s''' % locals())
    else:
        abort('''Destination path does not exist: %(path)s''' % locals())
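The docstring notes that a dump taken with format='custom' can be restored with pg_restore. A minimal companion sketch, reusing this snippet's _run_as_pg helper; the database name and dump path are illustrative, not from the source:

def restore_database(database, dump_path):
    # Hypothetical counterpart to dump_database(): restore a dump that was
    # produced with format='custom'. Arguments here are illustrative only.
    _run_as_pg('pg_restore --clean --dbname=%(database)s %(dump_path)s' % locals())

restore_database('myapp', '/var/backups/postgres/myapp-201312010000.sql')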
Example #4
def validate_branch(branch):
    if branch == "override":
        pass
    elif branch is None:
        abort("must specify a branch")
    elif branch not in PUBLISHED_BRANCHES:
        abort("must specify a published branch.")
Example #5
def release():
    """Performs a full release"""

    with cd(getcwd()):
        with msg("Creating env"):
            run("mkvirtualenv test")

        with msg("Building"):
            with prefix("workon test"):
                run("fab develop")

        with msg("Running tests"):
            with prefix("workon test"):
                run("fab test")

        with msg("Building docs"):
            with prefix("workon test"):
                run("pip install -r docs/requirements.txt")
                run("fab docs")

        version = run("python setup.py --version")
        if "dev" in version:
            abort("Detected Development Version!")

        print("Release version: {0:s}".format(version))

        if prompt("Is this ok?", default="Y", validate=r"^[YyNn]?$") in "yY":
            run("git tag {0:s}".format(version))
            run("python setup.py egg_info sdist bdist_egg bdist_wheel register upload")
            run("python setup.py build_sphinx upload_sphinx")

        with msg("Destroying env"):
            run("rmvirtualenv test")
def build(treeish='head'):
    """Build a release."""
    version = local("git describe {}".format(treeish), capture=True)

    with settings(hide('warnings'), warn_only=True):
        cmd = "git diff-index --quiet {} --".format(treeish)
        is_committed = local(cmd).succeeded
        cmd = "git branch -r --contains {}".format(version)
        is_pushed = local(cmd, capture=True)

    if not is_committed:
        prompt = "Uncommitted changes. Continue?"
        if not confirm(prompt, default=False):
            abort("Canceled.")

    if not is_pushed:
        prompt = "Commit not pushed. Continue?"
        if not confirm(question=prompt, default=False):
            abort("Canceled.")

    output = "/tmp/{}.tar.gz".format(version)
    prefix = "{}/".format(version)
    cmd = "git archive --prefix={prefix} --format=tar.gz --output={output} {version}:src"
    local(cmd.format(prefix=prefix, output=output, version=version))
    puts("\nBuilt: {} at: {}".format(version, output))
    return output
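A hypothetical invocation of build(), assuming a tag v1.2.3 exists in the repository; it produces a tarball whose entries carry the tag name as a prefix:

# Hypothetical usage of build(); the tag name is illustrative.
artifact = build('v1.2.3')
# -> /tmp/v1.2.3.tar.gz, archived from the v1.2.3:src tree with a 'v1.2.3/' prefix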
Example #7
def _standby_clone():
    """ With "node1" server running, we want to use the clone standby
    command in repmgr to copy over the entire PostgreSQL database cluster
    onto the "node2" server. """
    # manually:
    # $ mkdir -p /var/lib/postgresql/9.1/testscluster/
    # $ rsync -avz --rsh='ssh -p2222' [email protected]:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/

    with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
        puts(green('Start cloning the master'))
        repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env
        puts(green(repmgr_clone_command))
        puts("-" * 40)
        res = sudo(repmgr_clone_command, user='******')
        if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:
            puts("-" * 40)
            puts(green(repmgr_clone_command))
            puts("-" * 40)
            puts("Master server is %s reachable." % red("NOT"))
            puts("%s you can try to CLONE the slave manually [%s]:" % (green("BUT"), red("at your own risk")))
            puts("On the slave server:")
            puts("$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid" % env)
            puts("Here:")
            puts("$ fab <cluster_task_name> finish_configuring_slave")
            abort("STOP...")
def upload(tag=False):
    """Upload new zip files.

    @param tag  Use git tag instead of hash?
    """
    suffix = get_suffix(tag)
    base_zip = "bootstrap.zip"
    suffix_zip = "bootstrap-%s.zip" % suffix

    if not (os.path.exists(base_zip) and os.path.exists(suffix_zip)):
        abort("Did not find current zip files. Please create.")

    # Check if existing downloads
    github = GitHub()
    dl_dict = dict((x['name'], x) for x in github.downloads())
    dl_suffix = dl_dict.get(suffix_zip)
    dl_base = dl_dict.get(base_zip)

    if dl_suffix is not None:
        print("Found suffixed zip file already. Skipping")
        return

    if dl_base is not None:
        print("Removing current base zip file.")
        result = github.downloads_del(dl_base)
        print("Result: %s" % json.dumps(result, indent=2))

    print("Upload new base zip file.")
    result = github.downloads_put(base_zip, suffix)
    print("\nResult: %s" % json.dumps(result, indent=2))

    print("Upload new suffixed zip file.")
    result = github.downloads_put(suffix_zip, suffix)
    print("\nResult: %s" % json.dumps(result, indent=2))
Example #9
def vcs_upload():
    """
    Uploads the project with the selected VCS tool.
    """
    if env.deploy_tool == "git":
        remote_path = "ssh://%s@%s%s" % (env.user, env.host_string,
                                         env.repo_path)
        if not exists(env.repo_path):
            run("mkdir -p %s" % env.repo_path)
            with cd(env.repo_path):
                run("git init --bare")
        local("git push -f %s master" % remote_path)
        with cd(env.repo_path):
            run("GIT_WORK_TREE=%s git checkout -f master" % env.proj_path)
            run("GIT_WORK_TREE=%s git reset --hard" % env.proj_path)
    elif env.deploy_tool == "hg":
        remote_path = "ssh://%s@%s/%s" % (env.user, env.host_string,
                                          env.repo_path)
        with cd(env.repo_path):
            if not exists("%s/.hg" % env.repo_path):
                run("hg init")
                print(env.repo_path)
            with fab_settings(warn_only=True):
                push = local("hg push -f %s" % remote_path)
                if push.return_code == 255:
                    abort("hg push failed")
            run("hg update")
Example #10
def safe_reboot():
    """Reboot a mongo machine, stepping down if it is the primary"""
    import vm
    if not vm.reboot_required():
        print("No reboot required")
        return

    while True:
        if cluster_is_ok():
            break
        sleep(5)
        print("Waiting for cluster to be okay")

    primary = _find_primary()
    if primary == 'No primary currently elected':
        return primary

    if i_am_primary(primary):
        execute(step_down_primary)

    for i in range(5):
        if cluster_is_ok() and not i_am_primary():
            break
        sleep(1)

    if not cluster_is_ok() or i_am_primary():
        abort("Cluster has not recovered")

    execute(vm.reboot, hosts=[env['host_string']])
Example #11
def deploy():
    print green("Begining update...")
    print ""

    print blue('Checking pre-requisites...')

    print cyan("Checking for local changes...")
    has_changes = local("git status --porcelain", capture=True)
    if REQUIRE_CLEAN and has_changes:
        abort(red("Your working directory is not clean."))

    print cyan("Ensuring remote working area is clean...")
    GIT_CMD = "git --work-tree={0} --git-dir={0}/.git".format(DEPLOY_PATH)
    has_changes = run(GIT_CMD + " status --porcelain")
    if has_changes:
        abort(red("Remote working directory is not clean."))

    print blue("Finished checking pre-requisites.")
    print ""

    print green("Starting deployment...")
    print ""

    print green("Updating environment...")
    with cd(DEPLOY_PATH):
        print cyan("Pulling from master")
        run('git pull')

        sudo('reboot')
Example #12
def ensure_running(retries=15, wait=10):
    """Ensure cassandra is running on all nodes.
    Runs 'nodetool ring' on a single node continuously until it
    reaches the specified number of retries.

    INTENDED TO BE RUN ON ONE NODE, NOT ALL.
    """
    time.sleep(15)
    for attempt in range(retries):
        ring = StringIO(fab.run('JAVA_HOME={java_home} {nodetool_bin} ring'.format(
            java_home=config['java_home'], nodetool_bin=_nodetool_cmd())))
        broadcast_ips = [x.get('external_ip', x['internal_ip']) for x in config['hosts'].values()]
        nodes_up = dict((host,False) for host in broadcast_ips)
        for line in ring:
            for host in broadcast_ips:
                try:
                    if host in line and " Up " in line:
                        nodes_up[host] = True
                except UnicodeDecodeError:
                    # sometimes the operations on line will fail when it contains characters
                    # outside ascii range. we don't care; these lines can exist as long as
                    # we see the 'UP' lines that let us know a node is up.
                    pass
        for node,up in nodes_up.items():
            if not up:
                fab.puts("Node is not up (yet): %s" % node)
        if False not in nodes_up.values():
            fab.puts("All nodes available!")
            return
        fab.puts("waiting %d seconds to try again.." % wait)
        time.sleep(wait)
    else:
        fab.abort("Timed out waiting for all nodes to startup")
Example #13
def database(name, owner, template='template0', encoding='UTF8',
             locale='en_US.UTF-8', allow_restart=False):
    """
    Require a PostgreSQL database.

    ::

        from fabtools import require

        require.postgres.database('myapp', owner='dbuser')
    """

    locale_transform = lambda l: l.strip().lower().replace('-', '')

    if not database_exists(name):
        locales = map(
            locale_transform,
            run('locale -a').split()
        )
        if locale_transform(locale) not in locales:
            if not allow_restart:
                abort(
                    'New locale "{}" must be installed and '
                    'postgres must be restarted after that'.format(
                        locale
                    )
                )
            require_locale(locale)
            restarted(_service_name())

        create_database(name, owner, template=template, encoding=encoding,
                        locale=locale)
Example #14
def update():
    #  test configuration start
    if not test_configuration():
        if not console.confirm("Configuration test %s! Do you want to continue?" % red_bg('failed'), default=False):
            abort("Aborting at user request.")
    #  test configuration end
    _verify_sudo()
    if env.ask_confirmation:
        if not console.confirm("Are you sure you want to deploy in %s?" % red_bg(env.project.upper()), default=False):
            abort("Aborting at user request.")
    puts(green_bg('Start deploy...'))
    start_time = datetime.now()

    if not 'synced_projectdir' in env or not env.synced_projectdir:
        git_pull()
    # _install_requirements()
    # _upload_nginx_conf()
    # _upload_rungunicorn_script()
    # _upload_supervisord_conf()
    _prepare_django_project()
    _prepare_media_path()
    _supervisor_restart()

    end_time = datetime.now()
    finish_message = '[%s] Correctly deployed in %i seconds' % \
    (green_bg(end_time.strftime('%H:%M:%S')), (end_time - start_time).seconds)
    puts(finish_message)
Example #15
def staging():
    answer = prompt('Are you sure you want to DELETE ALL DATA on "{0}" and replace it with test data? (type "I am sure" to continue):'.format(env.host_string))

    if answer != 'I am sure':
        abort('Aborted!')

    run_fixtures('all')
Example #16
def create_ami(instance_id, name):
    """
    Create AMI image from specified instance

    The instance needs to be shut down before the creation begins.
    """
    image_name = "{0}_{1}".format(name, datetime.now().strftime("%Y%m%d-%H%M"))

    conn = boto.connect_ec2()
    image_id = conn.create_image(instance_id=instance_id, name=image_name)
    puts("Creating AMI {0} for instance {1}".format(image_name, image_id))

    while True:
        puts('.', end='')
        sys.stdout.flush()

        image = conn.get_image(image_id)
        if image.state == 'available':
            break
        if image.state == "failed":
            abort("Error creating AMI for {0}".format(image_id))
        time.sleep(5.0)

    puts("\nImage {0} created".format(image_name))
    return image_id
Example #17
def git_seed(repo_path, commit=None, ignore_untracked_files=False):
    '''seed a remote git repository'''
    commit = _get_commit(commit)
    force = ('gitric_force_push' in env) and '-f' or ''

    dirty_working_copy = _is_dirty(commit, ignore_untracked_files)
    if dirty_working_copy and 'gitric_allow_dirty' not in env:
        abort(
            'Working copy is dirty. This check can be overridden by\n'
            'importing gitric.api.allow_dirty and adding allow_dirty to your '
            'call.')
    # initialize the remote repository (idempotent)
    run('git init %s' % repo_path)
    # silence git complaints about pushes coming in on the current branch
    # the pushes only seed the immutable object store and do not modify the
    # working copy
    run('GIT_DIR=%s/.git git config receive.denyCurrentBranch ignore' %
        repo_path)
    # a target doesn't need to keep track of which branch it is on so we always
    # push to its "master"
    with settings(warn_only=True):
        push = local(
            'git push git+ssh://%s@%s:%s%s %s:refs/heads/master %s' % (
                env.user, env.host, env.port, repo_path, commit, force))
    if push.failed:
        abort(
            '%s is a non-fast-forward\n'
            'push. The seed will abort so you don\'t lose information. '
            'If you are doing this\nintentionally import '
            'gitric.api.force_push and add it to your call.' % commit)
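The push in git_seed only populates the remote object store; the remote working copy stays untouched because receive.denyCurrentBranch is set to ignore. A hypothetical follow-up step, not taken from the source, reusing this snippet's _get_commit helper and Fabric's cd/run, might move the remote checkout to the seeded commit like this:

def git_apply(repo_path, commit=None):
    '''Hypothetical companion to git_seed: check out the pushed commit remotely.'''
    commit = _get_commit(commit)
    with cd(repo_path):
        # move the remote working copy to the commit that was just seeded
        run('git reset --hard %s' % commit)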
Example #18
def cmd(stackname, command=None, username=DEPLOY_USER, clean_output=False, concurrency=None, node=None):
    if command is None:
        abort("Please specify a command e.g. ./bldr cmd:%s,ls" % stackname)
    LOG.info("Connecting to: %s", stackname)

    instances = _check_want_to_be_running(stackname)
    if not instances:
        return

    # take out the load of crap that Fabric prints mangling the useful output
    # of a remote command
    custom_settings = {}
    if clean_output:
        fabric.state.output['status'] = False
        fabric.state.output['running'] = False
        custom_settings['output_prefix'] = False

    try:
        with settings(**custom_settings):
            return stack_all_ec2_nodes(
                stackname,
                (run, {'command': command}),
                username=username,
                abort_on_prompts=True,
                concurrency=concurrency_for(stackname, concurrency),
                node=int(node) if node else None
            )
    except FabricException as e:
        LOG.error(e)
        exit(2)
Example #19
    def run(self, username=None, pubkey=None, as_root=False):
        if as_root:
            remote_user = '******'
            execute = run
        else:
            remote_user = env.local_user
            execute = sudo

        with settings(user=remote_user):
            keyfile = Path(pubkey or Path('~', '.ssh', 'id_rsa.pub')).expand()

            if not keyfile.exists():
                abort('Public key file does not exist: %s' % keyfile)

            with open(keyfile, 'r') as f:
                pubkey = f.read(65535)

            username = username or prompt('Username: '******'s password: "******"%s\", \"password\")\'' % (password),
                             capture=True)

            for command in self.commands:
                execute(command.format(**locals()))
Example #20
def deploy(justPull=False):
    """
    Update the remote instance.

    Pull from git, update virtualenv, create static and restart gunicorn
    """
    is_this_initial = False
    if run("test -d %s/.git" % env.REPOSITORY_FOLDER,
           quiet=True).failed:  # destination folder to be created
        message = 'Repository folder doesn\'t exist on destination. Proceed with initial deploy?'
        if not confirm(message):
            abort("Aborting at user request.")
        else:
            initial_deploy()
            is_this_initial = True

    for secret in secrets_file_paths():
        if run("test -e %s" % posixpath.join(env.REPOSITORY_FOLDER, secret),
               quiet=True).failed:  # secrets missing
            message = 'Some secret doesn\'t exist on destination. Proceed with initial deploy?'
            send_secrets(ask=True)

    update_instance(do_update_requirements=is_this_initial or DO_REQUIREMENTS, justPull=justPull)

    restart()
Example #21
def validate_branch(branch):
    if branch == 'override':
        pass
    elif branch is None:
        abort('must specify a branch')
    elif branch not in PUBLISHED_BRANCHES:
        abort('must specify a published branch.')
Example #22
File: pypy.py Project: jMyles/braid
def install():
    arch = info.arch()
    if re.match('i.86', arch):  # i386/i486/i686
        arch = 'x86'
    pypyURL = pypyURLs.get(arch)
    pypyDir = pypyDirs.get(arch)
    if pypyURL is None or pypyDir is None:
        abort("Can't install pypy on unknown architecture.")

    sudo('/bin/mkdir -p /opt')
    if fails('/usr/bin/id {}'.format('pypy')):
        sudo('/usr/sbin/useradd --home-dir {} --gid bin '
             '-M --system --shell /bin/false '
             'pypy'.format(pypyDir))
    else:
        sudo('/usr/sbin/usermod --home {} pypy'.format(pypyDir))

    with cd('/opt'):

        for url in pypyURL, pipURL:
            sudo('/usr/bin/wget -nc {}'.format(url))
        sudo('/bin/tar xf {}'.format(path.basename(pypyURL)))
        sudo('~pypy/bin/pypy {}'.format(path.join('/opt/', path.basename(pipURL))), pty=False)
        sudo('~pypy/bin/pip install pyopenssl')
        sudo('~pypy/bin/pip install svn+svn://svn.twistedmatrix.com/svn/Twisted/trunk/')
Example #23
def setup():
    #  test configuration start
    if not test_configuration():
        if not console.confirm("Configuration test %s! Do you want to continue?" % red_bg('failed'), default=False):
            abort("Aborting at user request.")
    #  test configuration end
    if env.ask_confirmation:
        if not console.confirm("Are you sure you want to setup %s?" % red_bg(env.project.upper()), default=False):
            abort("Aborting at user request.")
    puts(green_bg('Start setup...'))
    start_time = datetime.now()

    _verify_sudo()
    _install_dependencies()
    _create_django_user()
    _setup_directories()
    _git_clone()
    _install_virtualenv()
    _create_virtualenv()
    _install_gunicorn()
    _install_requirements()
    _upload_nginx_conf()
    _upload_rungunicorn_script()
    _upload_supervisord_conf()

    end_time = datetime.now()
    finish_message = '[%s] Correctly finished in %i seconds' % \
    (green_bg(end_time.strftime('%H:%M:%S')), (end_time - start_time).seconds)
    puts(finish_message)
Example #24
def json_output():
    if env.input_file is None or env.output_file is None:
        abort('[json]: you must specify input and output files.')

    with open(env.input_file, 'r') as f:
        document = f.read()

    doc = json.loads(document)
    
    if 'body' not in doc:
        pass
    else:
        text = doc['body'].encode('ascii', 'ignore')

        text = re.sub('<[^>]*>', '', text)
        text = re.sub('&#8220;', '"', text)
        text = re.sub('&#8221;', '"', text)
        text = re.sub('&#8216;', "'", text)
        text = re.sub('&#8217;', "'", text)
        text = re.sub('&#\d{4};', '', text)

        doc['text'] = ' '.join(text.split('\n')).strip()

        url = [ 'http://docs.mongodb.org', get_manual_path() ]
        url.extend(env.input_file.rsplit('.', 1)[0].split('/')[3:])

        doc['url'] = '/'.join(url)

    with open(env.output_file, 'w') as f:
        f.write(json.dumps(doc))
Example #25
def git_clone(repo):
    topsrcdir = repo_check(repo, check_path=False, workdir=False)

    try:
        repo_url = dict(env.CFG_INVENIO_REPOS)[repo]['repository']
    except KeyError:
        abort(red("Repository URL for %s not defined" % repo))

    basename = os.path.basename(topsrcdir)
    parent = os.path.dirname(topsrcdir)

    if os.path.exists(topsrcdir):
        res = confirm("Remove existing source code in %s ?" % topsrcdir)
        if not res:
            abort(red("Cannot continue") % env)
        else:
            local("rm -Rf %s" % topsrcdir)
    else:
        if not os.path.exists(parent):
            local("mkdir -p %s" % parent)

    ctx = {
        'basename': basename,
        'parent': parent,
        'topsrcdir': topsrcdir,
        'url': repo_url,
    }

    local("cd %(parent)s; git clone %(url)s %(basename)s " % ctx)
Example #26
def repo_setup(repo, ref):
    """ Clone repository """
    puts(cyan(">>> Setting up repository %s with ref %s..." % (repo, ref)))

    topsrcdir = repo_check(repo, check_path=False, workdir=False)
    workdir = repo_check(repo, check_path=False, workdir=True)
    gitdir = os.path.join(topsrcdir, '.git')

    if not os.path.exists(env.CFG_SRCDIR):
        res = confirm("Create repository root %s?" % env.CFG_SRCDIR)
        if not res:
            abort(red("Cannot continue") % env)
        else:
            local("mkdir -p %s" % env.CFG_SRCDIR)

    if not os.path.exists(gitdir) and os.path.exists(topsrcdir):
        res = confirm("Remove %s (it does not seem to be a git repository)?" % topsrcdir)
        if not res:
            abort(red("Cannot continue") % env)
        else:
            local("rm -Rf %s" % topsrcdir)

    if not os.path.exists(gitdir):
        git_clone(repo)
    if not os.path.exists(workdir):
        git_newworkdir(repo)
    git_checkout(repo, ref)
    repo_prepare(repo)
Example #27
    def config(self):
        self.nodetype = utils.to_list(self.nodetype)
        self.siteinfo = utils.to_list(self.siteinfo)

        if not self.nodetype or not self.siteinfo:
            raise exception.ConfigException(("Could not run YAIM: Bad "
                                             "nodetype or site-info."))

        with tempfile.NamedTemporaryFile("w+t",
                                         dir=config.CFG["yaim_path"],
                                         delete=True) as f:
            for si in self.siteinfo:
                f.write("source %s\n" % si)
            f.flush()
            f.seek(0)  # rewind so the log line below actually shows the contents

            api.info(("Creating temporary file '%s' with "
                      "content: %s" % (f.name, f.readlines())))

            # NOTE(orviz) Cannot use 'capture=True': execution gets
            # stalled (defunct)
            with context_managers.lcd(config.CFG["yaim_path"]):
                abort_exception_default = fabric_api.env.abort_exception
                fabric_api.env.abort_exception = exception.ConfigException
                try:
                    fabric_api.local("/opt/glite/yaim/bin/yaim -c -s %s -n %s"
                                     % (f.name, " -n ".join(self.nodetype)))
                except exception.ConfigException:
                    fabric_api.abort(api.fail(("YAIM execution failed. Check "
                                               "the logs at '/opt/glite/yaim/"
                                               "log/yaimlog'.")))
                api.info("YAIM configuration ran successfully.")
                fabric_api.env.abort_exception = abort_exception_default
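The swap of fabric_api.env.abort_exception above is what turns Fabric's abort() into a catchable exception instead of a process exit. A minimal sketch of the same pattern in isolation, assuming Fabric 1.x; the command and exception class are illustrative:

from fabric.api import env, local

class ConfigError(Exception):
    '''Raised by abort() while the override below is in effect.'''

def run_checked(cmd):
    # Swap in a custom exception class so abort() raises instead of exiting,
    # then restore the previous behaviour no matter what happens.
    previous = env.abort_exception
    env.abort_exception = ConfigError
    try:
        local(cmd)
    except ConfigError as exc:
        print('configuration step failed: %s' % exc)
    finally:
        env.abort_exception = previous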
Example #28
def _test_install(folder, box):
    """
    Test installation of cozy
    Start vagrant box
    Install cozy on this box
    Test if cozy is well installed
    """
    if not folder or not box:
        abort('_test_install : both parameters are needed')

    if not is_dir(folder):
        local('mkdir ' + folder)

    with lcd(folder):
        start_box(box)
        try:
            install_cozy()
            _test_status(5)
            _test_register()
            _test_bad_register()
            _test_install_app()
            _test_uninstall_app()
        except Exception, e:
            local('vagrant halt -f')
            traceback.print_exc()
            sys.exit(1)
        else:
            # truncated in the original snippet; presumably the box is halted on success
            local('vagrant halt -f')
Example #29
    def diff(self, old, new, rev1, rev2, user, passwd):
        if old == new and rev1 == rev2:
            abort('can not deploy a duplicated (version,revision):(%s,%s)' \
                      % (new.tag, rev1))

        if user is not None and passwd is not None:
            cmd = ['svn --username=%s --password=%s --non-interactive --trust-server-cert diff --summarize' % (user, passwd),
                   '--old=' + str(old),
                   '--new=' + str(new),
                  ]
        else:
            cmd = ['svn --non-interactive --trust-server-cert diff --summarize',
                   '--old=' + str(old),
                   '--new=' + str(new), 
                  ]

        if rev1 and rev2:
            cmd.append('-r%s:%s' % (rev1, rev2))

        svnlog = local(' '.join(cmd), capture=True).stdout.strip()
        if not svnlog:
            abort('no diff between two (version,revision):(%s,%s)(%s,%s)' \
                      % (old.tag, new.tag, rev1, rev2))

        print 'CHANGE HISTORY:'
        print svnlog
        print '-'*10
        return svnlog
Example #30
def deploy_static():
    if not os.path.exists(".build") and not confirm("Looks like there is no build. Continue anyway? [DANGEROUS]"):
        abort("Aborting at user request. Type \"fab build\" before deployment.")
        
    require('hosts', provided_by=[staging, prod])
    require('staticpath', provided_by=[staging, prod])
    _put_dir('static', env.staticpath)
Example #31
import os
import imp
import datetime
from fabric.api import execute, task, env, abort, local

# this will print paramiko errors on stderr
import logging

logging.getLogger('paramiko.transport').addHandler(logging.StreamHandler())

# if the env variable teacup_config is set import the config file specified.
# the variable is set as follows: fab --set teacup_config=myconfig.py
# otherwise import the default config.py
try:
    if not os.path.isfile(env.teacup_config):
        abort('Specified config file %s does not exist' % env.teacup_config)

    # before loading config change to the directory of specified config file
    # this is needed to make execfile without absolute path in config file work
    # afterwards change back directory to current path
    curr_dir = os.getcwd()
    config_dir = os.path.dirname(env.teacup_config)
    if config_dir == '':
        config_dir = '.'
    os.chdir(config_dir)
    config = imp.load_source('config', env.teacup_config)
    os.chdir(curr_dir)

except AttributeError:
    import config
Example #32
def deploy():
    def docker_compose(command):
        with cd(PATH):
            with shell_env(CI_BUILD_REF_NAME=os.getenv(
                    'CI_BUILD_REF_NAME', 'master')):
                run('set -o pipefail; docker-compose %s | tee' % command)

    variables_set = True
    for var in VARIABLES + ('CI_BUILD_TOKEN',):
        if os.getenv(var) is None:
            variables_set = False
            print(red('ERROR: environment variable ' + var + ' is not set.'))
    if not variables_set:
        abort('Missing required parameters')
    with hide('commands'):
        run('rm -f "%s"' % ENV_FILE)
        append(ENV_FILE,
               ['%s=%s' % (var, val) for var, val in zip(
                   VARIABLES, map(os.getenv, VARIABLES))])


    run('cat /srv/gitlab-login | docker login --username %s %s --password-stdin  ' % (
        os.getenv('REGISTRY_USER', 'gitlab-ci-token'),
        os.getenv('CI_REGISTRY','registry.gitlab.com')))
    # run('docker login -u %s -p %s %s' % (os.getenv('REGISTRY_USER',
    #                                                'gitlab-ci-token'),
    #                                      os.getenv('CI_BUILD_TOKEN'),
    #                                      os.getenv('CI_REGISTRY',
    #                                                'registry.gitlab.com')))

    put('docker-compose.prod.yml', PATH)
    put('app.sh', PATH)
    run('cp ' + PATH + '/docker-compose.prod.yml ' + PATH + '/docker-compose.yml')
    run('rm -f ' + PATH + '/docker-compose.prod.yml')
    run('source %s' % ENV_FILE)
    docker_compose("pull")

    # run('docker stop $(docker ps -a -q)')
    # try:
    #     run("docker network disconnect pye_default pye_web_1 -f")
    # except:
    #     pass
    #
    # try:
    #     run("docker network disconnect pye_default $(docker ps -a --filter=\"name=_pye_web\" --format '{{.Names}}') -f")
    # except:
    #     pass

    # remove static volume

    docker_compose('-p boilerplate down')
    run('docker volume rm -f boilerplate_static_files')

    docker_compose('-p boilerplate up -d')

    def run_command_to_web(command):
        run("docker exec -it boilerplate_web_1 " + command)
        # try:
        #     run("docker exec -it  $(docker ps -a --filter=\"name=_pye_web\" --format '{{.Names}}') " + command)
        # except:
        #     pass
        # try:
        #     run("docker exec -it pye_web_1" + command)
        # except:
        #     pass

    with cd(PATH):
        try:
            run('./app.sh cert')
        except:
            pass
        run('./app.sh migrate')
        run('./app.sh collectstatic')
        # run_command_to_web('python manage.py migrate')
        # run_command_to_web('bash -c "cd app && npm i && npm run build"')
        # run_command_to_web('python manage.py collectstatic --noinput')

        # ssh letsencrypt

    run('docker kill -s HUP boilerplate_nginx_1')
Example #33
def test():
    with settings(warn_only=True):
        result = local("nosetests -v", capture=True)
    if result.failed and not confirm("Tests failed. Continue?"):
        abort("Aborted at user request.")
Example #34
def install():
    current_dir = os.path.abspath(os.curdir)

    # check hardware support
    dist = check_system_support()

    # install dependent packages
    with settings(hide('stdout'), warn_only=True):
        cmd = "sudo apt-get update"
        sudo(cmd)
    with settings(hide('running'), warn_only=True):
        cmd = "apt-get install --force-yes -y qemu-kvm libvirt-bin libglu1-mesa "
        cmd += "gvncviewer python-dev python-libvirt python-lxml python-lzma "
        cmd += "apparmor-utils libc6-i386 python-pip libxml2-dev libxslt1-dev apache2"
        if dist == "precise":
            cmd += " python-xdelta3"
            if sudo(cmd).failed:
                abort("Failed to install libraries")
        else:
            if sudo(cmd).failed:
                abort("Failed to install libraries")
            # python-xdelta3 is no longer supported in Ubuntu 14.04 LTS,
            # but the Ubuntu 12.04 deb can be installed on Ubuntu 14.04.
            with cd(current_dir):
                package_name = "python-xdelta3.deb"
                cmd = "wget http://mirrors.kernel.org/ubuntu/pool/universe/x/xdelta3/python-xdelta3_3.0.0.dfsg-1build1_amd64.deb -O %s" % package_name
                if sudo(cmd).failed:
                    abort("Failed to download %s" % package_name)
                if sudo("dpkg -i %s" % package_name).failed:
                    abort("Failed to install %s" % package_name)
                sudo("rm -rf %s" % package_name)

    # install python-packages
    with cd(current_dir):
        if sudo("pip install -r requirements.txt").failed:
            abort("Failed to install python libraries")
    #copy heatmap to webserver directory and set permissions
    if sudo("mkdir /var/www/html/heatmap").failed:
        print("Failed to mkdir /var/www/html/heatmap")
    if sudo("chmod 777 /var/www/html/heatmap").failed:
        print("Failed to set perms for heatmap directory")
    with cd(current_dir):
        sudo("cp -r heatmap/* /var/www/html/heatmap")

    # check bios.bin file
    bios_files = [
        "/usr/share/qemu/bios.bin", "/usr/share/qemu/vgabios-cirrus.bin"
    ]
    for bios_file in bios_files:
        if not os.path.exists(bios_file):
            filename = os.path.basename(bios_file)
            sudo("ln -s /usr/share/seabios/%s %s" % (filename, bios_file))

    # disable libvirtd from appArmor to enable custom KVM
    if sudo("aa-complain /usr/sbin/libvirtd").failed:
        abort("Failed to disable AppArmor for custom KVM")

    # add current user to groups (optional)
    username = env.get('user')
    if sudo("adduser %s kvm" % username).failed:
        abort("Cannot add user to kvm group")
    if sudo("adduser %s libvirtd" % username).failed:
        abort("Cannot add user to libvirtd group")

    # Check fuse support:
    #   qemu-kvm changes the permission of /dev/fuse, so we revert back the
    #   permission. This bug is fixed from udev-175-0ubuntu26
    #   Please see https://bugs.launchpad.net/ubuntu/+source/udev/+bug/1152718
    if sudo("chmod 1666 /dev/fuse").failed:
        abort("Failed to enable fuse for the user")
    if sudo("chmod 644 /etc/fuse.conf").failed:
        abort("Failed to change permission of fuse configuration")
    if sudo("sed -i 's/#user_allow_other/user_allow_other/g' /etc/fuse.conf"):
        abort("Failed to allow other user to access FUSE file")

    # install cloudlet package
    with cd(current_dir):
        # remove previous build directory
        with settings(hide('everything')):
            sudo("rm -rf ./build")
            sudo("pip uninstall --y elijah-provisioning")
        # install python package
        if sudo("python setup.py install").failed:
            abort("cannot install cloudlet library")
        # clean-up
        with settings(hide('everything')):
            sudo("rm -rf ./build")

    sys.stdout.write("[SUCCESS] VM synthesis code is installed\n")
Example #35
def create():
    """
    Creates the environment needed to host the project.
    The environment consists of: system locales, virtualenv, database, project
    files, SSL certificate, and project-specific Python requirements.
    """
    # Generate project locale
    locale = env.locale.replace("UTF-8", "utf8")
    with hide("stdout"):
        if locale not in run("locale -a"):
            sudo("locale-gen %s" % env.locale)
            sudo("update-locale %s" % env.locale)
            sudo("service postgresql restart")
            run("exit")

    # Create project path
    run("mkdir -p %s" % env.proj_path)

    # Set up virtual env
    run("mkdir -p %s" % env.venv_home)
    with cd(env.venv_home):
        if exists(env.proj_name):
            if confirm("Virtualenv already exists in host server: %s"
                       "\nWould you like to replace it?" % env.proj_name):
                run("rm -rf %s" % env.proj_name)
            else:
                abort("Aborting at user request.")
        run("virtualenv %s" % env.proj_name)

    # Upload project files
    if env.deploy_tool in env.vcs_tools:
        vcs_upload()
    else:
        rsync_upload()

    # Create DB and DB user
    pw = db_pass()
    user_sql_args = (env.proj_name, pw.replace("'", "\'"))
    user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
    psql(user_sql, show=False)
    shadowed = "*" * len(pw)
    print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
    psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
         "LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
         (env.proj_name, env.proj_name, env.locale, env.locale))

    # Set up SSL certificate
    if not env.ssl_disabled:
        conf_path = "/etc/nginx/conf"
        if not exists(conf_path):
            sudo("mkdir %s" % conf_path)
        with cd(conf_path):
            crt_file = env.proj_name + ".crt"
            key_file = env.proj_name + ".key"
            if not exists(crt_file) and not exists(key_file):
                try:
                    crt_local, = glob(join("deploy", "*.crt"))
                    key_local, = glob(join("deploy", "*.key"))
                except ValueError:
                    parts = (crt_file, key_file, env.domains[0])
                    sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
                         "-subj '/CN=%s' -days 3650" % parts)
                else:
                    upload_template(crt_local, crt_file, use_sudo=True)
                    upload_template(key_local, key_file, use_sudo=True)

    # Install project-specific requirements
    upload_template_and_reload("settings")
    with project():
        if env.reqs_path:
            pip("-r %s/%s" % (env.proj_path, env.reqs_path))
        pip("gunicorn setproctitle psycopg2 "
            "django-compressor python-memcached")
        # Bootstrap the DB
        manage("createdb --noinput --nodata")
        python(
            "from django.conf import settings;"
            "from django.contrib.sites.models import Site;"
            "Site.objects.filter(id=settings.SITE_ID).update(domain='%s');" %
            env.domains[0])
        for domain in env.domains:
            python("from django.contrib.sites.models import Site;"
                   "Site.objects.get_or_create(domain='%s');" % domain)
        if env.admin_pass:
            pw = env.admin_pass
            user_py = ("from django.contrib.auth import get_user_model;"
                       "User = get_user_model();"
                       "u, _ = User.objects.get_or_create(username='******');"
                       "u.is_staff = u.is_superuser = True;"
                       "u.set_password('%s');"
                       "u.save();" % pw)
            python(user_py, show=False)
            shadowed = "*" * len(pw)
            print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))

    return True
Example #36
def commit_pull_and_deploy(tag):
    # Clone the Docker configuration in a local temporary directory
    local_tmpdir = tempfile.mkdtemp(prefix='fab-deploy')
    local_compose_file = os.path.join(
        local_tmpdir, env.docker_git_compose_file)
    with lcd(local_tmpdir):
        local("git clone --quiet --depth=1 --branch='{}' '{}' .".format(
            env.docker_git_branch, env.docker_git_repo)
        )
        # Update the image tag used by Docker Compose
        image_name = '{}:{}'.format(DOCKER_HUB_REPO, tag)
        updated_compose_image = False
        with open(local_compose_file, 'r') as f:
            compose_file_lines = f.readlines()
        with open(local_compose_file, 'w') as f:
            for line in compose_file_lines:
                matches = re.match(DOCKER_COMPOSE_IMAGE_UPDATE_PATTERN, line)
                if not matches:
                    f.write(line)
                    continue
                else:
                    # https://docs.python.org/2/library/os.html#os.linesep
                    f.write('{prefix}{image_name}\n'.format(
                        prefix=matches.group(1), image_name=image_name)
                    )
                    updated_compose_image = True
        if not updated_compose_image:
            raise Exception(
                'Failed to update image to {} in Docker Compose '
                'configuration'.format(image_name)
            )
        # Did we actually make a change?
        if local('git diff', capture=True):
            # Commit the change
            local("git add '{}'".format(local_compose_file))
            local("git commit -am 'Upgrade {service} to {tag}'".format(
                service=SERVICE_NAME, tag=tag)
            )
            # Push the commit
            local('git show')
            response = prompt(
                'OK to push the above commit to {} branch of {}? (y/n)'.format(
                    env.docker_git_branch, env.docker_git_repo)
            )
            if response != 'y':
                abort('Push cancelled')
            local("git push origin '{}'".format(env.docker_git_branch))
        # Make a note of the commit to verify later that it's pulled to the
        # remote server
        pushed_config_commit = local("git show --no-patch", capture=True)

    # Deploy to the remote server
    with cd(env.docker_config_path):
        run('git pull')
        pulled_config_commit = run_no_pty("git show --no-patch")
        if pulled_config_commit != pushed_config_commit:
            raise Exception(
                'The configuration commit on the remote server does not match '
                'what was pushed locally. Please make sure {} is checked out '
                'on the remote server.'.format(env.docker_git_branch)
            )
        run_no_pty("{doco} pull '{service}'".format(
            doco=env.docker_compose_command, service=SERVICE_NAME)
        )
        run("{doco} up -d".format(doco=env.docker_compose_command))
Example #37
def test():
    with settings(warn_only=True):
        result = local("python ./manage.py test apps.blog", capture=True)
        if result.failed and not confirm("Test failed, Continue anyway"):
            abort("aborting at user request")
Example #38
def _check_rollback_to():
    if not env.release_path:
        abort('No release to rollback')
Example #39
def run_experiment(test_id='', test_id_pfx='', *args, **kwargs):

    do_init_os = kwargs.get('do_init_os', '1')
    ecn = kwargs.get('ecn', '0')
    tcp_cc_algo = kwargs.get('tcp_cc_algo', 'default')
    duration = kwargs.get('duration', '')
    if duration == '':
        abort('No experiment duration specified')

    # create sub directory for test id prefix
    mkdir_p(test_id_pfx)

    # remove <test_id>* files in <test_id_pfx> directory if exists
    file_pattern = test_id_pfx + "/" + test_id + "_*"

    for f in glob.glob(file_pattern):
        os.remove(f)

    # log experiment in started list
    local('echo "%s" >> experiments_started.txt' % test_id)

    puts('\n[MAIN] Starting experiment %s \n' % test_id)

    tftpboot_dir = ''
    try:
        tftpboot_dir = config.TPCONF_tftpboot_dir
    except AttributeError:
        pass

    # initialise
    if tftpboot_dir != '' and do_init_os == '1':
        execute(get_host_info,
                netint='0',
                hosts=config.TPCONF_router + config.TPCONF_hosts)
        execute(init_os_hosts, file_prefix=test_id_pfx,
                local_dir=test_id_pfx)  # reboot
        clear_type_cache()  # clear host type cache
        disconnect_all()  # close all connections
        time.sleep(30)  # give hosts some time to settle down (after reboot)

    # initialise topology
    try:
        switch = ''
        port_prefix = ''
        port_offset = 0
        try:
            switch = config.TPCONF_topology_switch
            port_prefix = config.TPCONF_topology_switch_port_prefix
            port_offset = config.TPCONF_topology_switch_port_offset
        except AttributeError:
            pass

        if config.TPCONF_config_topology == '1' and do_init_os == '1':
            # we cannot call init_topology directly, as it is decorated with
            # runs_once. in experiment.py we have an empty host list, whereas if we
            # run init_topology from the command line we have the -H host list. executing
            # a runs_once task with an empty host list (hosts set in the execute call)
            # means it will only be executed for the first host, which is not what we
            # want. in contrast, if we have a host list in context, execute will be
            # run once for each host (hence we need runs_once when called from
            # the command line).

            # sequentially configure switch
            execute(init_topology_switch,
                    switch,
                    port_prefix,
                    port_offset,
                    hosts=config.TPCONF_hosts)
            # configure hosts in parallel
            execute(init_topology_host, hosts=config.TPCONF_hosts)

    except AttributeError:
        pass

    file_cleanup(test_id_pfx)  # remove any .start files
    execute(get_host_info,
            netmac='0',
            hosts=config.TPCONF_router + config.TPCONF_hosts)
    execute(sanity_checks)
    execute(init_hosts, *args, **kwargs)

    # first is the legacy case with single router and single queue definitions
    # second is the multiple router case with several routers and several queue
    # definitions
    if isinstance(config.TPCONF_router_queues, list):
        # start queues/pipes
        config_router_queues(config.TPCONF_router_queues, config.TPCONF_router,
                             **kwargs)
        # show pipe setup
        execute(show_pipes, hosts=config.TPCONF_router)
    elif isinstance(config.TPCONF_router_queues, dict):
        for router in config.TPCONF_router_queues.keys():
            # start queues/pipes for router r
            config_router_queues(config.TPCONF_router_queues[router], [router],
                                 **kwargs)
            # show pipe setup
            execute(show_pipes, hosts=[router])

    # log config parameters
    execute(log_config_params,
            file_prefix=test_id,
            local_dir=test_id_pfx,
            hosts=['MAIN'],
            *args,
            **kwargs)
    # log host tcp settings
    execute(log_host_tcp,
            file_prefix=test_id,
            local_dir=test_id_pfx,
            hosts=['MAIN'],
            *args,
            **kwargs)

    # start all loggers
    execute(start_loggers,
            file_prefix=test_id,
            local_dir=test_id_pfx,
            remote_dir=config.TPCONF_remote_dir)

    # Start broadcast ping and loggers (if enabled)
    try:
        if config.TPCONF_bc_ping_enable == '1':
            # for multicast need IP of outgoing interface
            # which is router's control interface
            use_multicast = socket.gethostbyname(
                config.TPCONF_router[0].split(':')[0])

            # get configured broadcast or multicast address
            bc_addr = ''
            try:
                bc_addr = config.TPCONF_bc_ping_address
            except AttributeError:
                # use default multicast address
                bc_addr = '224.0.1.199'

            execute(start_bc_ping_loggers,
                    file_prefix=test_id,
                    local_dir=test_id_pfx,
                    remote_dir=config.TPCONF_remote_dir,
                    bc_addr=bc_addr)

            try:
                bc_ping_rate = config.TPCONF_bc_ping_rate
            except AttributeError:
                bc_ping_rate = '1'

            # start the broadcast ping on the first router
            execute(start_bc_ping,
                    file_prefix=test_id,
                    local_dir=test_id_pfx,
                    remote_dir=config.TPCONF_remote_dir,
                    bc_addr=bc_addr,
                    rate=bc_ping_rate,
                    use_multicast=use_multicast,
                    hosts=[config.TPCONF_router[0]])
    except AttributeError:
        pass

    # start traffic generators
    sync_delay = 5.0
    max_wait_time = sync_delay
    start_time = datetime.datetime.now()
    for t, c, v in sorted(config.TPCONF_traffic_gens, cmp=_cmp_timekeys):

        try:
            # delay everything to have synchronised start
            next_time = float(t) + sync_delay
        except ValueError:
            abort('Traffic generator entry key time must be a float')

        if next_time > max_wait_time:
            max_wait_time = next_time

        # add the kwargs parameter to the call of _param
        v = re.sub("(V_[a-zA-Z0-9_-]*)", "_param('\\1', kwargs)", v)

        # trim white space at both ends
        v = v.strip()

        if v[-1] != ',':
            v = v + ','
        # add counter parameter
        v += ' counter="%s"' % c
        # add file prefix parameter
        v += ', file_prefix=test_id'
        # add remote dir
        v += ', remote_dir=\'%s\'' % config.TPCONF_remote_dir
        # add test id prefix to put files into correct directory
        v += ', local_dir=\'%s\'' % test_id_pfx
        # we don't need to check for presence of tools inside start functions
        v += ', check="0"'

        # set wait time until process is started
        now = datetime.datetime.now()
        dt_diff = now - start_time
        sec_diff = (dt_diff.days * 24 * 3600 + dt_diff.seconds) + \
            (dt_diff.microseconds / 1000000.0)
        if next_time - sec_diff > 0:
            wait = str(next_time - sec_diff)
        else:
            wait = '0.0'
        v += ', wait="' + wait + '"'

        _nargs, _kwargs = eval('_args(%s)' % v)
        execute(*_nargs, **_kwargs)

    # print process list
    print_proc_list()

    # wait until finished (add additional 5 seconds to be sure)
    total_duration = float(duration) + max_wait_time + 5.0
    puts('\n[MAIN] Running experiment for %i seconds\n' % int(total_duration))
    time.sleep(total_duration)

    # shut everything down and get log data
    execute(stop_processes, local_dir=test_id_pfx)
    execute(log_queue_stats,
            file_prefix=test_id,
            local_dir=test_id_pfx,
            hosts=config.TPCONF_router)

    # log test id in completed list
    local('echo "%s" >> experiments_completed.txt' % test_id)

    # kill any remaining processes
    execute(kill_old_processes,
            hosts=config.TPCONF_router + config.TPCONF_hosts)

    # done
    puts('\n[MAIN] COMPLETED experiment %s \n' % test_id)
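The long comment about init_topology touches a Fabric subtlety worth isolating: a task decorated with runs_once executes a single time even when execute() is given several hosts. A minimal sketch, assuming Fabric 1.x; the task body and host names are illustrative:

from fabric.api import env, execute, runs_once

@runs_once
def report_host():
    # runs_once memoises the first call, so with the execute() below this
    # body runs once, for the first host only.
    print('configuring %s' % env.host_string)

execute(report_host, hosts=['host1', 'host2'])   # prints a single line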
Example #40
def init_tc_pipe(counter='1',
                 source='',
                 dest='',
                 rate='',
                 delay='',
                 rtt='',
                 loss='',
                 queue_size='',
                 queue_size_mult='1.0',
                 queue_disc='',
                 queue_disc_params='',
                 bidir='0',
                 attach_to_queue=''):

    # compatibility with FreeBSD
    if queue_disc == 'fifo':
        # pfifo is the default for HTB classes
        queue_disc = 'pfifo'

    queue_size = str(queue_size)
    if queue_size.lower() == 'bdp':
        _rate = rate.replace('kbit', '000')
        _rate = _rate.replace('mbit', '000000')
        if rtt == '':
            rtt = str(2 * int(delay))
        if queue_disc == 'pfifo' or queue_disc == 'codel' or \
           queue_disc == 'fq_codel' or queue_disc == 'pie':
            # queue size in packets
            avg_packet = 600  # average packet size
            queue_size = int(
                float(_rate) * (float(rtt) / 1000.0) / 8 / avg_packet)
            if queue_size_mult != '1.0':
                queue_size = int(float(queue_size) * float(queue_size_mult))
            if queue_size < 1:
                queue_size = 1  # minimum 1 packet
            queue_size = str(queue_size)
        elif queue_disc == 'bfifo' or queue_disc == 'red':
            # queue size in bytes
            queue_size = int(float(_rate) * (float(rtt) / 1000.0) / 8)
            if queue_size_mult != '1.0':
                queue_size = int(float(queue_size) * float(queue_size_mult))
            if queue_size < 2048:
                queue_size = 2048  # minimum 2kB
            queue_size = str(queue_size)
        else:
            abort('Can\'t specify \'bdp\' for queuing discipline %s' %
                  queue_disc)

    # class/handle numbers
    class_no = str(int(counter) + 0)
    if attach_to_queue == '':
        queue_class_no = class_no
    else:
        # if attach_to_queue is set we attach this to existing (previously
        # configured pipe). this means packets will go through an existing htb
        # and leaf qdisc, but a separate netem.
        # so we can have different flows going through the same bottleneck
        # queue, but with different emulated delays or loss rates
        queue_class_no = attach_to_queue
    netem_class_no = class_no
    qdisc_no = str(int(counter) + 1000)
    netem_no = str(int(counter) + 1000)

    # disciplines: fq_codel, codel, red, choke, pfifo, pfifo_fast (standard
    # magic), pie (only as patch), ...
    if queue_disc == '':
        queue_disc = 'pfifo'
    # for pie we need to make sure the kernel module is loaded (for kernel pre
    # 3.14 only, for new kernels it happens automatically via tc use!)
    if queue_disc == 'pie':
        with settings(warn_only=True):
            run('modprobe pie')

    if rate == '':
        rate = '1000mbit'
    if queue_size == '':
        # set default queue size to 1000 packet (massive but default for e.g.
        # codel)
        queue_size = '1000'

    if loss != '':
        # convert to percentage
        loss = str(float(loss) * 100)

    interfaces = get_netint_cached(env.host_string, int_no=-1)

    # our approach works as follows:
    # - shaping, aqm and delay/loss emulation is done on egress interface
    #   (as usual)
    # - use htb qdisc for rate limiting with the aqm qdisc (e.g. pfifo, codel)
    #   as leaf node
    # - after shaping and aqm, emulate loss and delay with netem
    # - for each "pipe" we setup a new class on all (two) interfaces
    # - if pipes are unidirectional a class is only used on one of the two ifaces;
    #   otherwise it is used on both interfaces (XXX could optimise the
    #   unidirectional case and omit unused pipes)
    # - traffic flow is as follows:
    #   1. packets are marked by iptables in mangle table POSTROUTING hook
    #      depending on defined source/dest (unique mark for each pipe)
    #   2. marked packets are classified into appropriate class (1-1 mapping
    #      between marks and classes) and redirected to pseudo interface
    #   3. pseudo interface does the shaping with htb and aqm (leaf qdisc)
    #   4. packets go back to actual interface
    #   5. actual interface does network emulation (delay/loss), here htb is set to
    # max rate (1Gbps) and pfifo is used (effectively no shaping or aqm here)

    # note that according to my information the htb has a built-in buffer of 1
    # packet as well (cannot be changed)

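    # For illustration only (values assumed, not from any real config): with
    # counter = 1, rate = '10mbit', queue_disc = 'pfifo', queue_size = '1000',
    # delay = '50' and no attach_to_queue, one direction roughly expands to:
    #   tc class add dev ifb0 parent 1: classid 1:1 htb rate 10mbit ceil 10mbit
    #   tc qdisc add dev ifb0 parent 1:1 handle 1001: pfifo limit 1000
    #   tc filter add dev ifb0 protocol ip parent 1: handle 1 fw flowid 1:1
    #   tc class add dev eth1 parent 1: classid 1:1 htb rate 1000mbit ceil 1000mbit
    #   tc qdisc add dev eth1 parent 1:1 handle 1001: netem limit 1000 delay 50ms
    #   tc filter add dev eth1 protocol ip parent 1: handle 1 fw flowid 1:1 \
    #       action mirred egress redirect dev ifb0
    #   iptables -t mangle -A POSTROUTING -s <src> -d <dst> -j MARK --set-mark 1
    # (eth1 stands in for whatever get_netint_cached() returns.)
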
    cnt = 0
    for interface in interfaces:

        pseudo_interface = 'ifb' + str(cnt)

        # config rate limiting on pseudo interface
        config_tc_cmd = 'tc class add dev %s parent 1: classid 1:%s htb rate %s ceil %s' % \
            (pseudo_interface, queue_class_no, rate, rate)
        if attach_to_queue == '':
            run(config_tc_cmd)

        # config queuing discipline and buffer limit on pseudo interface
        config_tc_cmd = 'tc qdisc add dev %s parent 1:%s handle %s: %s limit %s %s' % \
            (pseudo_interface,
             queue_class_no,
             qdisc_no,
             queue_disc,
             queue_size,
             queue_disc_params)
        if attach_to_queue == '':
            run(config_tc_cmd)

        # configure filter to classify traffic based on mark on pseudo device
        config_tc_cmd = 'tc filter add dev %s protocol ip parent 1: ' \
                        'handle %s fw flowid 1:%s' % (
                            pseudo_interface, class_no, queue_class_no)
        run(config_tc_cmd)

        # configure class for actual interface with max rate
        config_tc_cmd = 'tc class add dev %s parent 1: classid 1:%s ' \
                        'htb rate 1000mbit ceil 1000mbit' % \
            (interface, netem_class_no)
        run(config_tc_cmd)

        # config netem on actual interface
        config_tc_cmd = 'tc qdisc add dev %s parent 1:%s handle %s: ' \
                        'netem limit 1000' % (
                            interface, netem_class_no, netem_no)
        if delay != "":
            config_tc_cmd += " delay %sms" % delay
        if loss != "":
            config_tc_cmd += " loss %s%%" % loss
        run(config_tc_cmd)

        # configure filter to redirect traffic to pseudo device first and also
        # classify traffic based on mark after leaving the pseudo interface traffic
        # will go back to actual interface
        config_tc_cmd = 'tc filter add dev %s protocol ip parent 1: handle %s ' \
                        'fw flowid 1:%s action mirred egress redirect dev %s' % \
            (interface, class_no, netem_class_no, pseudo_interface)
        run(config_tc_cmd)

        cnt += 1

    # filter on specific ips
    config_it_cmd = 'iptables -t mangle -A POSTROUTING -s %s -d %s -j MARK --set-mark %s' % \
        (source, dest, class_no)
    run(config_it_cmd)
    if bidir == '1':
        config_it_cmd = 'iptables -t mangle -A POSTROUTING -s %s -d %s -j MARK --set-mark %s' % \
            (dest, source, class_no)
        run(config_it_cmd)
Example #41
0
def test():
    with settings(warn_only=True):
        result = local('./manage.py test hmf_finder', capture=True)
    if result.failed and not confirm("Tests failed. Continue anyway?"):
        abort("Aborting at user request.")
Example #42
0
def adjust_timestamps(test_id='',
                      file_name='',
                      host_name='',
                      sep=' ',
                      out_dir=''):
    "Adjust timestamps in data file based on observed clock offsets"

    # out_dir is the user-specified out_dir we pass on to get_clock_offsets()
    if len(out_dir) > 0 and out_dir[-1] != '/':
        out_dir += '/'

    # out_dirname is the directory where the clockoffset file will be
    if out_dir == '' or out_dir[0] != '/':
        out_dirname = os.path.dirname(file_name)
    else:
        out_dirname = out_dir

    if out_dirname[-1] != '/':
        out_dirname += '/'

    # clock offset file name
    offs_fname = out_dirname + test_id + CLOCK_OFFSET_FILE_EXT
    # new file name
    new_fname = file_name + DATA_CORRECTED_FILE_EXT

    #print(offs_fname)

    if not os.path.isfile(offs_fname):
        execute(get_clock_offsets, test_id=test_id, out_dir=out_dir)

    if not os.path.isfile(offs_fname):
        # give up and just make a copy of the existing data, so we have a file
        # with .tscorr extension
        #warn('Cannot generate clock offset file, using uncorrected timestamps '
        #     'for experiment %s' % test_id)
        #local('cp %s %s' % (file_name, new_fname))

        # abort so we are on the safe side, user needs to fix or rerun with
        # ts_correct=0
        abort('Cannot generate clock offset file for experiment %s' % test_id)

        return new_fname

    host_times = []
    last_offs = 0.0
    try:
        with open(offs_fname) as f:
            offs_lines = f.readlines()

            # find the host's column (note: '#' is the first column in the first row)
            host_col = -1
            for col in offs_lines[0].rstrip().split(' '):
                if col == host_name:
                    break

                host_col += 1

            #print(host_name)
            #print(host_col)

            for line in offs_lines[1:]:
                line = line.rstrip()
                ref_time = line.split(' ')[0]
                offs = line.split(' ')[host_col]
                # if we have no data ('NA') the offset used for correction is the
                # last observed offset, otherwise it is the measured offset
                if offs == 'NA':
                    offs = last_offs
                else:
                    offs = float(offs)
                    last_offs = offs

                # XXX instead of using the instantaneous offset values we may
                # want to do something better in the future, such as using
                # a weighted moving average etc.
                host_times.append((ref_time, offs))

    except IOError:
        abort('Cannot open file %s' % offs_fname)

    reader = csv.reader(open(file_name, 'r'), delimiter=sep)
    fout = open(new_fname, 'w')

    # index to curr_ref_time
    curr = 0
    for line in reader:
        time = line[0]

        # find the right time, we assume here that each offset is
        # valid from the time it was observed until the time the next
        # offset is observed
        while host_times[curr][0] < time or host_times[curr][0] == 'NA':
            curr += 1
        if curr > 0 and host_times[curr - 1][0] != 'NA':
            curr -= 1

        new_time = float(time) - host_times[curr][1]

        fout.write('{0:.6f}'.format(new_time) + sep)
        fout.write(sep.join(line[1:]))
        fout.write('\n')

    fout.close()

    return new_fname
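
For reference, a minimal sketch of the clock offset file this function parses (the host names and values are illustrative assumptions, not taken from the source):

# ref_time testhost1 testhost2
1397429806.123456 0.000000 -0.001234
1397429866.123456 0.000000 NA

For a data line whose first field is 1397429870.500000 on testhost2, the last non-NA offset (-0.001234) is used, so the corrected timestamp written to the .tscorr file is 1397429870.500000 - (-0.001234) = 1397429870.501234.
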
Example #43
0
def get_netmac(internal_int='0'):
    "Get MAC address for external/ctrl network interface"

    # getting the local MAC address is not so easy across different OSes, so
    # use the following approach:
    # get all non-router macs via the router using arp -a
    # get router mac using ifconfig
    # if host is a vm which we access via localhost then use ifconfig not arp
    # method
    # XXX windows: getmac, ipconfig /all

    host_string = env.host_string

    # if we have a port then strip off port
    if host_string.find(':') > -1:
        host_string = host_string.split(':')[0]

    if host_string == 'localhost':
        host_string = '127.0.0.1'

    if host_string in config.TPCONF_router or host_string == '127.0.0.1':
        # get MAC of router

        htype = get_type_cached(env.host_string)

        # complicated awk code to get pairs of ip and mac addresses from
        # ifconfig
        if htype == 'FreeBSD':
            macips = run(
                'ifconfig | awk \'/ether / { printf("%s ", $0); next } 1\' | '
                + 'grep ether | grep "inet " | ' +
                'awk \'{ printf("%s %s\\n", $2, $4) }\'',
                shell=False)
        elif htype == 'Linux':
            macips = run(
                'ifconfig | awk \'/HWaddr / { printf("%s ", $0); next } 1\' | '
                + 'grep HWaddr | grep "inet " | ' +
                'awk \'{ printf("%s %s\\n", $5, $7) }\' | sed -e "s/addr://"')
        else:
            abort("Can't determine MAC address for OS %s" % htype)

        ip_mac_map = {}
        for line in macips.split('\n'):
            a = line.split(' ')
            ip_mac_map.update({a[1].strip(): a[0].strip()})
        # print(ip_mac_map)

        # check if it looks like a name
        if not re.match('[0-9.]+', host_string):
            # try dns and assume we get an answer
            ip = socket.gethostbyname(host_string)
        else:
            ip = host_string

        if ip != '127.0.0.1':
            mac = ip_mac_map.get(ip)
        else:
            # guess it's the first NIC
            # XXX should return MAC based on router IP, not simply the first
            mac = ip_mac_map.get(ip_mac_map.keys()[0])

    else:
        # get MAC of non-router

        if internal_int == '0':
            host_string = env.host_string
        else:
            host_string = config.TPCONF_host_internal_ip.get(
                env.host_string, '')[0]

        mac = execute(_get_netmac,
                      host_string.split(':')[0],
                      hosts=[config.TPCONF_router[0]])[config.TPCONF_router[0]]

    return mac.lower()
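
For illustration, the FreeBSD/Linux pipelines above are expected to emit one "<MAC> <IP>" pair per line (the addresses below are made up):

00:16:3e:aa:bb:cc 172.16.10.1
00:16:3e:dd:ee:ff 172.16.11.1

ip_mac_map then maps each IP to its MAC, e.g. {'172.16.10.1': '00:16:3e:aa:bb:cc', ...}, and the control IP resolved from host_string selects the matching entry.
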
Example #44
0
File: fabfile.py Project: tjcsl/ion
def _require_root():
    """Check if running as root."""
    with hide("running"):
        if local("whoami", capture=True) != "root":
            abort("You must be root.")
Example #45
0
def get_clock_offsets(exp_list='experiments_completed.txt',
                      test_id='',
                      pkt_filter='',
                      baseline_host='',
                      out_dir=''):
    "Get clock offsets for all hosts"

    if len(out_dir) > 0 and out_dir[-1] != '/':
        out_dir += '/'

    if test_id == '':
        try:
            with open(exp_list) as f:
                test_id_arr = f.readlines()
        except IOError:
            abort('Cannot open file %s' % exp_list)
    else:
        test_id_arr = test_id.split(';')

    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # specify complete tcpdump parameter list
    tcpdump_filter = '-tt -r - -n ' + pkt_filter

    for test_id in test_id_arr:
        test_id = test_id.rstrip()

        # first find tcpdump files
        tcpdump_files = get_testid_file_list('', test_id, '_ctl.dmp.gz', '')

        if len(tcpdump_files) == 0:
            warn('No tcpdump files for control interface for %s' % test_id)
            continue

        # if we have tcpdumps for control interface we can assume broadcast ping
        # was enabled

        dir_name = os.path.dirname(tcpdump_files[0])
        # then look for tpconf_vars.log.gz file in that directory
        var_file = local('find -L %s -name "*tpconf_vars.log.gz"' % dir_name,
                         capture=True)

        bc_addr = ''
        router_name = ''

        if len(var_file) > 0:
            # new approach without using config.py
            # XXX no caching here yet, assume we only generate clockoffset file once
            # per experiment

            # unzip archived file
            local('gzip -cd %s > %s' % (var_file, TMP_CONF_FILE))

            # load the TPCONF_variables into oldconfig
            oldconfig = imp.load_source('oldconfig', TMP_CONF_FILE)

            # remove temporary unzipped file
            try:
                os.remove(TMP_CONF_FILE)
                os.remove(TMP_CONF_FILE +
                          'c')  # remove the compiled file as well
            except OSError:
                pass

            try:
                bc_addr = oldconfig.TPCONF_bc_ping_address
            except AttributeError:
                pass

            router_name = oldconfig.TPCONF_router[0].split(':')[0]

        else:
            # old approach using config.py

            try:
                bc_addr = config.TPCONF_bc_ping_address
            except AttributeError:
                pass

            router_name = config.TPCONF_router[0].split(':')[0]

        if bc_addr == '':
            # assume default multicast address
            bc_addr = '224.0.1.199'

        # specify complete tcpdump parameter list
        if pkt_filter != '':
            tcpdump_filter = '-tt -r - -n ' + pkt_filter
        else:
            tcpdump_filter = '-tt -r - -n ' + 'icmp and dst host ' + bc_addr

        if baseline_host == '':
            baseline_host = router_name

        #
        # now read timestamps from each host's tcpdump
        #

        # map of host names (or IPs) and sequence numbers to timestamps
        host_times = {}
        for tcpdump_file in tcpdump_files:
            host = local(
                'echo %s | sed "s/.*_\([a-z0-9\.]*\)_ctl.dmp.gz/\\1/"' %
                tcpdump_file,
                capture=True)
            host_times[host] = {}
            #print(host)
            #print(host_times)

            # We pipe gzcat through to tcpdump. Note, since tcpdump exits early
            # (due to "-c num_samples") gzcat's pipe will collapse and gzcat
            # will complain bitterly. So we dump its stderr to stderrhack.
            init_zcat = Popen(['zcat ' + tcpdump_file],
                              stdin=None,
                              stdout=PIPE,
                              stderr=stderrhack,
                              shell=True)
            init_tcpdump = Popen(['tcpdump ' + tcpdump_filter],
                                 stdin=init_zcat.stdout,
                                 stdout=PIPE,
                                 stderr=stderrhack,
                                 shell=True)

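            # Each line of tcpdump output is expected to look roughly like
            # (addresses and times below are made-up examples):
            #   1397429806.123456 IP 172.16.10.254 > 224.0.1.199: ICMP echo request, id 99, seq 1, length 64
            # Field 0 is the timestamp and field 11 is the ICMP sequence number
            # (with a trailing comma), which is what the parsing below extracts.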
            for line in init_tcpdump.stdout.read().splitlines():
                _time = line.split(" ")[0]
                _seq = int(line.split(" ")[11].replace(',', ''))
                host_times[host][_seq] = _time

        #print(host_times)

        # get time differences and get host list
        diffs = {}
        ref_times = {}
        host_str = ''
        host_list = sorted(host_times.keys())
        # getting hosts from the config is problematic if different
        # experiments with different configs in same directory
        #host_list = sorted(config.TPCONF_router + config.TPCONF_hosts)

        for host in host_list:
            host_str += ' ' + host
            if host not in host_times:
                continue
            for seq in sorted(host_times[host].keys()):
                if seq not in diffs:
                    diffs[seq] = {}
                if baseline_host in host_times and seq in host_times[
                        baseline_host]:
                    diffs[seq][host] = float(host_times[host][seq]) - \
                        float(host_times[baseline_host][seq])
                    ref_times[seq] = host_times[baseline_host][seq]
                else:
                    # this should only happen if TPCONF_router was
                    # modified
                    warn('Cant find baseline host %s timestamp data' %
                         baseline_host)
                    diffs[seq][host] = None
                    ref_times[seq] = None

        #print(diffs)

        if out_dir == '' or out_dir[0] != '/':
            dir_name = os.path.dirname(tcpdump_files[0])
            out_dir = dir_name + '/' + out_dir
        mkdir_p(out_dir)
        out_name = out_dir + test_id + CLOCK_OFFSET_FILE_EXT

        # write table of offsets (rows = time, cols = hosts)
        f = open(out_name, 'w')
        f.write('# ref_time' + host_str + '\n')
        for seq in sorted(diffs.keys()):
            if ref_times[seq] is not None:
                f.write(ref_times[seq])
            else:
                # this case should never happen
                continue

            f.write(' ')

            for host in host_list:
                if host in diffs[seq] and diffs[seq][host] is not None:
                    f.write('{0:.6f}'.format(diffs[seq][host]))
                else:
                    f.write('NA')
                if host != host_list[-1]:
                    f.write(' ')
            f.write('\n')

        f.close()
Example #46
0
File: fabfile.py Project: tjcsl/ion
def forcemigrate(app=None):
    """Force migrations to apply for a given app."""
    if app is None:
        abort("No app name given.")
    local("./manage.py migrate {} --fake".format(app))
    local("./manage.py migrate {}".format(app))
Example #47
0
    def handle(self, noinput, debug, remote='', *args, **options):
        # Load server config from project
        config, remote = load_config(env,
                                     remote,
                                     config_user='******',
                                     debug=debug)

        # Set local project path
        local_project_path = django_settings.SITE_ROOT

        print(os.getenv('DJANGO_SETTINGS_MODULE'))

        if django_settings.DEBUG:
            abort(
                "You're currently using your local settings file, you need use production instead.\n"
                "To use production settings pass `--settings={}` to the deploy command."
                .format(
                    os.getenv('DJANGO_SETTINGS_MODULE').replace(
                        '.local', '.production')))

        # Change into the local project folder
        with hide('output', 'running', 'warnings'):
            with lcd(local_project_path):

                # Get the Git repo URL.
                remotes = local('git remote', capture=True).split('\n')

                if len(remotes) == 1:
                    git_remote = local('git config --get remote.{}.url'.format(
                        remotes[0]),
                                       capture=True)
                else:

                    def validate_choice(choice):
                        if choice in remotes:
                            return choice
                        raise Exception('That is not a valid choice.')

                    choice = prompt('Which Git remote would you like to use?',
                                    validate=validate_choice)
                    git_remote = local(
                        'git config --get remote.{}.url'.format(choice),
                        capture=True)

                # Is this a bitbucket repo?
                is_bitbucket_repo = 'git@bitbucket.org' in git_remote
                is_github_repo = 'github.com' in git_remote

                if is_bitbucket_repo:
                    bb_regex = re.match(r'git@bitbucket\.org:(.+)/(.+)\.git',
                                        git_remote)

                    if bb_regex:
                        bitbucket_account = bb_regex.group(1)
                        bitbucket_repo = bb_regex.group(2)
                    else:
                        raise Exception(
                            'Unable to determine Bitbucket details.')

                elif is_github_repo:
                    gh_regex = re.match(
                        r'(?:git@|https://)github.com[:/]([\w\-]+)/([\w\-.]+)\.git$',
                        git_remote)

                    if gh_regex:
                        github_account = gh_regex.group(1)
                        github_repo = gh_regex.group(2)
                    else:
                        raise Exception('Unable to determine Github details.')
                else:
                    raise Exception(
                        'Unable to determine Git host from remote URL: {}'.
                        format(git_remote))

                project_folder = local_project_path.replace(
                    os.path.abspath(os.path.join(local_project_path, '..')) +
                    '/', '')
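                # e.g. (hypothetical path) a SITE_ROOT of '/home/dev/sites/mysite'
                # has its parent prefix '/home/dev/sites/' stripped, giving
                # project_folder 'mysite'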

                with settings(warn_only=True):
                    if local('[[ -e ../requirements.txt ]]').return_code:
                        raise Exception("No requirements.txt")

        # Compress the domain names for nginx
        domain_names = " ".join(django_settings.ALLOWED_HOSTS)

        # Use the site domain as a fallback domain
        fallback_domain_name = django_settings.SITE_DOMAIN

        if not noinput:
            fallback_domain_name = prompt('What should the default domain be?',
                                          default=fallback_domain_name)
            domain_names = prompt(
                'Which domains would you like to enable in nginx?',
                default=domain_names)
        else:
            print('Default domain: ', fallback_domain_name)
            print('Domains to be enabled in nginx: ', domain_names)

        # If the domain is pointing to the droplet already, we can setup SSL.
        setup_ssl_for = [
            domain_name for domain_name in domain_names.split(' ')
            if local('dig +short {}'.format(domain_name), capture=True) ==
            remote['server']['ip']
        ]
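        # For example (hypothetical domains): if ALLOWED_HOSTS contains
        # 'example.com' and 'www.example.com' but only example.com has an A
        # record pointing at the server IP, setup_ssl_for == ['example.com']
        # and the loop below reports that www.example.com will be skipped.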

        if not setup_ssl_for:
            abort(
                "Sorry, it's $CURRENT_YEAR, you need to use SSL. Please update the domain DNS to point to {}."
                .format(remote['server']['ip']))

        for domain_name in domain_names.split(' '):
            if domain_name not in setup_ssl_for:
                print('SSL will not be configured for {}'.format(domain_name))

        # Override username (for DO hosts).
        if env.user == 'deploy':
            env.user = '******'

        # Print some information for the user
        print('')
        print('Project: {}'.format(project_folder))
        print('Server IP: {}'.format(env.host_string))
        print('Server user: {}'.format(env.user))
        print('')

        # Get BitBucket / Github details

        if is_bitbucket_repo:
            if os.environ.get('BITBUCKET_USERNAME', False) and os.environ.get(
                    'BITBUCKET_PASSWORD', False):
                bitbucket_username = os.environ.get('BITBUCKET_USERNAME')
                bitbucket_password = os.environ.get('BITBUCKET_PASSWORD')
            else:
                bitbucket_username = prompt(
                    'Please enter your BitBucket username:')
                bitbucket_password = prompt(
                    'Please enter your BitBucket password: ')
        elif is_github_repo:
            if os.environ.get('GITHUB_TOKEN', False):
                github_token = os.environ.get('GITHUB_TOKEN')
            else:
                github_token = prompt(
                    'Please enter your Github token (obtained from https://github.com/settings/tokens):'
                )

        circle_token = os.environ.get('CIRCLE_TOKEN', None)

        print("")

        # Create session_files
        session_files = {
            'gunicorn_start':
            NamedTemporaryFile(mode='w+', delete=False),
            'supervisor_config':
            NamedTemporaryFile(mode='w+', delete=False),
            'memcached_supervisor_config':
            NamedTemporaryFile(mode='w+', delete=False),
            'nginx_site_config':
            NamedTemporaryFile(mode='w+', delete=False),
            'apt_periodic':
            NamedTemporaryFile(mode='w+', delete=False),
            'certbot_cronjob':
            NamedTemporaryFile(mode='w+', delete=False),
        }

        # Parse files
        session_files['gunicorn_start'].write(
            render_to_string(
                'gunicorn_start', {
                    'project': project_folder,
                    'settings': remote['server'].get('settings_file',
                                                     'production')
                }))
        session_files['gunicorn_start'].close()

        session_files['supervisor_config'].write(
            render_to_string('supervisor_config', {'project': project_folder}))
        session_files['supervisor_config'].close()

        session_files['memcached_supervisor_config'].write(
            render_to_string('memcached_supervisor_config',
                             {'project': project_folder}))
        session_files['memcached_supervisor_config'].close()

        session_files['nginx_site_config'].write(
            render_to_string(
                'nginx_site_config', {
                    'project': project_folder,
                    'domain_names': domain_names,
                    'fallback_domain_name': fallback_domain_name
                }))
        session_files['nginx_site_config'].close()

        session_files['apt_periodic'].write(render_to_string('apt_periodic'))
        session_files['apt_periodic'].close()

        session_files['certbot_cronjob'].write(
            render_to_string('certbot_cronjob'))
        session_files['certbot_cronjob'].close()

        # Define the locales first.
        locale_tasks = [
            {
                'title':
                'Modify the locales config',
                'command':
                '; '.join([
                    "sed -i 's/^# en_GB.UTF-8/en_GB.UTF-8/' /etc/locale.gen",  # Uncomment the GB line
                ]),
            },
            {
                'title': 'Generate locales',
                'command': 'locale-gen --purge',
            },
            {
                'title': 'Modify default locales',
                'command': "sed -i 's/en_US/en_GB/' /etc/default/locale",
            },
            {
                'title':
                'Reconfigure locales',
                'command':
                'LANG=en_GB.UTF-8 dpkg-reconfigure -f noninteractive locales',
            },
        ]

        run_tasks(env, locale_tasks)

        # Check if optional packages are defined in the config.
        optional_packages = {}

        if 'optional_packages' in config:
            optional_packages = config['optional_packages']

        python_version_full = remote['server'].get('python_version', '3')
        python_version = python_version_full[0]
        pip_command = 'pip{}'.format(3 if python_version == '3' else '')
        python_command = 'python{}'.format(python_version_full)
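        # e.g. a configured python_version of '3.6' (illustrative) gives
        # python_version_full = '3.6', python_version = '3',
        # pip_command = 'pip3' and python_command = 'python3.6'
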
        # Define base tasks
        base_tasks = [
            # Add nginx and Let's Encrypt PPAs.  We add them up here because an
            # `apt-get update` is required for them to be truly added and that
            # comes next.
            {
                'title': 'Add nginx PPA',
                'command': 'add-apt-repository -y ppa:nginx/stable',
            },
            {
                'title': "Add Let's Encrypt PPA",
                'command': 'add-apt-repository -y ppa:certbot/certbot',
            },
            {
                'title': 'Update apt cache',
                'command': 'apt-get update',
            },
            {
                'title': 'Upgrade everything',
                'command': 'apt-get upgrade -y',
            },
            {
                'title': 'Install unattended-upgrades',
                'command': 'apt-get install -y unattended-upgrades',
            },
            {
                'title':
                "Install the base packages",
                'command':
                'apt-get install -y {}'.format(' '.join([
                    # Base requirements
                    'build-essential',
                    'git',
                    'ufw',  # Installed by default on Ubuntu, not elsewhere

                    # Project requirements
                    '{}-dev'.format(python_command),
                    'python{}-pip'.format('3' if python_version ==
                                          '3' else ''),
                    'apache2-utils',  # Required for htpasswd
                    'python{}-passlib'.format(
                        '3' if python_version == '3' else ''
                    ),  # Required for generating the htpasswd file
                    'supervisor',
                    'libjpeg-dev',
                    'libffi-dev',
                    'libssl-dev',  # Required for nvm.
                    'nodejs',
                    'memcached',
                    'fail2ban',

                    # Nginx things
                    'nginx',
                    'certbot',
                    'python-certbot-nginx',

                    # Postgres requirements
                    'postgresql',
                    'libpq-dev',
                    'python{}-psycopg2'.format(3 if python_version == '3' else
                                               ''),  # TODO: Is this required?

                    # Required under Python 3.
                    'python3-venv' if python_version == '3' else '',

                    # Other
                    'libgeoip-dev'
                    if optional_packages.get('geoip', True) else '',
                    'libmysqlclient-dev' if optional_packages.get(
                        'mysql', True) else '',
                    'python3.6' if python_version_full == '3.6' else '',
                    'python3.6-dev' if python_version_full == '3.6' else '',
                ]))
            },
            {
                'title':
                'Adjust APT update intervals',
                'fabric_command':
                'put',
                'fabric_args': [
                    session_files['apt_periodic'].name,
                    '/etc/apt/apt.conf.d/10periodic'
                ],
            },
            {
                'title': 'Update pip',
                'command': '{} install -U pip'.format(pip_command),
            },
            {
                'title': 'Install virtualenv',
                'command': '{} install virtualenv'.format(pip_command),
            },
            {
                'title': 'Set the timezone to UTC',
                'command': 'timedatectl set-timezone UTC',
            }
        ]

        if python_version_full == '3.6':
            base_tasks.insert(
                0, {
                    'title': 'Add Python 3.6 PPA',
                    'command':
                    'add-apt-repository -y ppa:jonathonf/python-3.6',
                })

        run_tasks(env, base_tasks)

        # Configure swap
        swap_tasks = [{
            'title': 'Create a swap file',
            'command': 'fallocate -l 4G /swapfile',
        }, {
            'title': 'Set permissions on swapfile to 600',
            'command': 'chmod 0600 /swapfile'
        }, {
            'title': 'Format swapfile for swap',
            'command': 'mkswap /swapfile',
        }, {
            'title': 'Add the file to the system as a swap file',
            'command': 'swapon /swapfile',
        }, {
            'title':
            'Write fstab line for swapfile',
            'command':
            "echo '/swapfile none swap sw 0 0' >> /etc/fstab",
        }, {
            'title': 'Change swappiness',
            'command': 'sysctl vm.swappiness=10'
        }, {
            'title': 'Write swappiness to file',
            'command': "echo 'vm.swappiness=10' >> /etc/sysctl.conf",
        }, {
            'title': 'Reduce cache pressure',
            'command': 'sysctl vm.vfs_cache_pressure=50',
        }, {
            'title':
            'Write cache pressure to file',
            'command':
            "echo 'vm.vfs_cache_pressure=50' >> /etc/sysctl.conf",
        }]

        # Check to see if we've already configured a swap file. This handles
        # the case where the deploy command is being re-run.
        cache_pressure = run('cat /proc/sys/vm/vfs_cache_pressure')

        if cache_pressure != '50':
            run_tasks(env, swap_tasks)

        # Define SSH tasks
        ssh_tasks = [{
            'title': 'Create the application group',
            'command': 'addgroup --system webapps',
        }, {
            'title':
            'Add the application user',
            'command':
            'adduser --shell /bin/bash --system --disabled-password --ingroup webapps {name}'
            .format(name=project_folder, ),
        }, {
            'title': "Add .ssh folder to application user's home directory",
            'command': 'mkdir ~{}/.ssh'.format(project_folder),
        }, {
            'title':
            'Generate SSH keys for application user',
            'command':
            "ssh-keygen -C application-server -f ~{}/.ssh/id_rsa -N ''".format(
                project_folder, )
        }, {
            'title':
            'Make the application directory',
            'command':
            '; '.join([
                'mkdir -m 0775 -p /var/www/{project}',
                'chown {project}:webapps /var/www/{project}',
            ]).format(project=project_folder, ),
        }, {
            'title':
            'Check application user file permissions',
            'command':
            '&& '.join([
                'chmod 0750 ~{project}',
                'chmod 0700 ~{project}/.ssh',
                'chmod 0600 ~{project}/.ssh/id_rsa',
                'chmod 0644 ~{project}/.ssh/id_rsa.pub',
                'chown -R {project}:webapps ~{project}',
            ]).format(project=project_folder, ),
        }, {
            'title':
            'Add deploy user',
            'command':
            'adduser --shell /bin/bash --disabled-password --system --ingroup webapps deploy',
        }, {
            'title': "Add .ssh folder to deploy user's home directory",
            'command': 'mkdir ~deploy/.ssh',
        }, {
            'title':
            'Add authorized keys to deploy user',
            'command':
            'mv ~{}/.ssh/authorized_keys /home/deploy/.ssh/authorized_keys'.
            format(env.user, ),
        }, {
            'title':
            'Check deploy user file permissions',
            'command':
            '; '.join([
                'chmod 0750 ~deploy',
                'chmod 0700 ~deploy/.ssh',
                'chmod 0644 ~deploy/.ssh/authorized_keys',
                'chown -R deploy:webapps ~deploy',
            ]),
        }, {
            'title': 'Remove sudo group rights',
            'command': "sed -i 's/^%sudo/# %sudo/' /etc/sudoers",
        }, {
            'title':
            'Add deploy user to sudoers',
            'command':
            'echo "deploy ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/deploy',
        }, {
            'title':
            'Ensure the deploy sudoers file has the correct permissions',
            'command': 'chmod 0440 /etc/sudoers.d/deploy',
        }, {
            'title':
            'Disallow root SSH access',
            'command':
            "sed -i 's/^PermitRootLogin yes/PermitRootLogin no/' /etc/ssh/sshd_config",
        }, {
            'title':
            'Disallow password authentication',
            'command':
            "sed -i 's/^PasswordAuthentication yes/PasswordAuthentication no/' /etc/ssh/sshd_config",
        }, {
            'title': 'Restart SSH',
            'command': 'service ssh restart',
        }]
        run_tasks(env, ssh_tasks)

        # Define db tasks
        db_tasks = [
            {
                'title':
                'Create the application postgres role',
                'command':
                'su - postgres -c "createuser {name}"'.format(
                    name=remote['database']['name'], ),
            },
            {
                'title':
                'Ensure database is created',
                'command':
                'su - postgres -c "createdb {name} --encoding=UTF-8 --locale=en_GB.UTF-8 '
                '--template=template0 --owner={owner} --no-password"'.format(
                    name=remote['database']['name'],
                    owner=remote['database']['user'],
                ),
            },
            {
                'title':
                'Ensure user has access to the database',
                'command':
                'su - postgres -c "psql {name} -c \'GRANT ALL ON DATABASE {name} TO {owner}\'"'
                .format(
                    name=remote['database']['name'],
                    owner=remote['database']['user'],
                ),
            },
            {
                'title':
                'Ensure user does not have unnecessary privileges',
                'command':
                'su - postgres -c "psql {name} -c \'ALTER USER {owner} WITH NOSUPERUSER '
                'NOCREATEDB\'"'.format(
                    name=remote['database']['name'],
                    owner=remote['database']['user'],
                ),
            },
        ]
        run_tasks(env, db_tasks)

        # Get SSH Key from server
        ssh_key = run('cat ~{}/.ssh/id_rsa.pub'.format(project_folder))

        # Get the current SSH keys in the repo
        if is_bitbucket_repo:
            task_title = 'Checking bitbucket repository for an existing SSH key'

            title_print(task_title, state='task')

            try:
                repo_ssh_keys = requests.get(
                    'https://bitbucket.org/api/1.0/repositories/{}/{}/deploy-keys/'
                    .format(
                        bitbucket_account,
                        bitbucket_repo,
                    ),
                    auth=(bitbucket_username, bitbucket_password))
            except:
                title_print(task_title, state='failed')
                exit()

            title_print(task_title, state='succeeded')

            task_title = 'Adding the SSH key to bitbucket'

            if repo_ssh_keys.text.find(ssh_key) == -1:
                title_print(task_title, state='task')

                try:
                    requests.post(
                        'https://bitbucket.org/api/1.0/repositories/{}/{}/deploy-keys/'
                        .format(
                            bitbucket_account,
                            bitbucket_repo,
                        ),
                        data=urlencode({
                            'label':
                            'Application Server ({})'.format(env.host_string),
                            'key':
                            ssh_key,
                        }),
                        auth=(bitbucket_username, bitbucket_password))
                except Exception as e:
                    title_print(task_title, state='failed')
                    raise e

                title_print(task_title, state='succeeded')

        elif is_github_repo:
            task_title = 'Adding the SSH key to Github'

            title_print(task_title, state='task')

            try:
                response = requests.post(
                    'https://api.github.com/repos/{}/{}/keys'.format(
                        github_account, github_repo),
                    json={
                        'title':
                        'Application Server ({})'.format(env.host_string),
                        'key': ssh_key,
                        'read_only': True,
                    },
                    headers={'Authorization': 'token {}'.format(github_token)})

                if debug:
                    print(response.text)
            except Exception as e:
                title_print(task_title, state='failed')
                raise e

            title_print(task_title, state='succeeded')

        # Define git tasks
        if is_bitbucket_repo:
            git_url = 'git@bitbucket.org:{}/{}.git'.format(
                bitbucket_account,
                bitbucket_repo,
            )
        elif is_github_repo:
            git_url = 'git@github.com:{}/{}.git'.format(
                github_account,
                github_repo,
            )

        git_tasks = [
            {
                'title':
                'Add Github key to known hosts',
                'command':
                'ssh-keyscan -H github.com >> ~{project}/.ssh/known_hosts'.
                format(project=project_folder, ),
            },
            {
                'title':
                'Setup the Git repo',
                'command':
                'cd /tmp; git clone {url} {project}'.format(
                    url=git_url,
                    project='/var/www/{}'.format(project_folder, )),
            },
        ]
        run_tasks(env, git_tasks, user=project_folder)

        # Define static tasks
        static_tasks = [
            {
                'title':
                'Make the static directory',
                'command':
                '; '.join([
                    'mkdir -m 0775 -p {dir}',
                    'chown {project}:webapps {dir}',
                ]).format(
                    project=project_folder,
                    dir=django_settings.STATIC_ROOT,
                ),
            },
            {
                'title':
                'Make the media directory',
                'command':
                '; '.join([
                    'mkdir -m 0775 -p {dir}', 'chown {project}:webapps {dir}'
                ]).format(
                    project=project_folder,
                    dir=django_settings.MEDIA_ROOT,
                ),
            },
        ]
        run_tasks(env, static_tasks)

        virtualenv_command = (
            'virtualenv -p python{python_full} /var/www/{project}/.venv')
        # Define venv tasks
        venv_tasks = [
            {
                'title':
                'Create the virtualenv',
                'command':
                virtualenv_command.format(
                    python_full=python_version_full,
                    project=project_folder,
                ),
            },
            # This shouldn't be necessary (we think we upgraded pip earlier)
            # but it is - you'll get complaints about bdist_wheel without
            # this.
            {
                'title':
                'Upgrade pip inside the virtualenv',
                'command':
                '/var/www/{project}/.venv/bin/pip install --upgrade pip'.
                format(project=project_folder, ),
            },
        ]
        run_tasks(env, venv_tasks, user=project_folder)

        gunicorn_tasks = [
            {
                'title':
                'Create the Gunicorn script file',
                'fabric_command':
                'put',
                'fabric_args': [
                    session_files['gunicorn_start'].name,
                    '/var/www/{project}/gunicorn_start'.format(
                        project=project_folder, )
                ],
            },
            {
                'title':
                'Make the Gunicorn script file executable',
                'command':
                'chmod +x /var/www/{project}/gunicorn_start'.format(
                    project=project_folder, )
            },
            {
                'title':
                'chown the Gunicorn script file',
                'command':
                'chown {project}:webapps /var/www/{project}/gunicorn_start'.
                format(project=project_folder, )
            },
        ]
        run_tasks(env, gunicorn_tasks)

        log_tasks = [
            {
                'title':
                'Create the application log file',
                'command':
                '; '.join([
                    'touch /var/log/gunicorn_supervisor.log',
                    'chown {}:webapps /var/log/gunicorn_supervisor.log'.format(
                        project_folder, ),
                    'chmod 0644 /var/log/gunicorn_supervisor.log',
                ]),
            },
        ]
        run_tasks(env, log_tasks)

        requirement_tasks = [
            {
                # Check to see if we have a requirements file. Even though we check for
                # it at the start of the deployment process, it hasn't necessarily been
                # committed. So this check covers that.
                'title':
                "Install packages required by the Django app inside virtualenv",
                'command':
                'if [ -f /var/www/{project}/requirements.txt ]; then /var/www/{project}/.venv/bin/pip '
                'install -r /var/www/{project}/requirements.txt; fi'.format(
                    project=project_folder, ),
            },
            {
                'title':
                'Make sure Gunicorn is installed',
                'command':
                '/var/www/{project}/.venv/bin/pip install gunicorn'.format(
                    project=project_folder, ),
            },
        ]

        run_tasks(env, requirement_tasks, user=project_folder)

        # Define nginx tasks
        nginx_tasks = [
            {
                'title':
                'Ensure Nginx service is stopped',  # This allows Certbot to run.
                'command': 'service nginx stop',
            },
            {
                'title':
                'Create the Nginx configuration file',
                'fabric_command':
                'put',
                'fabric_args': [
                    session_files['nginx_site_config'].name,
                    '/etc/nginx/sites-available/{}'.format(project_folder, )
                ],
            },
            {
                'title': 'Create the .htpasswd file',
                'command': 'htpasswd -c -b /etc/nginx/htpasswd onespace media',
            },
            {
                'title': 'Ensure that the default site is disabled',
                'command': 'rm /etc/nginx/sites-enabled/default',
            },
            {
                'title':
                'Ensure that the application site is enabled',
                'command':
                'ln -s /etc/nginx/sites-available/{project} /etc/nginx/sites-enabled/{project}'
                .format(project=project_folder, ),
            },
            {
                'title':
                'Run certbot',
                'command':
                'certbot certonly --standalone -n --agree-tos --email [email protected] '
                '--cert-name {} --domains {}'.format(fallback_domain_name,
                                                     ','.join(setup_ssl_for)),
            },
            {
                'title':
                'Generate DH parameters (this may take a little while)',
                'command': 'openssl dhparam -out /etc/ssl/dhparam.pem 2048',
            },
            {
                'title': 'Ensure Nginx service is started',
                'command': 'service nginx start',
            },
            {
                'title':
                'Configure certbot cronjob',
                'fabric_command':
                'put',
                'fabric_args':
                [session_files['certbot_cronjob'].name, '/etc/cron.d/certbot'],
            },
            {
                'title':
                'Ensure the certbot cronjob has the correct file permissions',
                'command': 'chmod 0644 /etc/cron.d/certbot',
            },
        ]
        run_tasks(env, nginx_tasks)

        # Configure the firewall.
        firewall_tasks = [
            {
                'title': 'Allow SSH connections through the firewall',
                'command': 'ufw allow OpenSSH'
            },
            {
                'title': 'Allow HTTP/HTTPS (Nginx Full) connections through the firewall',
                'command': 'ufw allow "Nginx Full"'
            },
            {
                'title': 'Enable the firewall, deny all other traffic',
                'command':
                'ufw --force enable',  # --force makes it non-interactive
            }
        ]

        run_tasks(env, firewall_tasks)

        # Define supervisor tasks
        supervisor_tasks = [
            {
                'title':
                'Create the Supervisor config file for the application',
                'fabric_command':
                'put',
                'fabric_args': [
                    session_files['supervisor_config'].name,
                    '/etc/supervisor/conf.d/{}.conf'.format(project_folder, )
                ],
            },
            {
                'title':
                'Stopping memcached and removing from startup runlevels',
                'command':
                '; '.join([
                    'service memcached stop',
                    'systemctl disable memcached',
                ]),
            },
            {
                'title':
                'Create the Supervisor config file for memcached',
                'fabric_command':
                'put',
                'fabric_args': [
                    session_files['memcached_supervisor_config'].name,
                    '/etc/supervisor/conf.d/memcached.conf'
                ],
            },
            {
                'title': 'Re-read the Supervisor config files',
                'command': 'supervisorctl reread',
            },
            {
                'title':
                'Update Supervisor to add the app in the process group',
                'command': 'supervisorctl update',
            },
        ]
        run_tasks(env, supervisor_tasks)

        # Define build system tasks
        build_systems = {
            "none": [],
            "npm": [{
                'title':
                'Install nvm',
                'command':
                'cd /tmp; curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.32.1/install.sh'
                ' | bash',
            }, {
                'title':
                'Activate nvm then install node and yarn',
                'command':
                '&& '.join([
                    'cd /var/www/{project}',
                    '. ~{project}/.nvm/nvm.sh',
                    'nvm install',
                    'npm install -g yarn',
                    'yarn',
                    'yarn run build',
                ]).format(project=project_folder, ),
            }, {
                'title':
                'Ensure static folder exists in project',
                'command':
                'if [ ! -d "/var/www/{project}/{project}/static/" ]; then mkdir /var/www/{project}/{'
                'project}/static/; fi'.format(project=project_folder, ),
            }, {
                'title':
                'Collect static files',
                'command':
                '/var/www/{project}/.venv/bin/python /var/www/{project}/manage.py collectstatic '
                '--noinput --link --settings={project}.settings.{settings}'.
                format(
                    project=project_folder,
                    settings=remote['server'].get('settings_file',
                                                  'production'),
                ),
            }]
        }

        run_tasks(env,
                  build_systems[remote['server'].get('build_system', 'none')],
                  user=project_folder)

        # Delete files
        for session_file in session_files:
            os.unlink(session_files[session_file].name)

        # Add the project to CircleCI
        circle_tasks = [
            {
                'title':
                'Create the CircleCI SSH key',
                'fabric_command':
                'local',
                'fabric_args':
                ["mkdir dist; ssh-keygen -C circleci -f dist/id_rsa -N ''"],
            },
            {
                'title':
                'Follow the project on CircleCI',
                'fabric_command':
                'local',
                'fabric_args': [
                    'curl -X POST https://circleci.com/api/v1.1/project/github/{github_account}/{'
                    'github_repo}/follow?circle-token={circle_token}'.format(
                        github_account=github_account,
                        github_repo=github_repo,
                        circle_token=circle_token,
                    )
                ]
            },
            {
                'title':
                'Add private SSH key to CircleCI',
                'fabric_command':
                'local',
                'fabric_args': [
                    'curl -X POST --header "Content-Type: application/json" -d \'{{"hostname":"{'
                    'fallback_domain_name}","private_key":"{private_key}"}}\' '
                    'https://circleci.com/api/v1.1/project/github/{github_account}/{'
                    'github_repo}/ssh-key?circle-token={circle_token}'.format(
                        fallback_domain_name=fallback_domain_name,
                        private_key=open('dist/id_rsa', 'r').read(),
                        github_account=github_account,
                        github_repo=github_repo,
                        circle_token=circle_token,
                    )
                ]
            },
            {
                'title':
                'Add public key to server',
                'command':
                'echo "{}" >> ~deploy/.ssh/authorized_keys'.format(
                    open('dist/id_rsa.pub', 'r').read())
            },
        ]

        if circle_token and is_github_repo:
            run_tasks(env, circle_tasks)

        print(
            'Initial application deployment has completed. You should now pushdb and pushmedia.'
        )