Example #1
def _setup_apache():
    _install_packages([
        'libapache2-mod-wsgi',
        'libapache2-mod-php5',
    ])

    files.upload_template("%(localdir)s/deploy/apache-main.conf" % env,
            "/etc/apache2/apache2.conf", use_sudo=True)

    files.upload_template("%(localdir)s/deploy/apache-site.conf" % env,
            "/etc/apache2/sites-available/%(project_name)s" % env, use_sudo=True)

    files.upload_template("%(localdir)s/deploy/standby.conf" % env,
            "/etc/apache2/sites-available/standby", use_sudo=True)

    local("cd %(localdir)s/deploy/503 && tar cjf ../503.tar.bz2 *" % env)
    files.put("%(localdir)s/deploy/503.tar.bz2" % env, "/tmp")
    sudo("tar xf /tmp/503.tar.bz2 -C /var/www && rm /tmp/503.tar.bz2")
    local("rm -f %(localdir)s/deploy/503.tar.bz2" % env)

    sudo("/usr/sbin/a2dismod "
         "cgid auth_basic authn_file authz_default authz_user "
         "authz_groupfile autoindex negotiation setenvif deflate")

    sudo("/usr/sbin/a2dissite default default-ssl standby")
    sudo("/usr/sbin/a2ensite %(project_name)s" % env)

    sudo("rm -f /var/www/index.html")

    _restart_service("apache2")
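
The example above relies on two project helpers, _install_packages and _restart_service, that are not shown anywhere in this listing. A minimal sketch of what they plausibly look like (the bodies are assumptions inferred from the call sites, not the original implementations):

from fabric.api import sudo

def _install_packages(packages):
    # non-interactive apt install of the given package list
    sudo('DEBIAN_FRONTEND=noninteractive apt-get install -y %s' % ' '.join(packages))

def _restart_service(name):
    # restart a SysV/upstart-style service by name
    sudo('service %s restart' % name)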
def setup_celery_backend(rds_host, user_key, user_secret):
    '''
    The real configuration happens here.
    '''
    logging.info('Updating Ubuntu\'s repository index')
    sudo('apt-get update')
    
    # Not sure why, but sometimes I get "E: Unable to locate package git"
    # trying to solve this with a sleep.
    time.sleep(2)
    sudo('apt-get update')
    
    logging.info('Installing ubuntu packages')
    for pkg in ['git', 'python-pip', 'joe', 'python-mysqldb', 'supervisor']:
        sudo('apt-get install -y -q %s' % pkg)
    
    
    logging.info('Getting celery application source code')
    with cd('/tmp/'):
        sudo('ssh-keyscan -H github.com > /root/.ssh/known_hosts')
        
        put(DEPLOY_PRIVATE_PATH, '/root/.ssh/id_rsa', use_sudo=True)
        put(DEPLOY_PUBLIC_PATH, '/root/.ssh/id_rsa.pub', use_sudo=True)
    
        sudo('chmod 600 /root/.ssh/id_rsa')
        sudo('chmod 600 /root/.ssh/id_rsa.pub')
        
        sudo('git clone %s' % VULNWEB_REPO)
    
    logging.info('Installing requirements.txt (this takes time!)')
    with cd('/tmp/nimbostratus-target/'):
        sudo('git checkout %s' % VULNWEB_BRANCH)
        sudo('pip install --use-mirrors --upgrade -r requirements.txt')

    vulnweb_root = '/tmp/nimbostratus-target/servers/django_frontend/vulnweb'

    logging.info('Configuring django-celery application')
    # Overwrite the application configuration files
    upload_template('servers/celery_backend/broker.config',
                    '%s/vulnweb/broker.py' % vulnweb_root,
                    context={'access': user_key,
                             'secret': user_secret},
                    backup=False, use_sudo=True)

    upload_template('servers/celery_backend/databases.config',
                    '%s/vulnweb/databases.py' % vulnweb_root,
                    context={'user': LOW_PRIV_USER,
                             'password': LOW_PRIV_PASSWORD,
                             'host': rds_host},
                    backup=False, use_sudo=True)

    upload_template('servers/celery_backend/supervisor.config',
                    '/etc/supervisor/conf.d/celery.conf',
                    context={'django_root_path': vulnweb_root},
                    backup=False, use_sudo=True)

    sudo('supervisorctl update')
    
    with cd(vulnweb_root):
        sudo('python manage.py syncdb --noinput')
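
The comment in the function above works around an intermittent "E: Unable to locate package git" error with a fixed two-second sleep. A hedged alternative is to retry apt-get update until it succeeds; this is only a sketch and not part of the original code:

import time
from fabric.api import sudo, settings

def _apt_update_with_retries(attempts=3, delay=5):
    # retry `apt-get update` a few times instead of relying on a fixed sleep
    for _ in range(attempts):
        with settings(warn_only=True):
            result = sudo('apt-get update')
        if result.succeeded:
            return
        time.sleep(delay)
    raise RuntimeError('apt-get update failed %d times in a row' % attempts)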
Example #3
def _configure_mongo_instance(ec2_instance):
    """ Configures a mongo instance for use in the PD Stack.

    Configures RAID volumes, configuration files, etc.

    parameters:
    ec2_instance -- the MongoDB EC2 Instance to configure.
      Tags on the MongoDB Instance Include:
         Name: [region code]_[environment code]_MONGO
         RaidLevel: None or raid-0, raid-10
         AttachedDevices: None or /dev/xvdf,/dev/xvdg, etc.
         NodeType: Arbiter, StandAlone, DataNode
         ElasticIP: None or ElasticIP Address bound to the instance
    """
    # use ebs attached devices if present otherwise plop the data on the ephemeral drive
    raid_level = ec2_instance.tags.get('RaidLevel')
    db_path = '/mnt/ebs/mongodb/data' if raid_level is not None else '/mnt/mongodb/data'

    # configure permissions for mongo directories
    sudo('mkdir -p {0}'.format(db_path))
    sudo('chown -R mongodb:nogroup {0}'.format(db_path))
    sudo('chown -R mongodb:nogroup /var/log/mongodb')

    # upload files
    put('uploads/mongodb',
        '/etc/init.d/mongodb',
        use_sudo=True
    )
    sudo('chmod u+x /etc/init.d/mongodb')

    sudo('echo "dbpath={0}" >> /etc/mongodb.conf'.format(db_path))

    aws_tag_name = ec2_instance.tags.get('Name')
    sudo('echo "replSet={0}" >> /etc/mongodb.conf'.format(aws_tag_name))
Example #4
def install_roundup(url_base = '/'):
	"""
	Install a Roundup Tracker as a FCGI application on `url_base`.
	"""

	run('.local/bin/easy_install roundup')
	run('.local/bin/easy_install flup')

	url_base = url_base.strip('/')
	# set up the FCGI handler
	files.append('public_html/.htaccess', [
		'AddHandler fcgid-script .fcgi',
		'RewriteRule ^{url_base}/(.*)$ /cgi-bin/roundup.fcgi/$1 [last]'.format(**vars()),
	])

	# install the cherrypy fcgi handler
	runner = io.StringIO(textwrap.dedent(u"""
		#!/home2/adamsrow/python2.7/bin/python
		import os
		from flup.server.fcgi import WSGIServer

		home = os.environ['HOME']
		tracker_home = os.path.join(home, 'Adams Row Tracker')

		from roundup import configuration
		from roundup.cgi.wsgi_handler import RequestDispatcher

		srv = WSGIServer(RequestDispatcher(tracker_home))
		srv.run()
	""").lstrip())

	files.put(runner, 'public_html/cgi-bin/roundup.fcgi')
	run('chmod 755 public_html/cgi-bin/roundup.fcgi')
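
This example hands an io.StringIO object straight to files.put; Fabric's put() accepts file-like objects as well as local paths, which is convenient for generating small files on the fly. A minimal standalone sketch of the same pattern (the filename and contents here are made up):

import io
from fabric.api import put

def upload_generated_file():
    # put() treats the file-like object as the local source
    put(io.StringIO(u'generated on the fly\n'), '/tmp/generated.txt')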
Example #5
    def render(template, output, var_file=None, engine='templite',
               encoding=sys.getfilesystemencoding(), use_sudo=False, **kwargs):
        """
            Fabric task to render a template on a remote serveur.

            Template variables are extracted from var_file if any, and
            all addition arguments from `kwargs`.

            The template files must be on the LOCAL machine.
        """

        context = {}
        if var_file:
            context.update(extract_context(var_file, encoding))

        for name, value in kwargs.items():
            try:
                context[name] = ast.literal_eval(value)
            except:
                context[name] = value

        try:
            r = globals()['%s_render_file' % engine]
            f = tempfile.NamedTemporaryFile(delete=False)
            f.write(r(template, context, encoding=encoding))
            f.close()
            put(f.name, unicode(output))
        except KeyError:
            sys.exit('Could not import "%s" lib for rendering. Is it installed?' % engine)
        finally:
            try:
                f.close()
                path(f.name).remove()
            except:
                pass
Example #6
def ubuntu_install_dokku(domain, *args, **kwargs):
    apt_depends('curl')
    run('curl -sL https://get.docker.io/gpg 2> /dev/null | sudo apt-key add - 2>&1 >/dev/null')
    run('curl -sL https://packagecloud.io/gpg.key 2> /dev/null | sudo apt-key add - 2>&1 >/dev/null')

    sudo('echo "deb http://get.docker.io/ubuntu docker main" > /etc/apt/sources.list.d/docker.list')
    sudo('echo "deb https://packagecloud.io/dokku/dokku/ubuntu/ trusty main" > /etc/apt/sources.list.d/dokku.list')

    uname_r = run('uname -r')
    apt_depends('linux-image-extra-{uname_r}'.format(uname_r=uname_r), 'apt-transport-https')

    sudo('echo "dokku dokku/web_config boolean false" | debconf-set-selections')
    sudo('echo "dokku dokku/vhost_enable boolean true" | debconf-set-selections')
    sudo('echo "dokku dokku/hostname string {domain}" | debconf-set-selections'.format(domain=domain))

    # TODO: Something better than this:
    pub_key_path = '/root/.ssh/id_rsa.pub'
    put(environ['PUBLIC_KEY_PATH'], pub_key_path, use_sudo=True, mode=0400)

    sudo('echo "dokku dokku/key_file string {pub_key_path}" | debconf-set-selections'.format(pub_key_path=pub_key_path))
    apt_depends('dokku')

    '''
    run('ssh-keygen -b 2048 -t dsa -f ~/.ssh/id_dsa -q -N ""')
    run('wget https://raw.github.com/progrium/dokku/v0.3.18/bootstrap.sh')
    run('sudo DOKKU_TAG=v0.3.18 bash bootstrap.sh')
    '''
    '''
    run('curl --silent https://get.docker.io/gpg 2> /dev/null | apt-key add - 2>&1 >/dev/null')
    run('curl --silent https://packagecloud.io/gpg.key 2> /dev/null | apt-key add - 2>&1 >/dev/null')

    run('echo "deb http://get.docker.io/ubuntu docker main" | sudo tee -a /etc/apt/sources.list.d/docker.list')
    run('echo "deb https://packagecloud.io/dokku/dokku/ubuntu/ trusty main" | sudo tee -a /etc/apt/sources.list.d/dokku.list')
    '''
Example #7
def _configure_mongo_instance(ec2_instance):
    """ Configures a mongo instance for use in the PD Stack.

    Configures RAID volumes, configuration files, etc.

    parameters:
    ec2_instance -- the MongoDB EC2 Instance to configure.
      Tags on the MongoDB Instance Include:
         Name: [region code]_[environment code]_MONGO
         RaidLevel: None or raid-0, raid-10
         AttachedDevices: None or /dev/xvdf,/dev/xvdg, etc.
         NodeType: Arbiter, StandAlone, DataNode
         ElasticIP: None or ElasticIP Address bound to the instance
    """
    # use ebs attached devices if present otherwise plop the data on the ephemeral drive
    raid_level = ec2_instance.tags.get('RaidLevel')
    db_path = '/mnt/ebs/mongodb/data' if raid_level is not None else '/mnt/mongodb/data'

    # configure permissions for mongo directories
    sudo('mkdir -p {0}'.format(db_path))
    sudo('chown -R mongodb:nogroup {0}'.format(db_path))
    sudo('chown -R mongodb:nogroup /var/log/mongodb')

    # upload files
    put('uploads/mongodb', '/etc/init.d/mongodb', use_sudo=True)
    sudo('chmod u+x /etc/init.d/mongodb')

    sudo('echo "dbpath={0}" >> /etc/mongodb.conf'.format(db_path))

    aws_tag_name = ec2_instance.tags.get('Name')
    sudo('echo "replSet={0}" >> /etc/mongodb.conf'.format(aws_tag_name))
def upload_directory():
    """ puts files on all servers
    """
    global remote_folder
    sudo('mkdir -p {0}'.format(remote_folder))
    with lcd(config.LOCAL_DIRECTORY):
        put("*", remote_folder, use_sudo=True)
Example #9
def sent_ssh_key():
    with cd("~/.ssh"):
        # grep exits non-zero when the key is missing; allow that instead of
        # letting Fabric abort before the key gets uploaded.
        with settings(warn_only=True):
            key_present = run("cat ~/.ssh/authorized_keys | grep baiyunhui")
        if key_present != "":
            return

        put("~/.ssh/id_rsa.pub", "~/.ssh/tmp.pub")
        run("cat tmp.pub >> authorized_keys")
        run("rm tmp.pub")
Example #10
def copy_ssh_key(ssh_pub_key="~/.ssh/id_dsa.pub", user=env.user):
    """ Copy the local ssh key to the cluster machines """
    ssh_pub_key_path = os.path.expanduser(ssh_pub_key)
    remote = "tmpkey.pem"
    put(ssh_pub_key_path, remote)
    sudo("mkdir -p ~{}/.ssh".format(user))
    sudo("cat ~{}/{} >> ~{}/.ssh/authorized_keys".format(user, remote, user))
    sudo("chown {}:{} ~{}/.ssh".format(user, user, user))
    sudo("chown {}:{} ~{}/.ssh/authorized_keys".format(user, user, user))
    sudo("rm ~{}/{}".format(user, remote))
Example #12
def install_database(source='db'):
	"""
	After having copied the database to a local source, install it to the
	target.
	"""
	target = '{install_root}/site'.format(**globals())
	if files.exists(target+'/db'):
		print("database already present")
		return
	files.put(local_path=source, remote_path=target, use_sudo=True)
def saveGZfiles():
    """
    Saves the three .gz files in the storage node
    """
    filenames = ['account.ring.gz','container.ring.gz','object.ring.gz']

    with cd('/etc/swift'):
        for filename in filenames:
            put(local_path='root@controller/'+filename,
                    remote_path='/etc/swift/'+filename)
Example #14
def saveGZfiles():
    """
    Saves the three .gz files in the storage node
    """
    filenames = ['account.ring.gz', 'container.ring.gz', 'object.ring.gz']

    with cd('/etc/swift'):
        for filename in filenames:
            put(local_path='root@controller/' + filename,
                remote_path='/etc/swift/' + filename)
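
The local_path used above points into a local directory literally named 'root@controller/', which suggests the ring files were fetched from the controller beforehand. A guess at that companion step using Fabric's get(); the host name and paths are assumptions, not from the original code:

from fabric.api import get, settings

def fetch_ring_files():
    filenames = ['account.ring.gz', 'container.ring.gz', 'object.ring.gz']
    # pull the ring files from the controller into the local staging directory
    with settings(host_string='controller'):
        for filename in filenames:
            get('/etc/swift/' + filename, 'root@controller/' + filename)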
Example #15
def setup():

    # install packages
    sudo("aptitude update")
    sudo("aptitude -y install %s" % (' '.join(PACKAGES), ))

    # clone repo
    copy_keys()
    run("cd %(server_project_base)s; git clone %(git_repo)s;" % env)

    # build environment
    sudo("easy_install virtualenv")
    # use --system-site-packages; psycopg2 is installed globally
    run("virtualenv --system-site-packages %s" % env.environment_dir)
    run('''source %(environment_dir)sbin/activate;
           pip install -r %(server_project_dir)s/requirements.txt''' % env)

    # nginx
    files.upload_template("%(project_dir)s/deploy/nginx/server.conf" % env,
                          "/etc/nginx/nginx.conf",
                          use_sudo=True,
                          context=env)
    sudo("rm -f /etc/nginx/sites-available/default")
    sudo("rm -f /etc/nginx/sites-available/%(project_name)s.conf" % env)
    sudo("rm -f /etc/nginx/sites-enabled/%(project_name)s.conf" % env)
    files.upload_template("%s/deploy/nginx/site.conf" % env.project_dir,
                          "/etc/nginx/sites-available/%(project_name)s.conf" %
                          env,
                          context=env,
                          use_sudo=True)
    sudo('''sudo ln -s /etc/nginx/sites-available/%(project_name)s.conf \
            /etc/nginx/sites-enabled/%(project_name)s.conf''' % env)

    # uwsgi
    sudo("rm -f /etc/init/uwsgi.conf")
    files.put("%s/deploy/uwsgi/upstart.conf" % env.project_dir,
              "/etc/init/uwsgi.conf",
              use_sudo=True)

    sudo("rm -f /etc/uwsgi/apps-available/%(project_name)s.ini" % env)
    sudo("rm -f /etc/uwsgi/apps-enabled/%(project_name)s.ini" % env)
    files.upload_template("%(project_dir)s/deploy/uwsgi/app.ini" % env,
                          "/etc/uwsgi/apps-available/%(project_name)s.ini" %
                          env,
                          use_sudo=True,
                          context=env)
    sudo('''sudo ln -s /etc/uwsgi/apps-available/%(project_name)s.ini \
            /etc/uwsgi/apps-enabled/%(project_name)s.ini''' % env)

    # syncdb
    run("""cd %(server_project_dir)s/;
           source %(environment_dir)sbin/activate; 
           python manage.py syncdb --noinput;""" % env)
    deploy()
def install_go():
    url = "https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz"
    filename = url.rpartition('/')[2]
    target = '/tmp/' + filename
    if not exists(target):
        run("wget -nv {} -O {}".format(url, target))
    sudo("tar -C /usr/local -xzf {}".format(target))
    #run("rm {}".format(target))

    golang_profile = '/etc/profile.d/golang.sh'
    put('files/golang.profile', golang_profile, use_sudo=True)
    run("source {}; go get github.com/tools/godep".format(golang_profile))
def write_file(source, target):
    tf = tempfile.NamedTemporaryFile()
    put(source, tf.name)

    try:
        env.warn_only = True
        result = run('mv %s %s' % (tf.name, target))
        if result.failed:
            print >> sys.stderr, 'got result %s when trying to move file to target, retrying with sudo' % (result,)
            run('sudo mv %s %s' % (tf.name, target))
    finally:
        env.warn_only = False
def write_file(source, target):
    tf = tempfile.NamedTemporaryFile()
    put(source, tf.name)

    try:
        env.warn_only = True
        result = run(f'mv {tf.name} {target}')
        if result.failed:
            print(f'got result {result} when trying to move file to target, retrying with sudo', file=sys.stderr)
            result = run(f'sudo mv {tf.name} {target}')
    finally:
        env.warn_only = False
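
Both write_file variants above reimplement the upload-to-temp-then-move dance by hand. Fabric's put() already does this when given use_sudo=True (it uploads to a temporary location and moves the file into place with sudo), so in many cases the helper reduces to a one-liner; this is a sketch, not a guaranteed drop-in replacement for every use of the original:

from fabric.api import put

def write_file_simple(source, target):
    # upload via a temp file and sudo-move into the final location
    put(source, target, use_sudo=True)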
Example #19
def _upload_postactivate(postactivate_file, venv_folder, bin_folder):
    """ Uploads postactivate shell script file to server. """
    # full filepath for the uploaded file.
    postactivate_path = "{bin}/postactivate".format(bin=bin_folder)
    # full filepath for python virtual environment activation shellscript on
    # the server.
    activate_path = "{venv}/bin/activate".format(venv=venv_folder)
    # add bash command to activate shellscript to source (run) postactivate
    # script when the virtualenvironment is activated.
    append(activate_path, "source {postactivate}".format(postactivate=postactivate_path))
    # upload file.
    put(postactivate_file, postactivate_path)
Example #20
def ubuntu_install_dokku(domain, *args, **kwargs):
    apt_depends("curl")
    run(
        "curl -sL https://get.docker.io/gpg 2> /dev/null | sudo apt-key add - 2>&1 >/dev/null"
    )
    run(
        "curl -sL https://packagecloud.io/gpg.key 2> /dev/null | sudo apt-key add - 2>&1 >/dev/null"
    )

    sudo(
        'echo "deb http://get.docker.io/ubuntu docker main" > /etc/apt/sources.list.d/docker.list'
    )
    sudo(
        'echo "deb https://packagecloud.io/dokku/dokku/ubuntu/ trusty main" > /etc/apt/sources.list.d/dokku.list'
    )

    uname_r = run("uname -r")
    apt_depends(
        "linux-image-extra-{uname_r}".format(uname_r=uname_r), "apt-transport-https"
    )

    sudo('echo "dokku dokku/web_config boolean false" | debconf-set-selections')
    sudo('echo "dokku dokku/vhost_enable boolean true" | debconf-set-selections')
    sudo(
        'echo "dokku dokku/hostname string {domain}" | debconf-set-selections'.format(
            domain=domain
        )
    )

    # TODO: Something better than this:
    pub_key_path = "/root/.ssh/id_rsa.pub"
    put(environ["PUBLIC_KEY_PATH"], pub_key_path, use_sudo=True, mode=0o400)

    sudo(
        'echo "dokku dokku/key_file string {pub_key_path}" | debconf-set-selections'.format(
            pub_key_path=pub_key_path
        )
    )
    apt_depends("dokku")

    """
    run('ssh-keygen -b 2048 -t dsa -f ~/.ssh/id_dsa -q -N ""')
    run('wget https://raw.github.com/progrium/dokku/v0.3.18/bootstrap.sh')
    run('sudo DOKKU_TAG=v0.3.18 bash bootstrap.sh')
    """
    """
    run('curl --silent https://get.docker.io/gpg 2> /dev/null | apt-key add - 2>&1 >/dev/null')
    run('curl --silent https://packagecloud.io/gpg.key 2> /dev/null | apt-key add - 2>&1 >/dev/null')

    run('echo "deb http://get.docker.io/ubuntu docker main" | sudo tee -a /etc/apt/sources.list.d/docker.list')
    run('echo "deb https://packagecloud.io/dokku/dokku/ubuntu/ trusty main" | sudo tee -a /etc/apt/sources.list.d/dokku.list')
    """
    """
def install_go():
    url="https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz"
    filename = url.rpartition('/')[2]
    target='/tmp/' + filename
    if not exists(target):
        run("wget -nv {} -O {}".format(url, target))
    sudo("tar -C /usr/local -xzf {}".format(target))
    #run("rm {}".format(target))

    golang_profile = '/etc/profile.d/golang.sh'
    put('files/golang.profile', golang_profile, use_sudo=True)
    run("source {}; go get github.com/tools/godep".format(golang_profile))
Example #23
    def run(self):
        run('mkdir -p {}'.format('Downloads'))
        with cd('~/Downloads'):
            if not exists('gitbucket_{}.war'.format(self.version)):
                run('wget https://github.com/gitbucket/gitbucket/'
                    'releases/download/{}/gitbucket.war -O '
                    'gitbucket_{}.war'.format(self.version, self.version))
        sudo('cp ~/Downloads/gitbucket_{version}.war /var/lib/{tomcat}/'
             'webapps/gitbucket.war'.format(version=self.version,
                                            tomcat=self.tomcat.name))
        self.tomcat.restart()  # create ${GITBUCKET_HOME}/.gitbucket

        for i in range(10):
            if exists(self.gitbucket_home):
                break
            time.sleep(1)  # gitbucket_home not created...

        put(os.path.join(TEMPLATE_DIR, 'gitbucket_backup.sh'),
            '{}/backup.sh'.format(self.gitbucket_home),
            use_sudo=True)
        sudo('chmod +x {}/backup.sh'.format(self.gitbucket_home))

        if self.db:
            d = dict(
                database=self.db.database,
                user=self.db.user,
                password=self.db.password,
                hostname=self.db.hostname,
            )
            if isinstance(self.db, PostgresDatabase):
                d['db_type'] = 'postgresql'
            else:
                raise Exception("Unknown database type")
            upload_template('gitbucket_database.conf',
                            '{}/database.conf'.format(self.gitbucket_home),
                            context=d,
                            template_dir=TEMPLATE_DIR,
                            use_jinja=True,
                            use_sudo=True)

        for key, url in self.plugins.items():
            sudo('mkdir -p {}/plugins'.format(self.gitbucket_home))
            sudo('chown -R {tomcat}:{tomcat} {home}/plugins'.format(
                home=self.gitbucket_home, tomcat=self.tomcat.name))
            with cd('~/Downloads'):
                if not exists('{}.jar'.format(key)):
                    run('wget {} -O {}.jar'.format(url, key))
                sudo('cp {key}.jar {home}/plugins/{key}.jar'.format(
                    key=key, home=self.gitbucket_home))
                sudo('chown {tomcat}:{tomcat} {home}/plugins/{key}.jar'.format(
                    key=key, home=self.gitbucket_home,
                    tomcat=self.tomcat.name))
        self.tomcat.restart()
Example #24
def install_systemd():
    """
    On newer versions of Ubuntu, make sure that systemd is configured
    to manage the service.
    https://docs.mongodb.com/v3.2/tutorial/install-mongodb-on-ubuntu/#ubuntu-16-04-only-create-systemd-service-file
    """
    if apt.lsb_version() < '15.04':
        return

    fn = 'mongod.service'
    service_strm = pkg_resources.resource_stream(__name__, fn)
    files.put(service_strm, '/lib/systemd/system/' + fn, use_sudo=True)
    sudo('systemctl enable mongod')
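
install_systemd passes the stream returned by pkg_resources straight to put(), so the unit file ships inside the Python package itself. A standalone sketch of the same pattern with a hypothetical package name ('mypkg'):

import pkg_resources
from fabric.api import put, sudo

def install_unit_file():
    # resource_stream() returns a file-like object, which put() accepts directly
    strm = pkg_resources.resource_stream('mypkg', 'mongod.service')
    put(strm, '/lib/systemd/system/mongod.service', use_sudo=True)
    sudo('systemctl daemon-reload')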
def _upload_postactivate(postactivate_file, venv_folder, bin_folder):
    """ Uploads postactivate shell script file to server. """
    # full filepath for the uploaded file.
    postactivate_path = '{bin}/postactivate'.format(bin=bin_folder, )
    # full filepath for python virtual environment activation shellscript on
    # the server.
    activate_path = '{venv}/bin/activate'.format(venv=venv_folder, )
    # add bash command to activate shellscript to source (run) postactivate
    # script when the virtualenvironment is activated.
    append(activate_path,
           'source {postactivate}'.format(postactivate=postactivate_path, ))
    # upload file.
    put(postactivate_file, postactivate_path)
Example #26
def configure_instances(aws_region, aws_tag):
    """ Configures a tagged AWS instance for use.

    General configuration tasks include RAID configuration.
    Specific tasks are executed based on the AWS "Name" tag

    parameters:
    aws_region -- The AWS region identifier code (us-east-1, us-west-1, us-west-2, etc)
    aws_tag -- The AWS "Name" tag
    """
    ec2_conn = _get_ec2_connection(aws_region)
    reservation = ec2_conn.get_all_instances(filters={
        'instance-state-name': 'running',
        'tag:Name': '{0}'.format(aws_tag)
    })

    for r in reservation:
        for i in r.instances:
            # tag elastic ips so we can associate it on startup
            elastic_ips = ec2_conn.get_all_addresses(
                filters={'instance-id': i.id})
            if len(elastic_ips) > 0 and i.tags.get('ElasticIP') is not None:
                i.add_tag('ElasticIP', elastic_ips[0].public_ip)

            with settings(host_string=i.public_dns_name,
                          key_filename=config.LOCAL_AWS_KEY_FILE,
                          user=config.AWS_USER_NAME,
                          connection_attempts=10):

                # configure storage (raid vs ephemeral)
                put('uploads/configure-raid.py',
                    '/tmp/configure-raid.py',
                    use_sudo=True)

                raid_level = i.tags.get('RaidLevel')
                attached_devices = i.tags.get('AttachedDevices')

                print('Configuring storage')
                if attached_devices is not None:
                    if raid_level is not None:
                        sudo('python /tmp/configure-raid.py {0} {1}'.format(
                            raid_level, attached_devices))
                    else:
                        _mount_ebs_volume(attached_devices, '/mnt/ebs')

                # node specific configurations
                # note that we're using the name to drive the configuration
                name_tag = i.tags.get('Name')

                if 'mongo' in name_tag.lower():
                    execute(_configure_mongo_instance, i)
Example #27
def deploy():
	with settings():
		sudo('apt-get update -qq')
		sudo('apt-get install --force-yes -y -qq lxc haproxy python-jinja2 git xfsprogs btrfs-tools python-virtualenv varnish')
		
		# convert filesystem to btrfs
		sudo('umount /mnt/')
		sudo('mkfs.btrfs /dev/xvdb')
		sudo('mount /mnt')

		# install glusterfs
		sudo('apt-get install --force-yes -y -qq software-properties-common')
		sudo('add-apt-repository -y ppa:semiosis/ubuntu-glusterfs-3.3')
		sudo('apt-get update -qq')
		sudo('apt-get install --force-yes -y -qq glusterfs-server glusterfs-client')
		
		#sudo('gluster peer probe ip-10-0-0-55')
		files.append ('/etc/fstab', 'ip-10-0-0-47:/data /data nfs defaults,_netdev,mountproto=tcp,noac 0 0', use_sudo=True)
		sudo('mkdir /data')
		sudo('mount /data')

		#change lxc instances folder
		sudo('mkdir /mnt/lxc')
		sudo('mv /var/lib/lxc /var/lib/lxc.bak')
		sudo('ln -s /mnt/lxc /var/lib/lxc')

		files.put('templates/etc/init.d/celeryd', '/etc/init.d/celeryd', use_sudo=True, mode=0755)
		files.put('templates/etc/default/celeryd', '/etc/default/celeryd', use_sudo=True)

		#files.upload_template("", "/etc/resolv.conf", use_sudo=True)
		try:
			with cd('/root/'):
				sudo('virtualenv --system-site-packages env')
		except:
			pass

		with cd('/root/'):
			sudo('apt-get install --force-yes -y -qq libmysqlclient-dev python-dev')
			with prefix('. /root/env/bin/activate'):
				sudo('pip install jinja2 sqlalchemy mysql-python celery python-dateutil pytz gitpython')	

		with settings(warn_only=True):
			sudo ('killall haproxy')

		with settings(warn_only=True):
			with cd('/data/bin'):
				with prefix('. /root/env/bin/activate'):
					sudo ('/etc/init.d/celeryd create-paths')
					sudo ('/etc/init.d/celeryd restart')
Example #28
def finalizeInstallation():
    """
    Final steps of the installation, such as setting swift.conf and restarting services
    """

    confFile = '/etc/swift/swift.conf'
    localFile = 'swift.conf'

    msg = 'Put base config file on node'
    out = put(localFile, confFile)
    if out.succeeded:
        printMessage('good', msg)
    else:
        printMessage('oops', msg)

    # In the [swift-hash] section, configure the hash path prefix and suffix for your environment
    set_parameter(confFile, 'swift-hash', 'swift_hash_path_prefix',
                  env_config.hashPathPrefix)
    set_parameter(confFile, 'swift-hash', 'swift_hash_path_suffix',
                  env_config.hashPathSuffix)

    # In the [storage-policy:0] section, configure the default storage policy
    set_parameter(confFile, 'storage-policy:0', 'name', 'Policy-0')
    set_parameter(confFile, 'storage-policy:0', 'default', 'yes')

    msg = 'Change ownership of the configuration directory to swift'
    run("chown -R swift:swift /etc/swift")

    execute(startServicesController)
    execute(startServicesStorage)
Example #29
def _deploy_configs(site_url, user_name=None, user_group=None, upload=True):
    user_name = user_name or site_url.replace('.', '_')
    user_group = user_group or LINUXGROUP
    configs = _get_configs(site_url)
    for service in configs:
        config = configs[service]
        template = config["template"]
        target = join(dirname(template), config["filename"])
        destination = join(config["target folder"], config["filename"])
        if not os.path.exists(target) or os.path.getctime(target) < os.path.getctime(template):
            local('cat %s | sed "s/SITEURL/%s/g" | sed "s/USERNAME/%s/g" | sed "s/USERGROUP/%s/g" > "%s"' %
                  (template, site_url, user_name, user_group, target, ))
        if upload:
            put(target, destination, use_sudo=True)
            with shell_env(FILENAME=destination):
                run(config['install'])
def finalizeInstallation():
    """
    Final steps of the installation, such as setting swift.conf and restarting services
    """

    confFile = '/etc/swift/swift.conf'
    localFile = 'swift.conf'

    msg = 'Put base config file on node'
    out = put(localFile,confFile)
    if out.succeeded:
        printMessage('good',msg)
    else:
        printMessage('oops',msg)


    # In the [swift-hash] section, configure the hash path prefix and suffix for your environment
    set_parameter(confFile,'swift-hash','swift_hash_path_prefix',env_config.hashPathPrefix)
    set_parameter(confFile,'swift-hash','swift_hash_path_suffix',env_config.hashPathSuffix)

    # In the [storage-policy:0] section, configure the default storage policy
    set_parameter(confFile,'storage-policy:0','name','Policy-0')
    set_parameter(confFile,'storage-policy:0','default','yes')

    msg = 'Change ownership of the configuration directory to swift'
    run("chown -R swift:swift /etc/swift")

    execute(startServicesController)
    execute(startServicesStorage)
Example #31
def setup():
    
    # install packages
    sudo("aptitude update")
    sudo("aptitude -y install %s" % (' '.join(PACKAGES),))
    
    # clone repo
    copy_keys()
    run("cd %(server_project_base)s; git clone %(git_repo)s;" % env)
    
    # build environment
    sudo("easy_install virtualenv")
    # use --system-site-packages; psycopg2 is installed globally
    run("virtualenv --system-site-packages %s" % env.environment_dir)
    run('''source %(environment_dir)sbin/activate;
           pip install -r %(server_project_dir)s/requirements.txt''' % env)
    
    # nginx
    files.upload_template("%(project_dir)s/deploy/nginx/server.conf" % env,
                          "/etc/nginx/nginx.conf", use_sudo=True, context=env)
    sudo("rm -f /etc/nginx/sites-available/default")
    sudo("rm -f /etc/nginx/sites-available/%(project_name)s.conf" % env)
    sudo("rm -f /etc/nginx/sites-enabled/%(project_name)s.conf" % env)
    files.upload_template("%s/deploy/nginx/site.conf" % env.project_dir,
                          "/etc/nginx/sites-available/%(project_name)s.conf" % env,
                          context=env, use_sudo=True)
    sudo('''sudo ln -s /etc/nginx/sites-available/%(project_name)s.conf \
            /etc/nginx/sites-enabled/%(project_name)s.conf''' % env)
    
    # uwsgi
    sudo("rm -f /etc/init/uwsgi.conf")
    files.put("%s/deploy/uwsgi/upstart.conf" % env.project_dir,
              "/etc/init/uwsgi.conf", use_sudo=True)
    
    sudo("rm -f /etc/uwsgi/apps-available/%(project_name)s.ini" % env)
    sudo("rm -f /etc/uwsgi/apps-enabled/%(project_name)s.ini" % env)
    files.upload_template("%(project_dir)s/deploy/uwsgi/app.ini" % env,
                          "/etc/uwsgi/apps-available/%(project_name)s.ini" % env, 
                          use_sudo=True, context=env)
    sudo('''sudo ln -s /etc/uwsgi/apps-available/%(project_name)s.ini \
            /etc/uwsgi/apps-enabled/%(project_name)s.ini''' % env)

    # syncdb
    run("""cd %(server_project_dir)s/;
           source %(environment_dir)sbin/activate; 
           python manage.py syncdb --noinput;""" % env)
    deploy()
def install_docker():
    if exists('/usr/bin/docker'):
        return

    # per http://docs.docker.com/engine/installation/ubuntulinux/
    sudo("apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D")

    distrib_codename = run("grep DISTRIB_CODENAME /etc/lsb-release |sed 's/.*=//'")
    put(StringIO.StringIO('deb https://apt.dockerproject.org/repo ubuntu-{} main\n'.format(distrib_codename)),
        '/etc/apt/sources.list.d/docker.list', use_sudo=True)
    sudo('apt-get --yes --quiet update')
    sudo('apt-cache policy docker-engine')
    sudo('apt-get --yes --quiet install docker-engine')
    sudo('adduser {} docker'.format(env.user))
    sudo('sudo service docker restart')
    time.sleep(5)
    disconnect_all()  # reconnect so the new docker group membership applies
    run("docker version")
Example #33
def _deploy_configs(user_name=None, user_group=None, upload=True):
    """
    Creates new configs for webserver and services and uploads them to webserver.
    If a custom version of config exists locally that is newer than the template config,
    a new config file will not be created from template.
    """
    site_url = env.site_url
    user_name = user_name or site_url.replace('.', '_')
    user_group = user_group or LINUXGROUP
    configs = _get_configs(site_url)
    for service in configs:  # services are webserver, wsgi service and so on.
        config = configs[service]
        template = config['template']  # template config file
        target = join(
            dirname(template),
            config['filename'])  # name for parsed config file
        # server filepath to place config file. Outside git repo.
        destination = join(config['target folder'], config['filename'])
        if not os.path.exists(target) or os.path.getctime(
                target) < os.path.getctime(template):
            # Generate config file from template if a newer custom file does not exist.
            # use sed to change variable names that will differ between
            # deployments and sites.
            local((
                'cat "{template}" | '
                'sed "s/SITEURL/{url}/g" | '
                'sed "s/USERNAME/{user}/g" | '
                'sed "s/USERGROUP/{group}/g" > '
                '"{filename}"'
            ).format(
                template=template,
                url=site_url,
                user=user_name,
                group=user_group,
                filename=target,
            ))
        if upload:
            # upload config file
            put(target, destination, use_sudo=True)
            with shell_env(FILENAME=destination):
                # run command to make service register new config and restart if
                # needed.
                run(config['install'])
Example #34
def deploy():
    print(yellow("Writing nginx config..."))
    nginx_config = render_template("nginx", settings["portal_services"])
    nginx_config_path = os.path.join(
        "/etc/nginx/sites-available",
        settings["portal_services"]["nginx_config"])
    string_to_remote_file(settings["portal_services"]["nginx_config"],
                          nginx_config_path,
                          nginx_config,
                          use_sudo=True)
    print(yellow("  ...Done writing nginx config."))
    print(yellow("Reloading nginx service..."))
    sudo("service nginx reload")
    print(yellow("  ...Done reloading nginx service."))
    print(yellow("Deploying localnet code..."))
    apps_dir = settings["dirs"]["apps_dir"]
    sudo("mkdir -p {}".format(apps_dir))
    put(os.path.join(script_path, "captive_portal"), apps_dir, use_sudo=True)
    put(os.path.join(script_path, "services"), apps_dir, use_sudo=True)
Example #35
def _deploy_configs(user_name=None, user_group=None, upload=True):
    """
    Creates new configs for webserver and services and uploads them to webserver.
    If a custom version of config exists locally that is newer than the template config,
    a new config file will not be created from template.
    """
    site_url = env.site_url
    user_name = user_name or site_url.replace('.', '_')
    user_group = user_group or LINUXGROUP
    configs = _get_configs(site_url)
    for service in configs:  # services are webserver, wsgi service and so on.
        config = configs[service]
        template = config['template']  # template config file
        target = join(
            dirname(template),
            config['filename'])  # name for parsed config file
        # server filepath to place config file. Outside git repo.
        destination = join(config['target folder'], config['filename'])
        if not os.path.exists(target) or os.path.getctime(
                target) < os.path.getctime(template):
            # Generate config file from template if a newer custom file does not exist.
            # use sed to change variable names that will differ between
            # deployments and sites.
            local((
                'cat "{template}" | '
                'sed "s/SITEURL/{url}/g" | '
                'sed "s/USERNAME/{user}/g" | '
                'sed "s/USERGROUP/{group}/g" > '
                '"{filename}"'
            ).format(
                template=template,
                url=site_url,
                user=user_name,
                group=user_group,
                filename=target,
            ))
        if upload:
            # upload config file
            put(target, destination, use_sudo=True)
            with shell_env(FILENAME=destination):
                # run command to make service register new config and restart if
                # needed.
                run(config['install'])
Example #36
def repo(url="[email protected]:CharlesdeG/Vdeothumbs.git", dest=None, key_file=None):
    """
        Clone repo from `url` to remote `dest` using LOCAL `key_file`.

        `key_file` will be uploaded to the user .ssh dir.
    """
    ssh_dir = env.user_dir / '.ssh'

    if not key_file:
        key_file = env.local_deploy_dir / 'videothumbs_fabric_rsa'

    put(unicode(key_file), unicode(ssh_dir))
    put(key_file + '.pub', unicode(ssh_dir))

    append(ssh_dir / 'config', 'Host github.com')
    append(ssh_dir / 'config', '    IdentityFile %s' % (ssh_dir / key_file.namebase))
    run('chmod 700 "%s" -R' % ssh_dir)

    require.git.working_copy(url,  dest or env.repo_dir)
Example #37
    def run(self):
        if fab_exists(self.cert_path):
            reg = self.regenerate
            if self.regenerate is None:
                reg = confirm('regenerate ssl files?', default=False)
            if not reg:
                return
        tmp_dir = '/tmp/{}'.format(uuid.uuid4().hex)
        run('mkdir {}'.format(tmp_dir))
        with cd(tmp_dir):
            if self.certificates is None:
                onetime_pass = uuid.uuid4().hex
                run('openssl genrsa -des3 -out server.key '
                    '-passout pass:{} 2048'.format(onetime_pass))
                run('openssl req -passin pass:{} -new -key server.key '
                    '-out server.csr'.format(onetime_pass))
                run('cp server.key server.key.org')
                run('openssl rsa -passin pass:{} -in server.key.org '
                    '-out server.key'.format(onetime_pass))
                run('openssl x509 -req -days 365 -in server.csr '
                    '-signkey server.key -out server.crt')
            else:
                for i in self.certificates:
                    put(i, 'crt')
                    run('cat crt >> server.crt')
                put(self.private_key, 'server.org.key')
                p = self.private_key_password
                if self.private_key_password is None:
                    p = prompt('enter private key password')
                if p:
                    run('openssl rsa -passin pass:{} -in '
                        'server.org.key -out server.key'.format(p))
                else:
                    run('cp server.org.key server.key')
            _run = run
            if self.use_sudo:
                _run = sudo
            remote_dir = os.path.dirname(self.remote_path)
            _run('mkdir -p {}'.format(remote_dir))
            _run('cp server.crt {}'.format(self.cert_path))
            _run('cp server.key {}'.format(self.key_path))
        run('rm -rf {}'.format(tmp_dir))
Example #38
def configure_instances(aws_region, aws_tag):
    """ Configures a tagged AWS instance for use.

    General configuration tasks include RAID configuration.
    Specific tasks are executed based on the AWS "Name" tag

    parameters:
    aws_region -- The AWS region identifier code (us-east-1, us-west-1, us-west-2, etc)
    aws_tag -- The AWS "Name" tag
    """
    ec2_conn = _get_ec2_connection(aws_region)
    reservation = ec2_conn.get_all_instances(filters={'instance-state-name': 'running', 'tag:Name': '{0}'.format(aws_tag)})

    for r in reservation:
        for i in r.instances:
            # tag elastic ips so we can associate it on startup
            elastic_ips = ec2_conn.get_all_addresses(filters={'instance-id':i.id})
            if len(elastic_ips) > 0 and i.tags.get('ElasticIP') is not None:
                i.add_tag('ElasticIP', elastic_ips[0].public_ip)

            with settings(host_string=i.public_dns_name, key_filename=config.LOCAL_AWS_KEY_FILE,
                user=config.AWS_USER_NAME,connection_attempts=10):

                # configure storage (raid vs ephemeral)
                put('uploads/configure-raid.py', '/tmp/configure-raid.py', use_sudo=True)

                raid_level = i.tags.get('RaidLevel')
                attached_devices = i.tags.get('AttachedDevices')

                print('Configuring storage')
                if attached_devices is not None:
                    if raid_level is not None:
                        sudo('python /tmp/configure-raid.py {0} {1}'.format(raid_level, attached_devices))
                    else:
                        _mount_ebs_volume(attached_devices, '/mnt/ebs')

                # node specific configurations
                # note that we're using the name to drive the configuration
                name_tag = i.tags.get('Name')

                if 'mongo' in name_tag.lower():
                    execute(_configure_mongo_instance, i)
Example #39
def restore(project_name, dump):
    remote_env = load_environment_dict(project_name)
    database = dj_database_url.parse(remote_env['DATABASE_URL'])
    home_folder = '/home/{project_name}'.format(project_name=project_name)
    sudo('supervisorctl stop {project_name}'.format(project_name=project_name))
    with cd(home_folder), settings(sudo_user='******'), shell_env(
            HOME=home_folder):
        put(dump, '/tmp/{NAME}_latest.dump'.format(**database))
        with settings(sudo_user='******'):
            sudo('dropdb --if-exists -h {HOST} -p {PORT} {NAME}'.format(
                **database))
            sudo('createdb {NAME} -O {USER} -h {HOST} -p {PORT}'.format(
                **database))
        with hide('output'), settings(warn_only=True), StreamFilter(
            [database['PASSWORD']], sys.stdout):
            sudo(
                'PGPASSWORD={PASSWORD} pg_restore --clean --no-acl --no-owner -d {NAME} /tmp/{NAME}_latest.dump'
                .format(**database))
    sudo(
        'supervisorctl start {project_name}'.format(project_name=project_name))
Example #40
def setupfirewall():
    """
    Set up a somewhat strict deny-all iptables-based firewall
    Only allow 80, 443, 22 and ICMP in; otherwise deny. Also records all denied requests to monitor for abuse.
    """
    loadconfig()

    iptables_rules_file = 'iptables.firewall.rules'
    iptables_init_file = '/etc/network/if-pre-up.d/firewall'

    if exists(iptables_init_file):
        print(yellow('firewall file already exists, doing nothing'))
        return

    put(iptables_rules_file, '/etc/', use_sudo=True)

    iptables_init_text = """
    #!/bin/sh
    /sbin/iptables-restore < /etc/iptables.firewall.rules
    """
    append(iptables_init_file, iptables_init_text.strip(), use_sudo=True)
    sudo('chmod +x %s' % iptables_init_file)
    sudo(iptables_init_file)
Example #41
def setupfirewall():
    """
    Set up a somewhat strict deny-all iptables-based firewall
    Only allow 80, 443, 22 and ICMP in; otherwise deny. Also records all denied requests to monitor for abuse.
    """
    loadconfig()

    iptables_rules_file = 'iptables.firewall.rules'
    iptables_init_file = '/etc/network/if-pre-up.d/firewall'

    if exists(iptables_init_file):
        print(yellow('firewall file already exists, doing nothing'))
        return
    
    put(iptables_rules_file, '/etc/', use_sudo=True)

    iptables_init_text = """
    #!/bin/sh
    /sbin/iptables-restore < /etc/iptables.firewall.rules
    """
    append(iptables_init_file, iptables_init_text.strip(), use_sudo=True)
    sudo('chmod +x %s' % iptables_init_file)
    sudo(iptables_init_file)
Example #42
def _deploy_configs(site_url, user_name=None, user_group=None, upload=True):
    user_name = user_name or site_url.replace('.', '_')
    user_group = user_group or LINUXGROUP
    configs = _get_configs(site_url)
    for service in configs:
        config = configs[service]
        template = config["template"]
        target = join(dirname(template), config["filename"])
        destination = join(config["target folder"], config["filename"])
        if not os.path.exists(target) or os.path.getctime(
                target) < os.path.getctime(template):
            local(
                'cat %s | sed "s/SITEURL/%s/g" | sed "s/USERNAME/%s/g" | sed "s/USERGROUP/%s/g" > "%s"'
                % (
                    template,
                    site_url,
                    user_name,
                    user_group,
                    target,
                ))
        if upload:
            put(target, destination, use_sudo=True)
            with shell_env(FILENAME=destination):
                run(config['install'])
def install_docker():
    if exists('/usr/bin/docker'):
        return

    # per http://docs.docker.com/engine/installation/ubuntulinux/
    sudo(
        "apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D"
    )

    distrib_codename = run(
        "grep DISTRIB_CODENAME /etc/lsb-release |sed 's/.*=//'")
    put(StringIO.StringIO(
        'deb https://apt.dockerproject.org/repo ubuntu-{} main\n'.format(
            distrib_codename)),
        '/etc/apt/sources.list.d/docker.list',
        use_sudo=True)
    sudo('apt-get --yes --quiet update')
    sudo('apt-cache policy docker-engine')
    sudo('apt-get --yes --quiet install docker-engine')
    sudo('adduser {} docker'.format(env.user))
    sudo('sudo service docker restart')
    time.sleep(5)
    disconnect_all()  # reconnect so the new docker group membership applies
    run("docker version")
Example #44
def configureController():

    confFile = '/etc/swift/proxy-server.conf'
    localFile = 'proxy-server.conf'

    # proxyServerConf is a config file based on this model:
    # https://raw.githubusercontent.com/openstack/swift/stable/juno/etc/proxy-server.conf-sample

    msg = "Put base {} on controller".format(confFile)
    out = put(localFile, confFile)
    if out.succeeded:
        printMessage('good', msg)
    else:
        printMessage('oops', msg)

    # set parameters
    set_parameter(confFile, 'DEFAULT', 'bind_port', '8080')
    set_parameter(confFile, 'DEFAULT', 'user', 'swift')
    set_parameter(confFile, 'DEFAULT', 'swift_dir', '/etc/swift')

    set_parameter(
        confFile, 'pipeline:main', 'pipeline',
        "'authtoken cache healthcheck keystoneauth proxy-logging proxy-server'"
    )
    set_parameter(confFile, 'app:proxy-server', 'allow_account_management',
                  'true')
    set_parameter(confFile, 'app:proxy-server', 'account_autocreate', 'true')

    set_parameter(confFile, 'filter:keystoneauth', 'use',
                  'egg:swift#keystoneauth')
    set_parameter(confFile, 'filter:keystoneauth', 'operator_roles',
                  'admin,_member_')

    set_parameter(confFile, 'filter:authtoken', 'paste.filter_factory',
                  'keystonemiddleware.auth_token:filter_factory')
    set_parameter(confFile, 'filter:authtoken', 'auth_uri',
                  'http://controller:5000/v2.0')
    set_parameter(confFile, 'filter:authtoken', 'identity_uri',
                  'http://controller:35357')
    set_parameter(confFile, 'filter:authtoken', 'admin_tenant_name', 'service')
    set_parameter(confFile, 'filter:authtoken', 'admin_user', 'swift')
    set_parameter(confFile, 'filter:authtoken', 'admin_password',
                  passwd['SWIFT_PASS'])
    set_parameter(confFile, 'filter:authtoken', 'delay_auth_decision', 'true')

    set_parameter(confFile, 'filter:cache', 'memcache_servers',
                  '127.0.0.1:11211')
def configureController():

    confFile = '/etc/swift/proxy-server.conf'
    localFile = 'proxy-server.conf'

    # proxyServerConf is a config file based on this model:
    # https://raw.githubusercontent.com/openstack/swift/stable/juno/etc/proxy-server.conf-sample

    msg = "Put base {} on controller".format(confFile)
    out = put(localFile,confFile)
    if out.succeeded:
        printMessage('good',msg)
    else:
        printMessage('oops',msg)

    # set parameters
    set_parameter(confFile,'DEFAULT','bind_port','8080')
    set_parameter(confFile,'DEFAULT','user','swift')
    set_parameter(confFile,'DEFAULT','swift_dir','/etc/swift')


    set_parameter(confFile,'pipeline:main','pipeline',"'authtoken cache healthcheck keystoneauth proxy-logging proxy-server'")
    set_parameter(confFile,'app:proxy-server','allow_account_management','true')
    set_parameter(confFile,'app:proxy-server','account_autocreate','true')

    set_parameter(confFile,'filter:keystoneauth','use','egg:swift#keystoneauth')
    set_parameter(confFile,'filter:keystoneauth','operator_roles','admin,_member_')

    set_parameter(confFile,'filter:authtoken','paste.filter_factory','keystonemiddleware.auth_token:filter_factory')
    set_parameter(confFile,'filter:authtoken','auth_uri','http://controller:5000/v2.0')
    set_parameter(confFile,'filter:authtoken','identity_uri','http://controller:35357')
    set_parameter(confFile,'filter:authtoken','admin_tenant_name','service')
    set_parameter(confFile,'filter:authtoken','admin_user','swift')
    set_parameter(confFile,'filter:authtoken','admin_password',passwd['SWIFT_PASS'])
    set_parameter(confFile,'filter:authtoken','delay_auth_decision','true')

    set_parameter(confFile,'filter:cache','memcache_servers','127.0.0.1:11211')
Example #46
def deploy_patches_pack():
    put('patches.zip', 'static/texturepacks')
Example #47
def _upload_postactivate(postactivate_file, venv_folder, bin_folder):
    postactivate_path = '%s/postactivate' % (bin_folder, )
    activate_path = '%s/bin/activate' % (venv_folder, )
    append(activate_path, 'source %s' % (postactivate_path, ))
    put(postactivate_file, postactivate_path)
Example #48
def __copy_static():
    with cd(remote_dir):
        put("static/js/bundle.js", remote_dir + '/static/js/')
Example #49
def step0(domain, *args, **kwargs):
    key_file = '/root/.ssh/id_rsa.pub'
    config = {
        'DOKKU_HOSTNAME': ('hostname', domain),
        'DOKKU_KEY_FILE': ('key_file', key_file),
        'DOKKU_SKIP_KEY_FILE': ('skip_key_file', False),
        'DOKKU_VHOST_ENABLE': ('vhost_enable', False),
        'DOKKU_WEB_CONFIG': ('web_config', False)
    }
    create_static = kwargs.get('create_static_page', True)
    static_git_url = kwargs.get('static_git', environ.get('DOKKU_STATIC_GIT', environ.get('STATIC_GIT')))

    local_pubkey = kwargs.get('PUBLIC_KEY_PATH') or environ.get('DOKKU_PUBLIC_KEY_PATH', environ['PUBLIC_KEY_PATH'])

    if not cmd_avail('docker'):
        docker.install_0()
        # docker.dockeruser_1()
        docker.serve_2()

    put(StringIO('pZPlHOkV649DCepEwf9G'), '/tmp/passwd')

    if not cmd_avail('dokku'):  # is_installed('dokku'):
        run('wget -qN https://packagecloud.io/gpg.key')
        sudo('apt-key add gpg.key')
        append('/etc/apt/sources.list.d/dokku.list',
               'deb https://packagecloud.io/dokku/dokku/ubuntu/ trusty main', use_sudo=True)

        put(StringIO('\n'.join('{com} {com}/{var} {type} {val}'.format(
            com='dokku', var=v[0], val=str(v[1]).lower() if type(v[1]) is BooleanType else v[1], type=(
                lambda t: {type(True): 'boolean', type(''): 'string', type(unicode): 'string'}.get(t, t))(type(v[1])))
                               for k, v in config.iteritems() if v[1] is not None)
                     ),
            '/tmp/dokku-debconf')

        sudo('debconf-set-selections /tmp/dokku-debconf')
        if not exists(key_file):
            sudo('ssh-keygen -t rsa -b 4096 -f {key_file} -N ""'.format(key_file=key_file))

        apt_depends('dokku')
        sudo('dokku plugin:install-dependencies --core')
        put(local_pubkey, key_file)
        sudo('sshcommand acl-add dokku domain {key_file}'.format(key_file=key_file))
        return 'installed dokku'

    if create_static:
        if run('getent passwd static', quiet=True, warn_only=True).failed:
            sudo('adduser static --disabled-password')
            sudo('mkdir /home/static/sites/', user='static')

        upload_template(
            path.join(path.dirname(resource_filename('offregister_dokku', '__init__.py')), 'data', 'static_sites.conf'),
            '/etc/nginx/conf.d/static_sites.conf', use_sudo=True
        )

        if sudo('service nginx status').endswith('stop/waiting'):
            sudo('service nginx start')
        else:
            sudo('service nginx reload')

        # TODO: Abstract this out into a different module, and allow for multiple domains
        if static_git_url:
            ipv4 = '/home/static/sites/{public_ipv4}'.format(public_ipv4=kwargs['public_ipv4'])
            if exists(ipv4):
                sudo('rm -rf {ipv4}'.format(ipv4=ipv4))
            sudo('mkdir -p {ipv4}'.format(ipv4=ipv4), user='static')
            if domain:
                domain = '/home/static/sites/{domain}'.format(domain=domain)
                if not exists(domain):
                    sudo('ln -s {ipv4} {domain}'.format(ipv4=ipv4, domain=domain), user='static')
            xip = '{ipv4}.xip.io'.format(ipv4=ipv4)
            if not exists(xip):
                sudo('ln -s {ipv4} {xip}'.format(ipv4=ipv4, xip=xip), user='static')

            if static_git_url:
                apt_depends('git')

                if isinstance(static_git_url, basestring):
                    clone_or_update(**url_to_git_dict(static_git_url))
                else:
                    clone_or_update(to_dir=ipv4, **static_git_url)

    return 'installed dokku [already]'
Example #50
def _put_envs():
    put(os.path.join(PROJECT_DIR, 'envs.json'),
        '~/{}/envs.json'.format(PROJECT_NAME))
Example #51
def __copy_files():
    # Copy the local_settings.py file to the corresponding location
    with cd(remote_dir):
        put("_site", remote_dir)
def configureStorage():
    """
    Set the account-, container-, and object-server conf files
    """

    serverConfFiles = ['account-server.conf','container-server.conf','object-server.conf']
    ip = env_config.nicDictionary['compute1']['mgtIPADDR']
    devicepath = swiftGlusterDir
    # devicepath = '/srv/node'

    # save base files into the host
    for fil in serverConfFiles:
        remotefile = '/etc/swift/' + fil
        out = put(fil,remotefile)
        msg = "Save file {} on host {}".format(fil,env.host)
        if out.succeeded:
            printMessage('good', msg)
        else:
            printMessage('oops', msg)

    # set variables that are the same in all conf files
    for confFile in serverConfFiles:
        set_parameter('/etc/swift/' + confFile,'DEFAULT','bind_ip',ip)
        set_parameter('/etc/swift/' + confFile,'DEFAULT','user','swift')
        set_parameter('/etc/swift/' + confFile,'DEFAULT','swift_dir','/etc/swift')
        set_parameter('/etc/swift/' + confFile,'DEFAULT','devices',devicepath)

        set_parameter('/etc/swift/' + confFile,'filter:recon','recon_cache_path','/var/cache/swift')

        # when the device isn't an actual disk, 
        # we need to set mount_check to false
        set_parameter('/etc/swift/' + confFile,'DEFAULT','mount_check','false')


    # Edit the account-server.conf file
    confFile = '/etc/swift/' + serverConfFiles[0]

    set_parameter(confFile,'DEFAULT','bind_port','6002')
    set_parameter(confFile,'pipeline:main','pipeline',"'healthcheck recon account-server'")

    # Edit the /etc/swift/container-server.conf file
    confFile = '/etc/swift/' + serverConfFiles[1]

    set_parameter(confFile,'DEFAULT','bind_port','6001')
    set_parameter(confFile,'pipeline:main','pipeline',"'healthcheck recon container-server'")

    # Edit the /etc/swift/object-server.conf
    confFile = '/etc/swift/' + serverConfFiles[2]

    set_parameter(confFile,'DEFAULT','bind_port','6000')
    set_parameter(confFile,'pipeline:main','pipeline',"'healthcheck recon object-server'")

    msg = 'Ensure proper ownership of the mount point directory structure'
    runCheck(msg, "chown -R swift:swift {}".format(devicepath))

    msg = 'Create the recon directory'
    runCheck(msg, "mkdir -p /var/cache/swift")
    msg = 'Ensure proper ownership of recon directory'
    runCheck(msg, " chown -R swift:swift /var/cache/swift")
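
set_parameter() and runCheck() are project helpers that are not included in this listing. One plausible implementation of set_parameter() shells out to crudini on the remote host (assuming crudini is installed there); the real helper may edit the INI files differently.

from fabric.api import run

def set_parameter(config_file, section, parameter, value):
    # Hypothetical helper: crudini edits INI-style files in place.
    # Usage: crudini --set FILE SECTION PARAMETER VALUE
    run("crudini --set {} {} {} {}".format(config_file, section, parameter, value))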
Beispiel #53
0
def _put_envs():
    put('envs.json', '~/{}/envs.json'.format(PROJECT_NAME))
    put('bank_envs.json', '~/{}/bank_envs.json'.format(PROJECT_NAME))
Beispiel #54
0
def _copy_files():
    run('mkdir -p builds/adamw523blog')
    put('mysql', 'builds/adamw523blog/')
    put('util', 'builds/adamw523blog/')
    put('wordpress', 'builds/adamw523blog/')
    put('private/fig.yml', 'builds/adamw523blog/')
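
The uploaded fig.yml suggests the containers are brought up with fig (the predecessor of docker-compose). A plausible follow-up step, not shown in the original example:

from fabric.api import cd, run

def _start_containers():
    # Hypothetical: build and start the uploaded stack in the background,
    # assuming fig is installed on the remote host.
    with cd('builds/adamw523blog'):
        run('fig up -d')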
Beispiel #55
0
def _upload_postactivate(postactivate_file, venv_folder, bin_folder):
    postactivate_path = '%s/postactivate' % (bin_folder,)
    activate_path = '%s/bin/activate' % (venv_folder,)
    append(activate_path, 'source %s' % (postactivate_path,))
    put(postactivate_file, postactivate_path)
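
The postactivate file is a plain shell snippet that the virtualenv's activate script will source after the append above. A minimal example of what might be passed as postactivate_file, with assumed variable names and paths:

from StringIO import StringIO  # Python 2, matching the Fabric 1.x code above

# Hypothetical contents; the exported names are assumptions, not project values.
POSTACTIVATE_EXAMPLE = '''\
#!/bin/bash
export DJANGO_SETTINGS_MODULE=myproject.settings.production
export SECRET_KEY="change-me"
'''

# put() accepts file-like objects, so the snippet can be uploaded directly:
# _upload_postactivate(StringIO(POSTACTIVATE_EXAMPLE),
#                      venv_folder='/home/deploy/.venvs/myproject',
#                      bin_folder='/home/deploy/.venvs/myproject/bin')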
def setup_celery_backend(rds_host, user_key, user_secret):
    '''
    The real configuration happens here.
    '''
    logging.info('Updating Ubuntu\'s repository index')
    sudo('apt-get update')

    # Not sure why, but sometimes I get "E: Unable to locate package git"
    # trying to solve this with a sleep.
    time.sleep(2)
    sudo('apt-get update')

    logging.info('Installing ubuntu packages')
    for pkg in ['git', 'python-pip', 'joe', 'python-mysqldb', 'supervisor']:
        sudo('apt-get install -y -q %s' % pkg)

    logging.info('Getting celery application source code')
    with cd('/tmp/'):
        sudo('ssh-keyscan -H github.com > /root/.ssh/known_hosts')

        put(DEPLOY_PRIVATE_PATH, '/root/.ssh/id_rsa', use_sudo=True)
        put(DEPLOY_PUBLIC_PATH, '/root/.ssh/id_rsa.pub', use_sudo=True)

        sudo('chmod 600 /root/.ssh/id_rsa')
        sudo('chmod 600 /root/.ssh/id_rsa.pub')

        sudo('git clone %s' % VULNWEB_REPO)

    logging.info('Installing requirements.txt (this takes time!)')
    with cd('/tmp/nimbostratus-target/'):
        sudo('git checkout %s' % VULNWEB_BRANCH)
        sudo('pip install --use-mirrors --upgrade -r requirements.txt')

    vulnweb_root = '/tmp/nimbostratus-target/servers/django_frontend/vulnweb'

    logging.info('Configuring django-celery application')
    # Overwrite the application configuration files
    upload_template('servers/celery_backend/broker.config',
                    '%s/vulnweb/broker.py' % vulnweb_root,
                    context={
                        'access': user_key,
                        'secret': user_secret
                    },
                    backup=False,
                    use_sudo=True)

    upload_template('servers/celery_backend/databases.config',
                    '%s/vulnweb/databases.py' % vulnweb_root,
                    context={
                        'user': LOW_PRIV_USER,
                        'password': LOW_PRIV_PASSWORD,
                        'host': rds_host
                    },
                    backup=False,
                    use_sudo=True)

    upload_template('servers/celery_backend/supervisor.config',
                    '/etc/supervisor/conf.d/celery.conf',
                    context={'django_root_path': vulnweb_root},
                    backup=False,
                    use_sudo=True)

    sudo('supervisorctl update')

    with cd(vulnweb_root):
        sudo('python manage.py syncdb --noinput')
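
The three *.config templates live in the nimbostratus-target repository and are not reproduced here. Fabric's upload_template performs %-style interpolation of the context dict by default, so the supervisor template plausibly looks something like the sketch below; the real file may differ.

# Hypothetical approximation of servers/celery_backend/supervisor.config;
# %(django_root_path)s is filled in from the context dict above.
SUPERVISOR_TEMPLATE = '''\
[program:celery]
directory=%(django_root_path)s
command=python manage.py celeryd --loglevel=INFO
autostart=true
autorestart=true
stdout_logfile=/var/log/celery.log
redirect_stderr=true
'''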
Beispiel #57
0
def putSampleHtml():
    from fabric.api import put
    import StringIO
    put(local_path=StringIO.StringIO("hello world"),
        remote_path='/var/www/html/hello.html',
        use_sudo=True)
Beispiel #58
0
def configureStorage():
    """
    Set the account-, container-, and object-server conf files
    """

    serverConfFiles = [
        'account-server.conf', 'container-server.conf', 'object-server.conf'
    ]
    ip = env_config.nicDictionary['compute1']['mgtIPADDR']
    devicepath = swiftGlusterDir
    # devicepath = '/srv/node'

    # save base files into the host
    for fil in serverConfFiles:
        remotefile = '/etc/swift/' + fil
        out = put(fil, remotefile)
        msg = "Save file {} on host {}".format(fil, env.host)
        if out.succeeded:
            printMessage('good', msg)
        else:
            printMessage('oops', msg)

    # set variables that are the same in all conf files
    for confFile in serverConfFiles:
        set_parameter('/etc/swift/' + confFile, 'DEFAULT', 'bind_ip', ip)
        set_parameter('/etc/swift/' + confFile, 'DEFAULT', 'user', 'swift')
        set_parameter('/etc/swift/' + confFile, 'DEFAULT', 'swift_dir',
                      '/etc/swift')
        set_parameter('/etc/swift/' + confFile, 'DEFAULT', 'devices',
                      devicepath)

        set_parameter('/etc/swift/' + confFile, 'filter:recon',
                      'recon_cache_path', '/var/cache/swift')

        # when the device isn't an actual disk,
        # we need to set mount_check to false
        set_parameter('/etc/swift/' + confFile, 'DEFAULT', 'mount_check',
                      'false')

    # Edit the account-server.conf file
    confFile = '/etc/swift/' + serverConfFiles[0]

    set_parameter(confFile, 'DEFAULT', 'bind_port', '6002')
    set_parameter(confFile, 'pipeline:main', 'pipeline',
                  "'healthcheck recon account-server'")

    # Edit the /etc/swift/container-server.conf file
    confFile = '/etc/swift/' + serverConfFiles[1]

    set_parameter(confFile, 'DEFAULT', 'bind_port', '6001')
    set_parameter(confFile, 'pipeline:main', 'pipeline',
                  "'healthcheck recon container-server'")

    # Edit the /etc/swift/object-server.conf
    confFile = '/etc/swift/' + serverConfFiles[2]

    set_parameter(confFile, 'DEFAULT', 'bind_port', '6000')
    set_parameter(confFile, 'pipeline:main', 'pipeline',
                  "'healthcheck recon object-server'")

    msg = 'Ensure proper ownership of the mount point directory structure'
    runCheck(msg, "chown -R swift:swift {}".format(devicepath))

    msg = 'Create the recon directory'
    runCheck(msg, "mkdir -p /var/cache/swift")
    msg = 'Ensure proper ownership of recon directory'
    runCheck(msg, " chown -R swift:swift /var/cache/swift")
Beispiel #59
0
def step0(domain, *args, **kwargs):
    key_file = "/root/.ssh/id_rsa.pub"
    config = {
        "DOKKU_HOSTNAME": ("hostname", domain),
        "DOKKU_KEY_FILE": ("key_file", key_file),
        "DOKKU_SKIP_KEY_FILE": ("skip_key_file", False),
        "DOKKU_VHOST_ENABLE": ("vhost_enable", False),
        "DOKKU_WEB_CONFIG": ("web_config", False),
    }
    create_static = kwargs.get("create_static_page", True)
    static_git_url = kwargs.get(
        "static_git", environ.get("DOKKU_STATIC_GIT",
                                  environ.get("STATIC_GIT")))

    local_pubkey = kwargs.get("PUBLIC_KEY_PATH") or environ.get(
        "DOKKU_PUBLIC_KEY_PATH", environ["PUBLIC_KEY_PATH"])

    if not cmd_avail("docker"):
        docker.install_0()
        # docker.dockeruser_1()
        docker.serve_2()

    put(StringIO("pZPlHOkV649DCepEwf9G"), "/tmp/passwd")

    if not cmd_avail("dokku"):  # is_installed('dokku'):
        run("wget -qN https://packagecloud.io/gpg.key")
        sudo("apt-key add gpg.key")
        append(
            "/etc/apt/sources.list.d/dokku.list",
            "deb https://packagecloud.io/dokku/dokku/ubuntu/ trusty main",
            use_sudo=True,
        )

        put(
            StringIO("\n".join("{com} {com}/{var} {type} {val}".format(
                com="dokku",
                var=v[0],
                val=str(v[1]).lower() if isinstance(v[1], bool) else v[1],
                type=(lambda t: {
                    bool: "boolean",
                    str: "string",
                }.get(t, t))(type(v[1])),
            ) for k, v in iteritems(config) if v[1] is not None)),
            "/tmp/dokku-debconf",
        )

        sudo("debconf-set-selections /tmp/dokku-debconf")
        if not exists(key_file):
            sudo('ssh-keygen -t rsa -b 4096 -f {key_file} -N ""'.format(
                key_file=key_file))

        apt_depends("dokku")
        sudo("dokku plugin:install-dependencies --core")
        put(local_pubkey, key_file)
        sudo("sshcommand acl-add dokku domain {key_file}".format(
            key_file=key_file))
        return "installed dokku"

    if create_static:
        if run("getent passwd static", quiet=True, warn_only=True).failed:
            sudo("adduser static --disabled-password")
            sudo("mkdir /home/static/sites/", user="******")

        upload_template(
            path.join(
                path.dirname(
                    resource_filename("offregister_dokku", "__init__.py")),
                "data",
                "static_sites.conf",
            ),
            "/etc/nginx/conf.d/static_sites.conf",
            use_sudo=True,
        )

        if sudo("service nginx status").endswith("stop/waiting"):
            sudo("service nginx start")
        else:
            sudo("service nginx reload")

        # TODO: Abstract this out into a different module, and allow for multiple domains
        if static_git_url:
            ipv4 = "/home/static/sites/{public_ipv4}".format(
                public_ipv4=kwargs["public_ipv4"])
            if exists(ipv4):
                sudo("rm -rf {ipv4}".format(ipv4=ipv4))
            sudo("mkdir -p {ipv4}".format(ipv4=ipv4), user="******")
            if domain:
                domain = "/home/static/sites/{domain}".format(domain=domain)
                if not exists(domain):
                    sudo(
                        "ln -s {ipv4} {domain}".format(ipv4=ipv4,
                                                       domain=domain),
                        user="******",
                    )
            xip = "{ipv4}.xip.io".format(ipv4=ipv4)
            if not exists(xip):
                sudo("ln -s {ipv4} {xip}".format(ipv4=ipv4, xip=xip),
                     user="******")

            if static_git_url:
                apt_depends("git")

                if isinstance(static_git_url, str):
                    clone_or_update(**url_to_git_dict(static_git_url))
                else:
                    clone_or_update(to_dir=ipv4, **static_git_url)

    return "installed dokku [already]"
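
For reference, with domain='example.com' the StringIO written to /tmp/dokku-debconf above expands to debconf preseed lines of the form 'package question type value', i.e. something like:

dokku dokku/hostname string example.com
dokku dokku/key_file string /root/.ssh/id_rsa.pub
dokku dokku/skip_key_file boolean false
dokku dokku/vhost_enable boolean false
dokku dokku/web_config boolean false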