Example #1
def bootstrap(spiceweasel_yml):
    """Bootstrap environment based on a Spiceweasel template

    Args:
        spiceweasel_yml: Local path to the Spiceweasel infrastructure template.

    """

    # TODO(dw): If the user does not provide a Spiceweasel template,
    #           generate one by querying using an API Extension, ID,
    #           and filter

    if os.path.exists(spiceweasel_yml):
        tmpfile = '/tmp/spiceweasel.yml'

        # Use the ruby binaries embedded with Chef. This might not be smart.
        with cd('/opt/chef/embedded/bin'):
            puts(green('Installing Spiceweasel'))
            sudo('./gem install spiceweasel --no-ri --no-rdoc')

            puts(green('Uploading Spiceweasel template %s' % spiceweasel_yml))
            put(spiceweasel_yml, tmpfile)

            # TODO(dw): Incomplete. This will pipe into bash when working
            puts(green('Running Spiceweasel'))
            sudo('./spiceweasel -c /root/.chef/knife.rb %s' % tmpfile)

        run('rm -f %s' % tmpfile)
Example #2
def deploy():
    require('hosts')
    require('path')
    print(green("\n#####Beginning deployment to %s & %s on %s\n" % (env.mob_domain, env.stubs_domain, env.hosts[0])))
    env.mob_release_label = None
    env.stubs_release_label = None
    
    if env.is_test:
        create_tag = prompt('Tag this release? [y/N] ')
        if create_tag.lower() == 'y':
            print("\nShowing latest tags for reference\n")
            local('git tag | tail -5')
            env.refspec = prompt('Enter tag name [in format VX_X_X]? ')  # Can't use .'s as separators as they cause import problems on the server
            local('git tag %s -am "Tagging version %s in fabfile"' % (env.refspec, env.refspec))
            local('git push --tags')
            env.mob_release_label = mob_package_name + '-' + env.refspec
            env.stubs_release_label = stubs_package_name + '-' + env.refspec
    if not env.mob_release_label:
        # An existing tag must be specified
        local('git tag | tail -5')
        env.refspec = prompt('Choose tag to build from: ')
        local('git tag | grep "%s"' % env.refspec)
        env.mob_release_label = mob_package_name + '-' + env.refspec
        env.stubs_release_label = stubs_package_name + '-' + env.refspec
            
#    import time
#    env.release_label = package_name + '-' + time.strftime('%Y%m%d%H%M%S')
    _upload_tar_from_git()
    _install_site()
    _symlink_current_release()
    restart_webserver()
    print(green("\n#####Deployment successful for %s & %s\n" % (env.mob_domain, env.stubs_domain)))
Example #3
def s(*args, **kwargs):
    """Set destination servers or server groups by comma delimited list of names"""
    # Load config
    servers = _load_config(**kwargs)
    # If no arguments were received, print a message with a list of available configs.
    if not args:
        print 'No server name given. Available configs:'
        for key in servers:
            print colors.green('\t%s' % key)

    # Create `group` - a dictionary, containing copies of configs for selected servers. Server hosts
    # are used as dictionary keys, which allows us to connect current command destination host with
    # the correct config. This is important, because somewhere along the way fabric messes up the
    # hosts order, so simple list index incrementation won't suffice.
    env.group = {}
    # For each given server name
    for name in args:
        # Recursive call to retrieve all server records. If `name` is a group (e.g. `all`),
        # get its members, iterate through them and create a `group` record for each.
        # Otherwise, get the fields from the `name` server record.
        # If the requested server is not in the settings dictionary, output an error message
        # and list all available servers.
        _build_group(name, servers)


    # Copy server hosts from `env.group` keys - this gives us a complete list of unique hosts to
    # operate on. No host is added twice, so we can safely add overlapping groups. Each added host is
    # guaranteed to have a config record in `env.group`.
    env.hosts = env.group.keys()
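The comments above describe what `_build_group` does; the helper itself is defined elsewhere in that fabfile. A minimal sketch of what it could look like, assuming a config layout where each plain server record carries a 'host' field and each group record carries a 'members' list (both assumptions, not taken from the original project):

def _build_group(name, servers):
    # Unknown name: report the error and list the available configs.
    if name not in servers:
        print colors.red('Unknown server "%s". Available configs:' % name)
        for key in servers:
            print colors.green('\t%s' % key)
        return
    record = servers[name]
    if 'members' in record:
        # Group record (e.g. `all`): recurse into each member so every
        # underlying host ends up with its own config copy in env.group.
        for member in record['members']:
            _build_group(member, servers)
    else:
        # Plain server record: key the copy by host so the running task
        # can look up the right config for env.host later.
        env.group[record['host']] = dict(record)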
Example #4
def upload_cookbooks(url="http://github.com/rcbops/chef-cookbooks",
                     branch="grizzly",
                     directory="/opt/rpcs/chef-cookbooks"):
    """Uploads Chef cookbooks from a git repository

    Args:
        url: URL for Git repository
        branch: Branch of Git repo to use
        directory: Path to clone repository into

    """
    puts(green("Installing git"))
    sudo('apt-get -qq update')
    sudo('apt-get install -qy git')

    # We might want to be more careful here
    if files.exists(directory):
        sudo('rm -rf %s' % directory)

    puts('Cloning chef-cookbooks repository')
    sudo('git clone -q --recursive --depth 1 -b %s %s %s'
         % (branch, url, directory))

    puts(green("Uploading cookbooks"))
    sudo('knife cookbook upload -c /root/.chef/knife.rb -a')

    if files.exists('%s/roles' % directory):
        puts(green("Creating roles"))
        sudo('knife role from file %s/roles/*.rb -c /root/.chef/knife.rb'
             % directory)
Example #5
def skeletonize():
    '''Update Skeleton HTML5-Boilerplate.'''
    print green("Skeletonizing the project directory...")

    # Skeleton
    print blue("Installing skeleton HTML5 Boilerplate.")
    os.chdir(PROJ_DIR)
    sh.git.submodule.update(init=True)

    os.chdir(PROJ_DIR + "/skeleton")
    sh.git.pull("origin", "master")
    sh.rsync("-av", "images", "{0}/{1}/static/".format(PROJ_DIR,APP_NAME))
    sh.rsync("-av", "stylesheets",  "{0}/{1}/static/".format(PROJ_DIR,APP_NAME))
    sh.rsync("-av", "index.html",  "{0}/{1}/templates/base_t.html".format(PROJ_DIR,APP_NAME))
    os.chdir(PROJ_DIR)

    # Patch the base template with templating tags
    print blue("Patching the base template.")
    os.chdir(PROJ_DIR + "/{0}/templates/".format(APP_NAME))
    template_patch = open("base_t.patch")
    sh.patch(strip=0, _in=template_patch)
    template_patch.close()
    os.chdir(PROJ_DIR)

    # Jquery
    print blue("Installing jquery 1.9.0.")
    os.chdir(PROJ_DIR + "/" + APP_NAME + "/static/js")
    sh.curl("http://code.jquery.com/jquery-1.9.0.min.js", O=True)
    os.chdir(PROJ_DIR)
Example #6
def check_mysql_connection(user, password, host, database, port):
    """
    Check that the MySQL connection works.
    """
    # MySQLdb is installed from the pip file so we don't want to
    # import it until now.
    import MySQLdb

    puts(green("Checking MySQL connection and packages"))

    try:
        dbase = MySQLdb.connect(
            host=host, port=int(port), user=user, passwd=password, db=database)

        cursor = dbase.cursor()
        cursor.execute("SELECT VERSION()")
        results = cursor.fetchone()
        # Check if anything at all is returned
        if results:
            puts(green("MySQL connection successful."))
    except Exception:
        puts(red("ERROR IN CONNECTION"))
        puts(red("Install cannot continue without valid database connection."))
        puts(red("Please verify your database credentials and try again."))
        exit()
    puts(green("You are connected to MySQL Server"))
    return True
Example #7
def _standby_clone():
    """ With "node1" server running, we want to use the clone standby
    command in repmgr to copy over the entire PostgreSQL database cluster
    onto the "node2" server. """
    # manually:
    # $ mkdir -p /var/lib/postgresql/9.1/testscluster/
    # $ rsync -avz --rsh='ssh -p2222' [email protected]:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/

    with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
        puts(green('Start cloning the master'))
        repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env
        puts(green(repmgr_clone_command))
        puts("-" * 40)
        res = sudo(repmgr_clone_command, user='******')
        if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:
            puts("-" * 40)
            puts(green(repmgr_clone_command))
            puts("-" * 40)
            puts("Master server is %s reachable." % red("NOT"))
            puts("%s you can try to CLONE the slave manually [%s]:" % (green("BUT"), red("at your own risk")))
            puts("On the slave server:")
            puts("$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid" % env)
            puts("Here:")
            puts("$ fab <cluster_task_name> finish_configuring_slave")
            abort("STOP...")
Example #8
def install_node08():
    '''
    Install Node 0.8.18 (0.8.21 for ARM hardware)
    '''

    if not is_arm():
        require.nodejs.installed_from_source('0.8.18')
        print(green('Node 0.8.18 successfully installed'))
    else:
        version = '0.8.21'
        folder = 'node-v%s-linux-arm-pi' % version
        filename = folder + '.tar.gz'
        require.files.directory('/opt/node', use_sudo=True)
        archive_path = 'http://nodejs.org/dist/v%s/%s' % (version, filename)
        require_file(url=archive_path)
        run('tar -xzf %s' % filename)
        sudo('cp -r %s/* /opt/node' % folder)
        sudo('ln -s /opt/node/bin/node  /usr/local/bin/node')
        sudo('ln -s /opt/node/bin/npm  /usr/local/bin/npm')
        su_delete(folder)
        su_delete(filename)
        result = run('node -v')
        if '0.8.21' in result:
            print(green('Node 0.8.21 successfully installed'))
        else:
            print(red('Something went wrong while installing Node 0.8.21'))
Example #9
def notify(msg):
    bar = '+' + '-' * (len(msg) + 2) + '+'
    print green('')
    print green(bar)
    print green("| %s |" % msg)
    print green(bar)
    print green('')
Example #10
def install_extension_from_wp(type, name, version):
  if version == 'master':
    if is_extension_installed(type, name):
      sudo('wp %s update %s --allow-root' % (type, name))
    else:
      install_cmd = sudo('wp %s install %s --allow-root' % (type, name))
      if install_cmd.return_code == 0:
        puts(green('%s %s installed successfully.' % (type, name)))
      else:
        puts(red('%s %s could not install.' % (type, name)))
  else:
    if not is_extension_installed(type, name) or version != get_extension_version(type, name):
      puts(cyan('Plugin not installed or installed at the incorrect version, reinstalling'))
      uninstall_extension(type, name)
      if type == 'plugin':
        url = 'http://downloads.wordpress.org/plugin/%s.%s.zip' % (name, version)
      elif type == 'theme':
        url = 'http://wordpress.org/themes/download/%s.%s.zip' % (name, version)

      try:
        install_cmd = sudo('wp %s install %s --allow-root' % (type, url))
        if install_cmd.return_code == 0:
          puts(green('%s %s installed successfully.' % (type, name)))
        else:
          puts(red('Failed to update %s' % name))
      except SystemExit:
        puts(red('Failed to update %s' % name))
Example #11
def test_deploy():
    print green("Beginning TEST deployment...")
    print""

    print green("Changing directory to '{0}'".format(DEPLOY_PATH))
    with cd(DEPLOY_PATH):
        sudo('ls -al')
Example #12
def _check_pyenv(py_versions):
    """
    Check that pyenv and pyenv-virtualenv are installed and set up the
    compilers/virtual envs in case they do not exist
    """

    if os.system('which pyenv'):
        print red("Can't find pyenv!")
        print yellow("Are you sure you have installed it?")
        sys.exit(-2)
    elif os.system('which pyenv-virtualenv'):
        print red("Can't find pyenv-virtualenv!")
        print yellow("Are you sure you have installed it?")
        sys.exit(-2)

    # list available pyenv versions
    av_versions = os.listdir(os.path.join(env.pyenv_dir, 'versions'))

    for py_version in py_versions:
        if py_version not in av_versions:
            print green('Installing Python {0}'.format(py_version))
            pyenv_cmd('install {0}'.format(py_version), capture=True)

        local("echo \'y\' | pyenv virtualenv {0} indico-build-{0}".format(py_version))

        with pyenv_env(py_version):
            local("pip install -r requirements.dev.txt")
Example #13
def install_wordpress(version, host):
  if version == 'latest':
    # Update wordpress to the latest version
    try:
      sudo("wp core update --allow-root")
      print(green('WordPress installed successfully, moving on to configuration.'))
    except SystemExit:
      return sys.exit(red('WordPress core failed to install. Usually this is a network problem.'))
  else:
    if is_correct_wordpress_version(version):
      puts(green('WordPress is installed at the correct version, no need to update.'))
    else:
      # Not the correct version, so upgrade/downgrade to the correct version
      try:
        sudo("wp core update --version=%s --force --allow-root" % version)
        # recheck version now, since we have no way of knowing if the update ended successfully
        if is_correct_wordpress_version(version):
          print(green('WordPress installed successfully at version %s, moving on to configuration.' % version))
        else:
          sys.exit(red('Something went wrong. Expected WordPress at %s but it did not upgrade successfully.' % version))
      except SystemExit:
        return sys.exit(red('WordPress failed to update!'))

  # Move the configurations into the new wordpress installation
  wp_config = host['wp-config']
  try:
    sudo('cp -R %s configurations' % (wp_config))
    sudo('chmod -R +x configurations')
    sudo('find . -iname \*.php | xargs chmod +x')
    print(green('WordPress fully configured.'))
  except SystemExit:
    return red('WordPress was not properly configured!')
Example #14
def install_node10():
    '''
    install node 0.10.26
    '''

    if is_pi():
        with settings(warn_only=True):
            result = run('node -v')
            is_installed = result.find('v0.10.26')
            if is_installed != -1:
                print(green("Node.js is already installed"))
                return True

        version = '0.10.26'
        node_url = 'http://nodejs.org/dist/v{0}/node-v{0}-linux-arm-pi.tar.gz'
        require.file(url=node_url.format(version))
        run('tar -xzvf node-v%s-linux-arm-pi.tar.gz' % version)
        delete_if_exists('/opt/node')
        require.directory('/opt/node', owner='root')
        sudo('mv node-v%s-linux-arm-pi/* /opt/node' % version)
        sudo('ln -s /opt/node/bin/node /usr/local/bin/node')
        sudo('ln -s /opt/node/bin/node /usr/bin/node')
        sudo('ln -s /opt/node/bin/npm /usr/local/bin/npm')
        sudo('ln -s /opt/node/bin/npm /usr/bin/npm')

    else:
        require.nodejs.installed_from_source('0.10.26')

    print(green('node 0.10.26 successfully installed'))
Example #15
def make_docs(src_dir=None, build_dir=None):
    """
    Generate Indico docs
    """
    _check_present('pdflatex')

    src_dir = src_dir or env.src_dir

    if build_dir is None:
        target_dir = os.path.join(src_dir, 'indico', 'htdocs', 'ihelp')
    else:
        target_dir = os.path.join(build_dir or env.build_dir, 'indico', 'htdocs', 'ihelp')

    print green('Generating documentation')
    with lcd(os.path.join(src_dir, 'doc')):
        for d in DOC_DIRS:
            with lcd(d):
                local('make html')
                local('make latex')
                local('rm -rf {0}/*'.format(os.path.join(target_dir, 'html')))
                local('mv build/html/* {0}'.format(os.path.join(target_dir, 'html')))

        with lcd(os.path.join('guides', 'build', 'latex')):
            local('make all-pdf')
            local('mv *.pdf {0}'.format(os.path.join(target_dir, 'pdf')))

        print green('Cleaning up')
        for d in DOC_DIRS:
            with lcd(d):
                local('make clean')
Example #16
def secure_settings(role='docker'):
    """
    Set the correct permissions for settings.php
    """
    set_env(role)
    fab_run(role, 'chmod 644 {}/sites/default/settings.php'.format(DRUPAL_ROOT))
    print green('settings.php has been secured.')
Example #17
def deploy_test(version):
    " Deploys to test environment "
    print green("Deploying version %s to TEST" % str(version))
    local('git checkout release-%s' % version)
    sync("test")
    local('git push origin release-%s' % version)
    print green("Deploy successful!")
Example #18
def backup_instance(ec2InstanceName):
    conn = aws_connect()
    instance = find_server(conn, ec2InstanceName)
    print(green("Creating machine image from " + instance.id))
    if not instance:
        print(red("Cannot find " + ec2InstanceName))

    print("Backing up instance " + instance.id)
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    imageId = conn.create_image(
        instance.id,
        ec2InstanceName + timestamp,
        no_reboot=False)
    
    image = None

    while image is None:
        try:
            image = conn.get_image(imageId)
        except EC2ResponseError as exc:
            if (exc.error_code != u'InvalidAMIID.NotFound'):
                raise
            time.sleep(10)

    while image.state != u'available':
        print(yellow("image state: %s" % image.state))
        time.sleep(10)
        image.update()

    print(green("image state: %s" % image.state))
    print("Created machine image " + imageId)
Example #19
def docker_tryrun(imgname, containername=None, opts='', mounts=None, cmd='', restart=True):
    # mounts is a list of (from, to, canwrite) path tuples. ``from`` is relative to the project root.
    # Returns True if the container was actually run (False if it was restarted or aborted)
    if not mounts:
        mounts = []
    if containername and containername in docker_ps(running_only=True):
        print green("%s already running" % containername)
        return False
    if containername and containername in docker_ps(running_only=False):
        if restart:
            print green("%s already exists and is stopped. Restarting!" % containername)
            local('docker restart %s' % containername)
            return True
        else:
            print red("There's a dangling container %s! That's not supposed to happen. Aborting" % containername)
            print "Run 'docker rm %s' to remove that container" % containername
            return False
    for from_path, to_path, canwrite in mounts:
        abspath = from_path
        opt = ' -v %s:%s' % (abspath, to_path)
        if not canwrite:
            opt += ':ro'
        opts += opt
    if containername:
        containername_opt = '--name %s' % containername
    else:
        containername_opt = ''
    local('docker run %s %s %s %s' % (opts, containername_opt, imgname, cmd))
    return True
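A hedged usage sketch for the mounts convention described in the comments above; the image name, container name, ports, and paths are made up for illustration only:

docker_tryrun(
    'myproject/web',                          # hypothetical image name
    containername='myproject_web',            # hypothetical container name
    opts='-d -p 8080:80',
    mounts=[
        ('src', '/usr/src/app', True),        # writable bind mount
        ('conf/nginx', '/etc/nginx', False),  # becomes ':ro' (read-only)
    ],
    cmd='nginx -g "daemon off;"',
)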
Example #20
def clean_master():
    with settings(warn_only = True), hide('everything'):
        result = run("killall master")
    if result.failed:
        print red("Could not kill master %s!" % env.host)
    else:
        print green("Killed master %s!" % env.host)
Example #21
def setup_virtualenv():
    """
    Initially creates the virtualenv in the correct places (creating directory structures as necessary) on the
    remote host.
    If necessary, installs setuptools, then pip, then virtualenv (packages)
    """
    print green('In packages module.  Installing VirtualEnv on host machine...')
    require('virtualenv_root', provided_by=('setup_env'))

    with cd('/tmp'):
        if env.os == 'ubuntu':
            sudo('apt-get install -y python-setuptools python-setuptools-devel')
        elif env.os == 'redhat':
            sudo('yum install -y python-setuptools python-setuptools-devel')
        else:
            utils.abort('Unrecognized OS %s!' % env.os)
        sudo('easy_install pip')
        sudo('pip install virtualenv', pty=True, shell=True)

        print yellow('Require user:%(sudo_user)s password!' % env)
        with fab_settings(user=env.sudo_user, sudo_prompt='ARemind sudo password: '):
            sudo('mkdir -p %(www_root)s' % env)
            sudo('chown -R %(www_root)s %(virtualenv_root)s' % env)
            sudo('chgrp -R %(www_root)s %(virtualenv_root)s' % env)
            args = '--clear --distribute'
            sudo('virtualenv %s %s' % (args, env.virtualenv_root), user=env.sudo_user)
    print green('In packages module. Done installing VirtualEnv...')
Example #22
def verify_prerequisites():
    """
    Checks to make sure you have curl (with ssh) and git-ftp installed; attempts installation via brew if you do not.
    """
    with settings(warn_only=True):

        print(colors.cyan("Verifying your installation of curl supports sftp..."))
        ret = local('curl -V | grep sftp', capture=True)
        if ret.return_code == 1:
            print(colors.yellow(
                'Your version of curl does not support sftp. Attempting installation of curl with sftp support via brew...'))
            local('brew update')
            local('brew install curl --with-ssh')
            local('brew link --force curl')
        else:
            print(colors.green('Your installation of curl supports sftp!'))

        print(colors.cyan('Ensuring you have git-ftp installed...'))
        ret = local('git ftp --version', capture=True)
        if ret.return_code == 1:
            print(colors.yellow(
                'You do not have git-ftp installed. Attempting installation via brew...'))
            local('brew update')
            local('brew install git-ftp')
        else:
            print(colors.green('You have git-ftp installed!'))

        print(colors.green('Your system is ready to deploy code!'))
Example #23
def herd(name, newborn=False):
    """Do something with the box named <name>"""
    refresh_boxen()
    if not newborn:  # keep the one set in env.box, to get the admin pass
        env.box = env.boxen[name]

    # get the IPs:
    ips = env.box.addresses['public']
    env.box_public_ips = dict([(ip['version'], ip['addr']) for ip in ips])
    host = 'root@{0}:22'.format(env.box_public_ips[4])
    env.hosts = [host]

    # since we might not have auth on this box, we just change the admin pass
    # every time - this means that even if provisioning of SSH keys fails, we
    # can still get access and it also means that fabric doesn't need to know
    # anything!
    env.passwords = getattr(env, 'passwords', {})
    if host not in env.passwords:
        password = getattr(env.box, 'adminPass', False)
        if not password:
            password = str(uuid4())[:12]
            env.box.change_password(password)
            print white("Changed password of server to:"), red(password)
            time.sleep(10)  # takes a while for change_password to work it seems
        env.passwords[host] = password
    else:
        env.password = env.passwords[host]

    print green(
        "Ok, found server {0}:{1}".format(env.box.name, env.box.id))
Example #24
def full_update_servers(path='/usr/lib/project/', beat_server='taskserver1', ev_server='taskserver1'):
    """
    Stops, updates, starts, then checks the uwsgi, apache, nginx,
    celery, celerybeat, and celeryev on all servers
    
    PARAMS:
        path - A string path to use for the codebase on the server. 
                   If you want to use multiple paths, you can send a single string
                   and separate the paths with pipes "|"
                   (defaults to /usr/lib/project/)
                   
        beat_server - the server that celerybeat is installed on (Defaults to taskserver1)
        
        ev_server - the server that celeryev is installed on (Defaults to taskserver1)
    """
    
    print(colors.green("Starting FULL production update for server {host}".format(host=fab.env.host)))
    
    if getattr(fab.env, 'beat_server', None):
        beat_server = fab.env.beat_server
    if getattr(fab.env, 'ev_server', None):
        ev_server = fab.env.ev_server
        
    stop_servers()
    
    update_servers(path=path)
    
    start_servers(beat_server=beat_server, ev_server=ev_server)
    
    check_servers(path=path, beat_server=beat_server, ev_server=ev_server)
    
    print(colors.green("FULL Production update completed for server {host}".format(host=fab.env.host)))
Example #25
def gen_supervisor_conf(conf_file='local/supervisord.conf.tmp'):
    """Generates a supervisord conf file based on the django template supervisord.conf"""
    # Back up first
    _backup_file(conf_file)

    env.run('python manage.py supervisor getconfig > %s' % conf_file)
    print green("Wrote supervisor conf file to %s" % conf_file)
Example #26
def stop_servers():
    """
    Stops the uwsgi, apache, nginx, celery, celerybeat, and celeryev on all servers
        PARAMS:
        path - A string path to use for the codebase on the server. 
                   If you want to use multiple paths, you can send a single string
                   and separate the paths with pipes "|"
                   (defaults to /usr/lib/project/)
                   
        beat_server - the server that celerybeat is installed on (Defaults to taskserver1)
        
        ev_server - the server that celeryev is installed on (Defaults to taskserver1)
        
    """
    
    print(colors.green("Stopping production for server {host}".format(host=fab.env.host)))
    
    #memcached.memcached_stop()
    uwsgi.uwsgi_stop()    
    apache.apache_stop()
    nginx.nginx_stop()
    celery.celeryd_stop()
    celery.celerybeat_stop()
    celery.celeryevcam_stop()
    
    print(colors.green("Production stopped for server {host}".format(host=fab.env.host)))
Example #27
def start_servers(beat_server='taskserver1', ev_server='taskserver1'):
    """
    Starts the uwsgi, apache, nginx, celery, celerybeat, and celeryev on all servers
    
    PARAMS:
        beat_server - the server that celerybeat is installed on (Defaults to taskserver1)
        
        ev_server - the server that celeryev is installed on (Defaults to taskserver1)
    """
    
    print(colors.green("Starting production for server {host}".format(host=fab.env.host)))
    
    if getattr(fab.env, 'beat_server', None):
        beat_server = fab.env.beat_server
    if getattr(fab.env, 'ev_server', None):
        ev_server = fab.env.ev_server
        
    #memcached.memcached_start()
    uwsgi.uwsgi_start()    
    nginx.nginx_start()
    celery.celeryd_start()
    celery.celerybeat_start(beat_server=beat_server)
    celery.celeryevcam_start(ev_server=ev_server) 
    
    print(colors.green("Production started for server {host}".format(host=fab.env.host)))
Example #28
def graze(name, dev=None, mkfs=False):
    """Attaches a server to a storage.

    :param str name: name of storage to attach this server
    :param str dev: name of block device to attach, defaults to /dev/xvdb
    :param bool mkfs: set True to mkfs on the (new) device.

    """
    require('box', provided_by=[herd, birth])
    dev = dev or '/dev/xvdb'
    cbs = pyrax.cloud_blockstorage
    storages = cbs.list()
    for vol in storages:
        if vol.name == name:
            mnt = "/mnt/{0}".format(name)
            execute(run, command='mkdir -p {0}'.format(mnt))
            vol.attach_to_instance(env.box, mountpoint=dev)
            print green(
                "Attached storage {0} to {1} on {2}".format(name, env.box, dev)
            )
            print green("Waiting for volume to attach...")
            pyrax.utils.wait_until(vol, 'status', ['in-use'])
            print green("Attached!")
            if mkfs:
                execute(run, command="mkfs.ext4 {0}".format(dev))
                print green("Made fs (ext4) on {0}".format(dev))
            execute(run, command="mount -t ext4 {0} {1}".format(dev, mnt))
            print green("Mounted {0} at {1}".format(dev, mnt))
            return
Example #29
def staging():
    # path to the directory on the server where your vhost is set up
    home = '/home/web'
    path = "/home/web/webapps"
    # name of the restart shell script; it should restart the gunicorn, nginx, apache or uwsgi processes
    process = "restart.sh"

    print(red("Beginning Deploy:"))
    with cd("{path}/project_directory".format(path=path)):
        run("pwd")
        branch_name = prompt('Checkout to which branch? ')

        print(green("Pulling {branch_name} from GitHub...".format(branch_name=branch_name)))
        run("git pull origin {branch_name}".format(branch_name=branch_name))

        print(green("Installing requirements..."))
        run("source {home}/.virtualenvs/virtualenv_name/bin/activate && pip install -r requirements.txt".format(home=home))

        print(green("Collecting static files..."))
        run("source {home}/.virtualenvs/virtualenv_name/bin/activate && python manage.py collectstatic --noinput".format(home=home))

        print(green("Migrating the database..."))
        run("source {home}/.virtualenvs/virtualenv_name/bin/activate && python manage.py migrate".format(home=home))

    with cd('{path}'.format(path=path)):
        print(green("Restart the gunicorn and nginx process"))
        run("./restart.sh")

    print(red("DONE!"))
Example #30
def install_db_patches():
    with prefix(env.activate):
        with prefix(env.proj_pythonpath):
            with show('stdout'):
                print green("Installing db_patches")
                src = os.path.join(env.proj_dir, 'src', 'install_db_patches.py')
                run('python %s update' % src)
Example #31
def restart_gunicorn():
	puts(green("Now Restarting Gunicorn"))
	with settings(warn_only=True):
		result = run("sudo restart ufadhili")
		if result.failed and not confirm("Failed to start gunicorn. Continue anyway?"):
			abort("Aborting at user request.")
Example #32
def install_requirements():
    local("pip install -r requirements.txt")
    print(green("Installed requirements.", bold=True))
Example #33
def git_pull():
    local("git pull origin")
    print(green("Updated local code.", bold=True))
Example #34
 def logged(*args, **kawrgs):
     header = "-" * len(func.__name__)
     _print(green("\n".join([header, func.__name__, header]), bold=True))
     return func(*args, **kawrgs)
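This snippet is only the inner wrapper of a decorator: `func` and `_print` come from an enclosing scope that the excerpt does not show. A minimal sketch of the surrounding decorator it could belong to, reconstructed from context (the name `print_banner` is hypothetical, and `_print` is assumed to be a print helper defined in the original module):

def print_banner(func):
    def logged(*args, **kawrgs):
        # Print a green banner with the wrapped function's name, then call it.
        header = "-" * len(func.__name__)
        _print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kawrgs)
    return logged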
Example #35
def print_succeed():
    print("[", end="")
    print(green("OK"), end="")
    print("]")
Example #36
#!/usr/bin/env python
"""
This script is used to run tests, create a coverage report and output the
statistics at the end of the tox run.
To run this script just execute ``tox``
"""
import re

from fabric.api import local, warn
from fabric.colors import green, red

if __name__ == '__main__':
    local('flake8 --ignore=E126 --ignore=W391 --statistics'
          ' --exclude=submodules,migrations,south_migrations,build .')
    local('coverage run --source="review" manage.py test -v 2'
          ' --traceback --failfast'
          ' --settings=review.tests.settings'
          ' --pattern="*_tests.py"')
    local('coverage html -d coverage --omit="*__init__*,*/settings/*,'
          '*/migrations/*,*/south_migrations/*,*/tests/*,*admin*"')
    total_line = local('grep -n pc_cov coverage/index.html', capture=True)
    percentage = float(re.findall(r'(\d+)%', total_line)[-1])
    if percentage < 100:
        warn(red('Coverage is {0}%'.format(percentage)))
    print(green('Coverage is {0}%'.format(percentage)))
Example #37
def deploy():
    _update_app()
    _restart_app()
    print green('Deploy successfully done!')
Example #38
def restart_elasticsearch():
	puts(green("Restarting elasticsearch"))
	with(settings(warn_only=True)):
		result = run("sudo /etc/init.d/elasticsearch restart")
		if result.failed and not confirm("There was a problem restarting elasticsearch. Continue anyway?"):
			abort("Aborting at your request")
Example #39
def test_configuration(verbose=True):
    errors = []
    parameters_info = []
    if 'project' not in env or not env.project:
        errors.append('Project name missing')
    elif verbose:
        parameters_info.append(('Project name', env.project))
    if 'repository' not in env or not env.repository:
        errors.append('Repository url missing')
    elif verbose:
        parameters_info.append(('Repository url', env.repository))
    if 'hosts' not in env or not env.hosts:
        errors.append('Hosts configuration missing')
    elif verbose:
        parameters_info.append(('Hosts', env.hosts))
    if 'django_user' not in env or not env.django_user:
        errors.append('Django user missing')
    elif verbose:
        parameters_info.append(('Django user', env.django_user))
    if 'django_user_group' not in env or not env.django_user_group:
        errors.append('Django user group missing')
    elif verbose:
        parameters_info.append(('Django user group', env.django_user_group))
    if 'django_user_home' not in env or not env.django_user_home:
        errors.append('Django user home dir missing')
    elif verbose:
        parameters_info.append(('Django user home dir', env.django_user_home))
    if 'projects_path' not in env or not env.projects_path:
        errors.append('Projects path configuration missing')
    elif verbose:
        parameters_info.append(('Projects path', env.projects_path))
    if 'code_root' not in env or not env.code_root:
        errors.append('Code root configuration missing')
    elif verbose:
        parameters_info.append(('Code root', env.code_root))
    if 'django_project_root' not in env or not env.django_project_root:
        errors.append('Django project root configuration missing')
    elif verbose:
        parameters_info.append(
            ('Django project root', env.django_project_root))
    if 'django_project_settings' not in env or not env.django_project_settings:
        env.django_project_settings = 'settings'
    if verbose:
        parameters_info.append(
            ('django_project_settings', env.django_project_settings))
    if 'django_media_path' not in env or not env.django_media_path:
        errors.append('Django media path configuration missing')
    elif verbose:
        parameters_info.append(('Django media path', env.django_media_path))
    if 'django_static_path' not in env or not env.django_static_path:
        errors.append('Django static path configuration missing')
    elif verbose:
        parameters_info.append(('Django static path', env.django_static_path))
    if 'south_used' not in env:
        errors.append('"south_used" configuration missing')
    elif verbose:
        parameters_info.append(('south_used', env.south_used))
    if 'virtenv' not in env or not env.virtenv:
        errors.append('virtenv configuration missing')
    elif verbose:
        parameters_info.append(('virtenv', env.virtenv))
    if 'virtenv_options' not in env or not env.virtenv_options:
        errors.append(
            '"virtenv_options" configuration missing, you must have at least one option'
        )
    elif verbose:
        parameters_info.append(('virtenv_options', env.virtenv_options))
    if 'requirements_file' not in env or not env.requirements_file:
        env.requirements_file = join(env.code_root, 'requirements.txt')
    if verbose:
        parameters_info.append(('requirements_file', env.requirements_file))
    if 'ask_confirmation' not in env:
        errors.append('"ask_confirmation" configuration missing')
    elif verbose:
        parameters_info.append(('ask_confirmation', env.ask_confirmation))
    if 'gunicorn_bind' not in env or not env.gunicorn_bind:
        errors.append('"gunicorn_bind" configuration missing')
    elif verbose:
        parameters_info.append(('gunicorn_bind', env.gunicorn_bind))
    if 'gunicorn_logfile' not in env or not env.gunicorn_logfile:
        errors.append('"gunicorn_logfile" configuration missing')
    elif verbose:
        parameters_info.append(('gunicorn_logfile', env.gunicorn_logfile))
    if 'rungunicorn_script' not in env or not env.rungunicorn_script:
        errors.append('"rungunicorn_script" configuration missing')
    elif verbose:
        parameters_info.append(('rungunicorn_script', env.rungunicorn_script))
    if 'gunicorn_workers' not in env or not env.gunicorn_workers:
        errors.append(
            '"gunicorn_workers" configuration missing, you must have at least one worker'
        )
    elif verbose:
        parameters_info.append(('gunicorn_workers', env.gunicorn_workers))
    if 'gunicorn_worker_class' not in env or not env.gunicorn_worker_class:
        errors.append('"gunicorn_worker_class" configuration missing')
    elif verbose:
        parameters_info.append(
            ('gunicorn_worker_class', env.gunicorn_worker_class))
    if 'gunicorn_loglevel' not in env or not env.gunicorn_loglevel:
        errors.append('"gunicorn_loglevel" configuration missing')
    elif verbose:
        parameters_info.append(('gunicorn_loglevel', env.gunicorn_loglevel))
    if 'nginx_server_name' not in env or not env.nginx_server_name:
        errors.append('"nginx_server_name" configuration missing')
    elif verbose:
        parameters_info.append(('nginx_server_name', env.nginx_server_name))
    if 'nginx_conf_file' not in env or not env.nginx_conf_file:
        errors.append('"nginx_conf_file" configuration missing')
    elif verbose:
        parameters_info.append(('nginx_conf_file', env.nginx_conf_file))
    if 'nginx_client_max_body_size' not in env or not env.nginx_client_max_body_size:
        env.nginx_client_max_body_size = 10
    elif not isinstance(env.nginx_client_max_body_size, int):
        errors.append('"nginx_client_max_body_size" must be an integer value')
    if verbose:
        parameters_info.append(
            ('nginx_client_max_body_size', env.nginx_client_max_body_size))
    if 'nginx_htdocs' not in env or not env.nginx_htdocs:
        errors.append('"nginx_htdocs" configuration missing')
    elif verbose:
        parameters_info.append(('nginx_htdocs', env.nginx_htdocs))

    if 'nginx_https' not in env:
        env.nginx_https = False
    elif not isinstance(env.nginx_https, bool):
        errors.append('"nginx_https" must be a boolean value')
    elif verbose:
        parameters_info.append(('nginx_https', env.nginx_https))

    if 'supervisor_program_name' not in env or not env.supervisor_program_name:
        env.supervisor_program_name = env.project
    if verbose:
        parameters_info.append(
            ('supervisor_program_name', env.supervisor_program_name))
    if 'supervisorctl' not in env or not env.supervisorctl:
        errors.append('"supervisorctl" configuration missing')
    elif verbose:
        parameters_info.append(('supervisorctl', env.supervisorctl))
    if 'supervisor_autostart' not in env or not env.supervisor_autostart:
        errors.append('"supervisor_autostart" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisor_autostart', env.supervisor_autostart))
    if 'supervisor_autorestart' not in env or not env.supervisor_autorestart:
        errors.append('"supervisor_autorestart" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisor_autorestart', env.supervisor_autorestart))
    if 'supervisor_redirect_stderr' not in env or not env.supervisor_redirect_stderr:
        errors.append('"supervisor_redirect_stderr" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisor_redirect_stderr', env.supervisor_redirect_stderr))
    if 'supervisor_stdout_logfile' not in env or not env.supervisor_stdout_logfile:
        errors.append('"supervisor_stdout_logfile" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisor_stdout_logfile', env.supervisor_stdout_logfile))
    if 'supervisord_conf_file' not in env or not env.supervisord_conf_file:
        errors.append('"supervisord_conf_file" configuration missing')
    elif verbose:
        parameters_info.append(
            ('supervisord_conf_file', env.supervisord_conf_file))

    if errors:
        if len(errors) == 29:
            # all configuration missing
            puts(
                'Configuration missing! Please read README.rst first or go ahead at your own risk.'
            )
        else:
            puts('Configuration test revealed %i errors:' % len(errors))
            puts('%s\n\n* %s\n' % ('-' * 37, '\n* '.join(errors)))
            puts('-' * 40)
            puts('Please fix them or go ahead at your own risk.')
        return False
    elif verbose:
        for parameter in parameters_info:
            parameter_formatting = "'%s'" if isinstance(parameter[1],
                                                        str) else "%s"
            parameter_value = parameter_formatting % parameter[1]
            puts('%s %s' % (parameter[0].ljust(27), green(parameter_value)))
    puts('Configuration tests passed!')
    return True
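Every check above repeats the same pattern per setting. A hedged sketch of the same pattern written data-driven, for illustration only (the labels are taken from the checks above; settings with fallback defaults, such as requirements_file or nginx_https, would still need their special-case handling):

REQUIRED_SETTINGS = [
    ('project', 'Project name'),
    ('repository', 'Repository url'),
    ('hosts', 'Hosts configuration'),
    # ... one (env key, label) pair per strictly required setting
]

def check_required(verbose=True):
    errors, parameters_info = [], []
    for key, label in REQUIRED_SETTINGS:
        if key not in env or not env[key]:
            errors.append('%s missing' % label)
        elif verbose:
            parameters_info.append((label, env[key]))
    return errors, parameters_info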
Example #40
def reload_nginx():
	puts(green("Reloading nginx"))
	with settings(warn_only=True):
		result = run("sudo /etc/init.d/nginx restart")
		if result.failed and not confirm("Unable to restart nginx. Continue anyway?"):
			abort("Aborting at user request.")	
Example #41
def version():
    print green(json.dumps(get_version()))
Example #42
def stage(message):
    """
    Show `message` about current stage
    """
    print(green("\n *** {0}".format(message), bold=True))
Example #43
def lint():
    """Check for lints"""
    print green('Checking for lints')
    return local("flake8 `find . -name '*.py' -not -path '*env/*'` "
                 "--ignore=E711,E712 --max-line-length=100").succeeded
Example #44
def install_openstack(settings_dict,
                      envs=None,
                      verbose=None,
                      url_script=None,
                      prepare=False,
                      force=False,
                      config=None,
                      use_cobbler=False,
                      proxy=None):
    """
        Install OS with COI on build server

    :param settings_dict: settings dictionary for Fabric
    :param envs: environment variables to inject when executing job
    :param verbose: whether to hide all output or print everything
    :param url_script: URL of the Cisco installer script (from Chris)
    :param force: use if you are not connecting via the interface you are going to bridge later
    :return: always True
    """
    envs = envs or {}
    verbose = verbose or []
    if settings_dict['user'] != 'root':
        use_sudo_flag = True
        run_func = sudo
    else:
        use_sudo_flag = False
        run_func = run
    with open(os.path.join(CONFIG_PATH, "buildserver_yaml")) as f:
        build_yaml = f.read()
    roles_file = role_mappings(config)
    print "Job settings", settings_dict
    print "Env settings", envs
    print >> sys.stderr, roles_file
    with settings(**settings_dict), hide(*verbose), shell_env(**envs):
        with cd("/root/"):
            if proxy:
                warn_if_fail(
                    put(StringIO(
                        'Acquire::http::proxy "http://proxy.esl.cisco.com:8080/";'
                    ),
                        "/etc/apt/apt.conf.d/00proxy",
                        use_sudo=use_sudo_flag))
                warn_if_fail(
                    put(StringIO('Acquire::http::Pipeline-Depth "0";'),
                        "/etc/apt/apt.conf.d/00no_pipelining",
                        use_sudo=use_sudo_flag))
            run_func("apt-get update")
            run_func("apt-get install -y git")
            run_func("git config --global user.email '*****@*****.**';"
                     "git config --global user.name 'Test Node'")
            if not force and not prepare:
                update_time(run_func)
                # avoid grub and other prompts
                warn_if_fail(
                    run_func('DEBIAN_FRONTEND=noninteractive apt-get -y '
                             '-o Dpkg::Options::="--force-confdef" -o '
                             'Dpkg::Options::="--force-confold" dist-upgrade'))
                warn_if_fail(
                    run_func(
                        "git clone -b icehouse "
                        "https://github.com/CiscoSystems/puppet_openstack_builder"
                    ))

                ## run the latest, not i.0 release
                sed("/root/puppet_openstack_builder/install-scripts/cisco.install.sh",
                    "icehouse/snapshots/i.0",
                    "icehouse-proposed",
                    use_sudo=use_sudo_flag)
                sed("/root/puppet_openstack_builder/data/hiera_data/vendor/cisco_coi_common.yaml",
                    "/snapshots/i.0",
                    "-proposed",
                    use_sudo=use_sudo_flag)
                with cd("puppet_openstack_builder/install-scripts"):
                    warn_if_fail(run_func("./install.sh"))
                run_func(
                    "cp /etc/puppet/data/hiera_data/user.common.yaml /tmp/myfile"
                )
                fd = StringIO()
                warn_if_fail(
                    get("/etc/puppet/data/hiera_data/user.common.yaml", fd))
                new_user_common = prepare2role(config, fd.getvalue())
                print " >>>> FABRIC new user.common.file\n", new_user_common
                warn_if_fail(
                    put(StringIO(new_user_common),
                        "/etc/puppet/data/hiera_data/user.common.yaml",
                        use_sudo=use_sudo_flag))
                warn_if_fail(
                    put(StringIO(roles_file),
                        "/etc/puppet/data/role_mappings.yaml",
                        use_sudo=use_sudo_flag))
                fd = StringIO()
                warn_if_fail(get("/etc/puppet/data/cobbler/cobbler.yaml", fd))
                new_cobbler = prepare_cobbler(config, fd.getvalue())
                warn_if_fail(
                    put(StringIO(new_cobbler),
                        "/etc/puppet/data/cobbler/cobbler.yaml",
                        use_sudo=use_sudo_flag))
                resolve_names(run_func, use_sudo_flag)
                result = run_func(
                    'puppet apply -v /etc/puppet/manifests/site.pp')
                tries = 1
                if use_cobbler:
                    cobbler_error = "[cobbler-sync]/returns: unable to connect to cobbler on localhost using cobbler"
                    while cobbler_error in result and tries <= APPLY_LIMIT:
                        time.sleep(60)
                        print >> sys.stderr, "Cobbler is not installed properly, running apply again"
                        result = run_func(
                            'puppet apply -v /etc/puppet/manifests/site.pp',
                            pty=False)
                        tries += 1
                error = "Error:"
                while error in result and tries <= APPLY_LIMIT:
                    time.sleep(60)
                    print >> sys.stderr, "Some errors found, running apply again"
                    result = run_func(
                        'puppet apply -v /etc/puppet/manifests/site.pp',
                        pty=False)
                    tries += 1
                if exists('/root/openrc'):
                    get('/root/openrc', "./openrc")
                else:
                    print(red("No openrc file, something went wrong! :("))
                print(green("Copying logs and configs"))
                collect_logs(
                    run_func=run_func,
                    hostname=config["servers"]["build-server"][0]["hostname"],
                    clean=True)
                print(green("Finished!"))
                return True
            elif not force and prepare:
                return True
    print(green("Finished!"))
    return True
Example #45
def pretty_pip(pkglist):
    for pkg in (pkglist):
        fabtools.python.install(pkg)
        print(green(u'Python module "' + unicode(pkg) + u'" installed.'))
Example #46
def reload():
    execute(bump_version)
    local(
        "wget --delete-after http://admin:admin@localhost:8080/@@reload?action=code"
    )
    print green("RELOADED CODE")
Example #47
 def success(self, message):
     # Function to display and tally success of a debug step
     self.passed +=1
     print green(self.message_style.format('DEBUG PASSED', message))
Example #48
def clean():
    """Remove all .pyc files."""
    print green('Clean up .pyc files')
    local("find . -name '*.py[co]' -exec rm -f '{}' ';'")
Example #49
def tomcat_shutdown():
    """
    Stop tomcat7
    """
    run('sudo service tomcat7 stop')
    print(green('tomcat7 shutdown complete', True))
Example #50
def pretty_apt(pkglist):
    for pkg in (pkglist):
        require.deb.package(pkg)
        print(green(u'Debian package "' + unicode(pkg) + u'" installed.'))
Example #51
 def do_node(host):
     puts(green('Installing GlusterFS on {}'.format(host)), flush=True)
     execute(install, host=host)
Example #52
def tomcat_startup():
    """
    Start tomcat7
    """
    run('sudo service tomcat7 start')
    print(green('tomcat7 restart complete', True))
Example #53
def GitHub_release(username=None,
                   user='******',
                   token=None,
                   token_file_path="~/.sympy/release-token",
                   repo='sympy',
                   draft=False):
    """
    Upload the release files to GitHub.

    The tag must be pushed up first. You can test on another repo by changing
    user and repo.
    """
    if not requests:
        error(
            "requests and requests-oauthlib must be installed to upload to GitHub"
        )

    release_text = GitHub_release_text()
    version = get_sympy_version()
    short_version = get_sympy_short_version()
    tag = 'sympy-' + version
    prerelease = short_version != version

    urls = URLs(user=user, repo=repo)
    if not username:
        username = raw_input("GitHub username: "******"The tag for this version has not been pushed yet. Cannot upload the release."
        )

    # See http://developer.github.com/v3/repos/releases/#create-a-release
    # First, create the release
    post = {}
    post['tag_name'] = tag
    post['name'] = "SymPy " + version
    post['body'] = release_text
    post['draft'] = draft
    post['prerelease'] = prerelease

    print("Creating release for tag", tag, end=' ')

    result = query_GitHub(urls.releases_url,
                          username,
                          password=None,
                          token=token,
                          data=json.dumps(post)).json()
    release_id = result['id']

    print(green("Done"))

    # Then, upload all the files to it.
    for key in descriptions:
        tarball = get_tarball_name(key)

        params = {}
        params['name'] = tarball

        if tarball.endswith('gz'):
            headers = {'Content-Type': 'application/gzip'}
        elif tarball.endswith('pdf'):
            headers = {'Content-Type': 'application/pdf'}
        elif tarball.endswith('zip'):
            headers = {'Content-Type': 'application/zip'}
        else:
            headers = {'Content-Type': 'application/octet-stream'}

        print("Uploading", tarball, end=' ')
        sys.stdout.flush()
        with open(os.path.join("release", tarball), 'rb') as f:
            result = query_GitHub(urls.release_uploads_url % release_id,
                                  username,
                                  password=None,
                                  token=token,
                                  data=f,
                                  params=params,
                                  headers=headers).json()

        print(green("Done"))
Example #54
def info(msg):
    ''' Print a message (Information) '''
    print '\n' + green(msg)
Example #55
def delete_stack(stack_name, delete_failed_stacks=False):
    """
    :param stack_name: Stack name or id to delete
    """

    print ""
    cf = boto.cloudformation.connect_to_region(region)

    def find_stack(stack_name):
        """
        :type stack_name: str
        :rtype: boto.cloudformation.stack.Stack
        """
        return _find_stack(stack_name, connection=cf)

    print "Stack Name:        " + stack_name

    stack = find_stack(stack_name)
    if stack:

        print ""

        status = False

        # Allow deletion of failed stacks?
        if delete_failed_stacks and stack.stack_status in [
                'CREATE_FAILED', 'DELETE_FAILED'
        ]:
            print '*** Delete failed stack %(stack_name)s' % {
                'stack_name': stack_name
            }
            status = cf.delete_stack(stack_name)

        else:

            # Delete only completed stacks
            try:
                validate_stack(stack)
            except StackNotReadyException as e:
                print red("*** " + str(e))
                return False

            print '*** Delete existing stack %(stack_name)s' % {
                'stack_name': stack_name
            }
            status = cf.delete_stack(stack_name)

        events = StackEventStream(stack, cf)

        stack = find_stack(stack_name)
        if not status:
            print '*** Stack deleting failed - stack status: ' + red(
                stack.stack_status)
            return False

        _print_events(events.new_events())

        # Update stack status while deleting is still in progress
        while stack.stack_status in ['DELETE_IN_PROGRESS']:
            sleep(5)
            stack = find_stack(stack.stack_id)
            _print_events(events.new_events())

        if stack.stack_status == 'DELETE_COMPLETE':
            print '*** Stack deleting complete - stack status: ' + green(
                stack.stack_status)
            return True
        else:
            print '*** Stack deleting failed - stack status: ' + red(
                stack.stack_status)
            return False

    else:

        print ""

        print red('*** Stack not found')
        return False
Example #56
def debugserver():
    print green("We're starting Django's built-in server. Access it through http://demo-django.local:8081")
    with cd(op.join(BASE_PATH, 'src')):
        run('../env/bin/python manage.py runserver 0.0.0.0:8080')
Example #57
def cli_command_init(args):
    """
  Creates init files (default folder path is `deploy/`)
  """

    # Show backup message after finish?
    global backup_happened

    def backup_extension(timestamp):
        """
    Generates extension for backup file
    """

        return str(timestamp) + '.backup'

    def create_or_backup(filepath, content, backup_timestamp=None):
        """
    Creates new file and backups old file if necessary
    """

        if filepath:
            fullpath = os.path.join(os.path.realpath(os.curdir), filepath)
            directory, deployfile = os.path.split(fullpath)
            timestamp = time.time() if backup_timestamp is None else backup_timestamp
            backup_fullpath = "%s.%s" % (fullpath, backup_extension(timestamp))

            try:
                # deployfile directory (create)
                if not os.path.exists(directory):
                    os.makedirs(directory)

                # deployfile (backup, create)
                if not os.path.exists(fullpath):
                    create_file(filepath, content)
                else:
                    if confirm('File %s exists. Backup?' % red(filepath)):
                        global backup_happened
                        backup_happened = True

                        os.rename(fullpath, backup_fullpath)
                        print(
                            green('Created backup file %s' %
                                  yellow(backup_fullpath)))

                    create_file(filepath, content)

            except Exception as error:
                print(red(error))

    def create_file(filepath, content):
        """
    Writes content to file
    """

        fullpath = os.path.join(os.path.realpath(os.curdir), filepath)

        try:
            with open(filepath, 'w') as f:
                f.write(content)
            print(
                green('Deployfile successfully created in %s' %
                      yellow(fullpath)))
        except Exception as error:
            print(red(error))

    backup_happened = False
    staged = args.get('staged')
    backup_timestamp = time.time()

    if staged is False:
        create_or_backup(args.get('filename'), base_template, backup_timestamp)
    else:
        default_directory = 'deploy/' if staged is None else staged
        templates_files = generate_staged_templates()

        for template_file_key, template_content in templates_files.items():
            filepath = os.path.join(default_directory, '%s.py' % template_file_key)
            create_or_backup(filepath, template_content, backup_timestamp)

        cli_staged_init_warning(default_directory)

    # show backup message
    if backup_happened:
        print(
            green('All backed-up files have the extension ') +
            red(backup_extension(backup_timestamp)))
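
A brief usage sketch, assuming `base_template`, `generate_staged_templates`, and `cli_staged_init_warning` are defined elsewhere in the same module (they are not shown here); the argument dictionaries mirror the `args.get()` calls above and are illustrative only:

# Hypothetical CLI argument dictionaries for cli_command_init.
cli_command_init({'filename': 'Deployfile', 'staged': False})    # single init file
cli_command_init({'filename': None, 'staged': None})             # staged layout under deploy/
cli_command_init({'filename': None, 'staged': 'environments/'})  # staged layout in a custom folder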
Example #58
0
def provision_stack_with_template(stack_name,
                                  template_file,
                                  tags=None,
                                  disable_rollback=False,
                                  parameters=None,
                                  return_stack=False):
    """
    Provision a new CloudFormation stack with the given name and template file.
    :type stack_name: str
    :type template_file: str
    :type tags: dict
    :type disable_rollback: bool
    """

    print ""
    cf = boto.cloudformation.connect_to_region(region)

    def find_stack(stack_name):
        """
        :type stack_name: str
        :rtype: boto.cloudformation.stack.Stack
        """
        return _find_stack(stack_name, connection=cf)

    # read the template body from the file
    with open(template_file) as f:
        template = f.read()

    try:
        valid = validate_template(template, connection=cf)
    except ValidationErrorException as e:
        print red("*** Template validation failed: " + str(e))
        result = CloudFormationResult()
        result.failed = True
        result.succeeded = False
        result.error = str(e)
        return result

    print "Stack Name:        " + stack_name
    print "Stack Description: " + valid.description
    print ""

    stack = find_stack(stack_name)
    if stack:

        try:
            validate_stack(stack)
        except StackNotReadyException as e:
            print red("*** " + str(e))
            result = CloudFormationResult()
            result.failed = True
            result.succeeded = False
            result.error = str(e)
            return result

        print "Stack Status: " + stack.stack_status

        print '*** Updating existing stack %(stack_name)s' % {
            'stack_name': stack_name
        }
        try:
            stack_id = cf.update_stack(stack_name,
                                       template_body=template,
                                       tags=tags,
                                       disable_rollback=disable_rollback,
                                       parameters=parameters)
        except boto.exception.BotoServerError as e:
            if e.message == "No updates are to be performed.":
                print yellow("*** Update failed: " + e.message)

                if return_stack:
                    return stack

                result = CloudFormationResult()
                result.failed = False
                result.succeeded = not result.failed
                result.stack = stack
                return result

            else:
                print red("*** Update failed: " + e.message)
                result = CloudFormationResult()
                result.failed = True
                result.succeeded = not result.failed
                result.error = str(e.message)
                return result

    else:
        print '*** Creating new stack %(stack_name)s' % {
            'stack_name': stack_name
        }
        stack_id = cf.create_stack(stack_name,
                                   template_body=template,
                                   tags=tags,
                                   disable_rollback=disable_rollback,
                                   parameters=parameters)

    # These are the statuses for successful builds
    desired_stack_statuses = [
        "CREATE_COMPLETE", "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
        "UPDATE_COMPLETE"
    ]

    if not stack_id:
        # Failed to create new stack
        result = CloudFormationResult()
        result.failed = True
        result.succeeded = not result.failed
        result.error = "Failed to create new stack"
        return result

    stack = find_stack(stack_id)
    events = StackEventStream(stack, cf)

    _print_events(events.new_events())

    # Update stack status while create or update is still in progress
    while stack.stack_status in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']:
        sleep(5)
        stack = find_stack(stack_id)
        _print_events(events.new_events())

    if stack.stack_status in desired_stack_statuses:
        print green('*** Stack provisioning complete - stack status: ' +
                    stack.stack_status)

        if return_stack:
            return stack

        result = CloudFormationResult()
        result.failed = False
        result.succeeded = not result.failed
        result.stack = stack
        return result

    else:

        print red('*** Stack provisioning failed - stack status: ' +
                  stack.stack_status)
        result = CloudFormationResult()
        result.failed = True
        result.succeeded = not result.failed
        result.error = "Stack provisioning failed"
        return result
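
A short usage sketch for the function above, assuming the module-level `region` variable it reads is already configured; the stack name, template path, tags, and parameters below are hypothetical:

# Hypothetical call; parameters follow boto's (key, value) tuple convention.
result = provision_stack_with_template(
    'demo-app-stack',
    'templates/app.json',
    tags={'Environment': 'staging'},
    parameters=[('InstanceType', 't2.micro')],
)
if result.succeeded:
    print green('Stack ready: ' + result.stack.stack_id)
else:
    print red('Provisioning failed: ' + str(result.error))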
Example #59
0
def test_kraken(instance,
                fail_if_error=True,
                wait=False,
                loaded_is_ok=None,
                hosts=None):
    """Test kraken with '?instance='"""
    instance = get_real_instance(instance)
    wait = get_bool_from_cli(wait)

    hosts = [h.split('@')[1] for h in hosts or instance.kraken_engines]
    will_return = len(hosts) == 1
    for host in hosts:
        request = 'http://{}:{}/{}/?instance={}'.format(
            host, env.kraken_monitor_port, env.kraken_monitor_location_dir,
            instance.name)

        if wait:
            # we wait until we get a response and the instance is 'loaded'
            try:
                result = Retrying(
                    stop_max_delay=env.KRAKEN_RESTART_DELAY * 1000,
                    wait_fixed=1000,
                    retry_on_result=lambda x: x is None or not x['loaded']
                ).call(_test_kraken, request, fail_if_error)
            except Exception as ex:
                print(
                    red("ERROR: could not reach {}, too many retries ! ({})".
                        format(instance.name, ex)))
                result = {'status': False}
        else:
            result = _test_kraken(request, fail_if_error)

        try:
            if result['status'] != 'running':
                if result['status'] == 'no_data':
                    print(
                        yellow(
                            "WARNING: instance {} has no loaded data".format(
                                instance.name)))
                    if will_return:
                        return False
                if fail_if_error:
                    print(
                        red("ERROR: Instance {} is not running ! ({})".format(
                            instance.name, result)))
                    if will_return:
                        return False
                print(
                    yellow("WARNING: Instance {} is not running ! ({})".format(
                        instance.name, result)))
                if will_return:
                    return False

            if not result['is_connected_to_rabbitmq']:
                print(
                    yellow("WARNING: Instance {} is not connected to rabbitmq".
                           format(instance.name)))
                if will_return:
                    return False

            if loaded_is_ok is None:
                loaded_is_ok = wait
            if not loaded_is_ok:
                if result['loaded']:
                    print(
                        yellow("WARNING: instance {} has loaded data".format(
                            instance.name)))
                    if will_return:
                        return True
                else:
                    print(
                        green("OK: instance {} has correct values: {}".format(
                            instance.name, result)))
                    if will_return:
                        return False
            else:
                if result['loaded']:
                    print(
                        green("OK: instance {} has correct values: {}".format(
                            instance.name, result)))
                    if will_return:
                        return True
                elif fail_if_error:
                    abort(
                        red("CRITICAL: instance {} has no loaded data".format(
                            instance.name)))
                else:
                    print(
                        yellow(
                            "WARNING: instance {} has no loaded data".format(
                                instance.name)))
                    if will_return:
                        return False
        except KeyError:
            print(
                red("CRITICAL: instance {} does not return a correct result".
                    format(instance.name)))
            print(result)
            if fail_if_error:
                abort('')
        return False
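
A minimal usage sketch; the instance name is a placeholder that `get_real_instance` is assumed to resolve, and `fail_if_error=False` keeps the check from aborting the whole run:

# Hypothetical health check against a single-host instance.
if test_kraken('fr-idf', fail_if_error=False, wait=True):
    print(green('kraken is up and loaded for fr-idf'))
else:
    print(yellow('kraken is not (yet) serving loaded data for fr-idf'))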
Example #60
0
def set_default_locale(locale='en_US.UTF-8'):
    locale_setup = 'LC_ALL="{}"'.format(locale)
    append('/etc/environment', locale_setup, use_sudo=True)

    print(green('Default locale set to {}'.format(locale)))
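
A closing usage sketch; the host string is a placeholder and the call relies on the task's default 'en_US.UTF-8' argument:

# Hypothetical invocation against a single host.
from fabric.api import execute

execute(set_default_locale, hosts=['deploy@example.com'])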