Example #1
    def run(self):
        from soda.host import nginx  # Import here to avoid wrong Fabric --list

        # Stop nginx first
        execute(nginx.stop)

        # Retrieve necessary information from the user
        email = input('Insert the certificate manager email address: ')
        domains = input('Insert the domains to apply: ').split()

        user = settings(user='******')
        cwd = cd(self.roledef.get('letsencrypt_dir', '/opt/letsencrypt'))
        warn_only = settings(warn_only=True)

        # Generate the certificate
        with user, cwd, warn_only:
            result = run((
                './letsencrypt-auto certonly --standalone '
                '--email {email} {domains}'
                .format(
                    email=email,
                    domains=' '.join('-d {}'.format(d) for d in domains),
                )
            ))

        # Display a result message
        if result.succeeded:
            display.success('Key chain successfully created!')
        else:
            display.error('Failed to create key chain.', abort_task=False)

        # Put nginx back up
        execute(nginx.start)
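
Example #1 builds its context managers ahead of time and combines them in a single with statement; settings(), cd() and friends return plain context-manager objects, so they can be named and reused. A minimal sketch of the same pattern, with a hypothetical user, path and command:

from fabric.api import cd, run, settings

def migrate_release():
    as_deploy = settings(user='deploy')          # hypothetical SSH user
    in_release = cd('/srv/app/current')          # hypothetical release dir
    tolerate_failure = settings(warn_only=True)  # non-zero exit won't abort

    with as_deploy, in_release, tolerate_failure:
        result = run('./manage.py migrate')
    if result.failed:
        print('migration failed with code %s' % result.return_code)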
Example #2
def deploy():
    prompt("Pivotal Tracker API token: ", "pivotal_token")
    prompt("App port: ", "play_port", "9000", "\d+")
    local("play clean compile stage")
    with lcd("target/universal/stage"):
        local("rm -f conf/site.conf")
        run("mkdir -p %s" % env.release_dir)
        with cd(env.release_dir):
            put("*", ".")
            run("echo %s > REVISION" % local("git rev-parse HEAD", capture=True))
        with cd(env.basedir):
            run("rm -f current")
            run("ln -s %s current" % env.release_dir)
    with settings(warn_only=True):
        run("sudo stop %(app_name)s" % env)
    run("mkdir -p %(shared_dir)s" % env)
    put("manifests", env.shared_dir)
    with cd(env.shared_dir):
        run("""FACTER_app_name=%(app_name)s\
               FACTER_app_path=%(release_dir)s\
               FACTER_manifest_path=%(manifest_dir)s\
               FACTER_play_port=%(play_port)s\
               FACTER_pivotal_token=%(pivotal_token)s\
               sudo -E puppet\
               apply\
               --detailed-exitcodes\
               --modulepath %(puppet_module_dir)s\
               %(manifest_dir)s/bridge.pp;\
               test $? -le 2
            """ % env)
    with settings(warn_only=True):
        run("sudo restart %(app_name)s" % env)
    with cd(env.releases_dir):
        run("ls -1|head -n -5|xargs rm -rf")
Example #3
 def put(self, local_path, remote_path, use_sudo, mirror_local_mode, mode,
     local_is_path, temp_dir):
     from fabric.api import sudo, hide
     pre = self.ftp.getcwd()
     pre = pre if pre else ''
     if local_is_path and self.isdir(remote_path):
         basename = os.path.basename(local_path)
         remote_path = posixpath.join(remote_path, basename)
     if output.running:
         print("[%s] put: %s -> %s" % (
             env.host_string,
             _format_local(local_path, local_is_path),
             posixpath.join(pre, remote_path)
         ))
     # When using sudo, "bounce" the file through a guaranteed-unique file
     # path in the default remote CWD (which, typically, the login user will
     # have write permissions on) in order to sudo(mv) it later.
     if use_sudo:
         target_path = remote_path
         hasher = hashlib.sha1()
         hasher.update(env.host_string)
         hasher.update(target_path)
         remote_path = posixpath.join(temp_dir, hasher.hexdigest())
     # Read, ensuring we handle file-like objects correct re: seek pointer
     putter = self.ftp.put
     if not local_is_path:
         old_pointer = local_path.tell()
         local_path.seek(0)
         putter = self.ftp.putfo
     rattrs = putter(local_path, remote_path)
     if not local_is_path:
         local_path.seek(old_pointer)
     # Handle modes if necessary
     if (local_is_path and mirror_local_mode) or (mode is not None):
         lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
         # Cast to octal integer in case of string
         if isinstance(lmode, basestring):
             lmode = int(lmode, 8)
         lmode = lmode & 07777
         rmode = rattrs.st_mode
         # Only bitshift if we actually got an rmode
         if rmode is not None:
             rmode = (rmode & 07777)
         if lmode != rmode:
             if use_sudo:
                 # Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv
                 # command. (The target path has already been cwd-ified
                 # elsewhere.)
                 with settings(hide('everything'), cwd=""):
                     sudo('chmod %o \"%s\"' % (lmode, remote_path))
             else:
                 self.ftp.chmod(remote_path, lmode)
     if use_sudo:
         # Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
         # (The target path has already been cwd-ified elsewhere.)
         with settings(hide('everything'), cwd=""):
             sudo("mv \"%s\" \"%s\"" % (remote_path, target_path))
         # Revert to original remote_path for return value's sake
         remote_path = target_path
     return remote_path
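
The use_sudo branch above "bounces" the upload through a temp path derived from a SHA-1 of the host string and the target path, then sudo-moves it into place with cwd cleared so sudo() doesn't prepend a cd. A minimal standalone sketch of that technique (the function name and temp_dir default are hypothetical):

import hashlib
import posixpath
from fabric.api import env, hide, put, settings, sudo

def put_as_root(local_path, target_path, temp_dir='/tmp'):
    hasher = hashlib.sha1()
    hasher.update(env.host_string)  # unique per host...
    hasher.update(target_path)      # ...and per destination path
    # (Python 2 strings; on Python 3 these would need to be encoded first)
    bounce_path = posixpath.join(temp_dir, hasher.hexdigest())
    put(local_path, bounce_path)    # the login user can write here
    with settings(hide('everything'), cwd=''):  # keep sudo from cd-ing first
        sudo('mv "%s" "%s"' % (bounce_path, target_path))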
Example #4
def kill_scidb():
    with settings(warn_only=True):
        run("ps ax | grep scidb | grep mnt | awk '{print $1}' | xargs kill")
    with settings(warn_only=True):
        run("sleep 5")
    with settings(warn_only=True):
        run("ps ax | grep scidb | grep mnt | awk '{print $1}' | xargs kill -9")
Example #5
    def _install_image(self, image_name):
        result = False
        self.logger.debug('Installing image %s'%image_name)
        image_info = self.images_info[image_name]
        webserver = image_info['webserver'] or \
            os.getenv('IMAGE_WEB_SERVER', '10.204.217.158')
        location = image_info['location']
        params = image_info['params']
        image = image_info['name']
        image_type = image_info['type']
        build_path = 'http://%s/%s/%s' % (webserver, location, image)

        #workaround for bug https://bugs.launchpad.net/juniperopenstack/+bug/1447401 [START]
        #Can remove this when above bug is fixed
        if image_type == 'docker':
            for host in self.hosts_dict['nova/docker']:
                username = self.inputs.host_data[host]['username']
                password = self.inputs.host_data[host]['password']
                ip = self.inputs.host_data[host]['host_ip']
                with settings(
                    host_string='%s@%s' % (username, ip),
                        password=password, warn_only=True, abort_on_prompts=False):
                    self.load_docker_image_on_host(build_path)
        #workaround for bug https://bugs.launchpad.net/juniperopenstack/+bug/1447401 [END]

        username = self.inputs.host_data[self.openstack_ip]['username']
        password = self.inputs.host_data[self.openstack_ip]['password']
        build_path = 'http://%s/%s/%s' % (webserver, location, image)
        with settings(
            host_string='%s@%s' % (username, self.openstack_ip),
                password=password, warn_only=True, abort_on_prompts=False):
            return self.copy_and_glance(build_path, image_name, image, params, image_type)
Example #6
    def do_config( self ):
        super( GitServer, self ).do_config()
        server = self.parent
        with settings( host_string=server.get_user_host_string() ):
        put( '~/.gitignore' )

        with settings( host_string=server.get_user_host_string(),
                       password=server.password ):
# #             term.printDebug( 'repo: %s' % repr( srv_def.repo ) )
            d = { 'w3_user': server.w3_user,
                  'src': '/home/%s' % server.user,
                  'dest': '/home/%s' % server.w3_user }
            term.printDebug( 'd: %s' % repr( d ) )

            cli_sudo_run( 'mkdir -p %(dest)s' % d,
                          password=server.password )

            cli_sudo_run( 'cp %(src)s/.gitignore %(dest)s' % d,
                          password=server.password )
            cli_sudo_run( 'chown -R %(w3_user)s.  %(dest)s' % d,
                          password=server.password )
            with cd( '/var/www' ):
                cli_sudo_run( 'ln -s /home/%(w3_user)s/.gitignore .gitignore' % d,
                              password=server.password )
Example #7
    def config( self ):
        server = self.srv_ctx.server
        bash_file = '.bashrc'
        bash_text_list = [ "alias l='ls -l'",
                           "alias la='ls -al'",
                           "alias lh='ls -lh'" ]

        with settings( host_string=server.get_user_host_string(),
                       password=server.password ):
            for bash_text in bash_text_list:
                fabfiles.append( bash_file, bash_text )

        bash_file = '/root/.bashrc'

        with settings( host_string=server.get_user_host_string(),
                       password=server.password ):
            for bash_text in bash_text_list:
                fabfiles.append( bash_file, bash_text, use_sudo=True )

            dest = '/home/%s' % server.w3_user
            cli_sudo_run( 'mkdir -p %s' % dest,
                          password=server.password )
            cli_sudo_run( 'chown %s. %s' % (server.w3_user,
                                            dest ),
                          password=server.password )

    def push_to_web( self, ts ):
        srv_ctx = self.srv_ctx
        server = self.srv_ctx.server
        dest_folder = srv_ctx.get_abs_web_folder( self.cluster_name,
                                                  self.app_name )
        term.printDebug( 'pushed to web: %s' % dest_folder )
#         dest_folder = '%s/apps/%s' % ( self.srv_ctx.get_abs_w2p_folder( w_type ),
#                                        app_name )
        with settings( host_string=server.get_user_host_string(),
                       password=server.password ):
            with cd( dest_folder ):
                with cd( 'current' ):
                    prev_folder = run( 'pwd -P' )
                with cd( 'releases' ):
                    cli_sudo_run( 'cp -prH ../current %s' % ts,
                                  password=server.password )
                    with cd( ts ):
                        ret = cli_sudo_run( 'pwd', password=server.password )
                        term.printDebug( ret )
                        cli_sudo_run( 'LANGUAGE=C hg pull', password=server.password )
                        cli_sudo_run( 'mkdir -p static/tmp', password=server.password )

                cli_sudo_run( 'rm -f current', password=server.password )
                cli_sudo_run( 'ln -s releases/%s current' % ts, password=server.password )
                with cd( 'releases/%s' % ts ):
                    with settings( hide( 'stderr', 'warnings' ), warn_only = True ):
                        ret = cli_sudo_run( 'rm -f cache/*', password=server.password )
                        print( blue( ret.stdout ) )
                        ret = cli_sudo_run( 'rm -rf sessions/*', password=server.password )
                        print( blue( ret.stdout ) )
                        ret = cli_sudo_run( 'find -L . -name "*.pyc" |xargs rm',
                                            password=server.password )
                        print( blue( ret.stdout ) )
                sudo( 'chown -R %s. .' % server.w3_user )
        return prev_folder
Example #9
def _init(site_code, repo_root, site_root, remote, database, conf=None):
    msgs = []
    _run("virtualenv --no-site-packages %s", site_root)
    _run("mkdir -p %s", site_root.child('media'))
    with settings(warn_only=True):
        _run("ln -s %s %s", '../lib/python2.6/site-packages/django/contrib/admin/media', site_root.child('media').child('admin'))
        _run("ln -s %s %s", repo_root.child('media'), site_root.child('media').child('repo'))
    _run("chmod +x %s", repo_root.child('manage.py'))
    if database == 'sqlite3':
        _run("mkdir -p %s", site_root.child('db'))
        _run("touch %s", site_root.child('db').child('db.sqlite3'))
        _run("chmod -R a+w %s", site_root.child('db'))
    if remote:
        with settings(warn_only=True):
            _run("adduser --gecos=',,,' --disabled-login %s", site_code)
        # Make the site root writable so processes can put log files in it
        _run("chmod a+w %s", site_root)
        if conf:
            tmpconf = _Path('/tmp').child('tmp.httpd.%f.conf' % time.time())
            fabric.contrib.files.upload_template(setup.child(conf), tmpconf, context={
                'subdomain': site_code.replace('_', '-'),
                'site_code': site_code,
                'repo_root': repo_root,
                'site_root': site_root,
            })
            _run("cat %s >> /etc/apache2/httpd.conf", tmpconf)
            msgs.extend([
                '# edit apache configuration',
                'se /etc/apache2/httpd.conf',
                '# restart apache',
                'sudo /etc/init.d/apache2 graceful',
            ])
    for msg in msgs:
        print(msg)
Example #10
def rf():
    '''Refresh pypi dependencies everywhere and touch remote wsgi file'''
    _confirm_site('dev')
    with settings(host_string=local_host_string):
        _refresh(REPO_ROOT, SITE_ROOT, UPSTREAM_SITE, remote=False, ikflush=True)
    with settings(host_string=remote_host_string):
        _refresh(REPO_ROOT, SITE_ROOT, UPSTREAM_SITE, remote=True, ikflush=True)
Example #11
def bootstrap():
    """Creates initial directories and virtual environment."""

    if (exists('%(project_path)s' % env) and \
        confirm('%(project_path)s already exists. Do you want to continue?' \
                % env, default=False)) or not exists('%(project_path)s' % env):

            print('Bootstrapping initial directories...')

            with settings(hide('warnings', 'running', 'stdout', 'stderr')):

                sudo('mkdir -p %(project_path)s' % env)
                sudo('chown %(user)s:%(user)s %(project_path)s' % env)

                with cd(env.project_path):

                    run('git clone %(project_repo)s .' % env)
                    run('virtualenv --no-site-packages env')

                    with settings(warn_only=True):
                        run('mkdir -m a+w logs')
                        run('mkdir -m g+w tzos/dbs')
                        run('mkdir -m g+w tzos/dbs/dbxml')

    else:

        print('Aborting.')
Example #12
def show_smap_status():
    """
    :return: dict
    """

    def parse(lines):
        d = {}
        for ln in lines:
            d['abc'] = 'abc'
        return d

    out = []
    with settings(host_string='[email protected]'):
        out.append(get_uptime())
        out.append(get_version())

    with settings(host_string='[email protected]', password='******'):
        out.append(get_uptime())
        out.append(get_version())

    print out

    # res = parse(out)
    return out
Example #13
def patch_text(patch, reverse=False):
    '''Apply text patch to remote server.'''

    def cmd(reverse, dry, path):
        cmd = ['patch']
        if dry: cmd.append('--dry-run')
        if reverse: cmd.append('-R')
        cmd.append('-p1')
        cmd.append('<')
        cmd.append(path)
        return ' '.join(cmd)

    require('base')
    remotep = _remote('%s.patch'%patch)
    put(join(path(patch), 'text.patch'), remotep)
    if confirm('Dry run patch?'):
        with settings(show('stdout'), warn_only = True):
            with cd(env.base):
                run(cmd(reverse, True, remotep))
    if confirm('Execute patch?'):
        with settings(show('stdout'), warn_only = True):
            with cd(env.base):
                run(cmd(reverse, False, remotep))
                log('Applied text patch: %s'%patch)
                run('mv %s patches'%remotep)
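
Example #13 pairs warn_only with show('stdout'), the mirror image of hide(): it forces the stdout channel on so the operator can read patch's dry-run output even if output is globally hidden. A minimal sketch of that output control (paths hypothetical):

from fabric.api import cd, env, run, settings, show

def dry_run_patch(remote_patch):
    with settings(show('stdout'), warn_only=True):
        with cd(env.base):
            return run('patch --dry-run -p1 < %s' % remote_patch)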
Example #14
    def test_password_memory_on_user_switch(self):
        """
        Switching users mid-session should not screw up password memory
        """
        def _to_user(user):
            return join_host_strings(user, env.host, env.port)

        user1 = 'root'
        user2 = USER
        with settings(hide('everything'), password=None):
            # Connect as user1 (thus populating both the fallback and
            # user-specific caches)
            with settings(
                password_response(PASSWORDS[user1]),
                host_string=_to_user(user1)
            ):
                run("ls /simple")
            # Connect as user2:
            # * First cxn attempt will use fallback cache, which contains
            #   user1's password, and thus fail
            # * Second cxn attempt will prompt user, and succeed due to
            #   mocked p4p
            # * but will NOT overwrite fallback cache
            with settings(
                password_response(PASSWORDS[user2]),
                host_string=_to_user(user2)
            ):
                # Just to trigger connection
                run("ls /simple")
            # * Sudo call should use cached user2 password, NOT fallback cache,
            # and thus succeed. (I.e. p_f_p should NOT be called here.)
            with settings(
                password_response('whatever', times_called=0),
                host_string=_to_user(user2)
            ):
                sudo("ls /simple")
Example #15
    def test_tempest(self):
        self.controller.clone()
        self.compute.clone()

        # Create ml2 config for cisco plugin. Put it to controller node
        with settings(host_string=self.VMs['control'].ip):
            map = [vm.name + '=' + vm.port for vm in self.VMs.itervalues()]
            ml2_conf_io = StringIO.StringIO()
            ml2_conf_io.write(
                ML2_CONF_INI.format(
                    map=os.linesep.join(map),
                    router_ip=NEXUS_IP,
                    username=NEXUS_USER,
                    password=NEXUS_PASSWORD))
            put(ml2_conf_io, os.path.join(self.controller._clone_path,
                                          Q_PLUGIN_EXTRA_CONF_FILES))

        self.assertFalse(self.controller.stack())
        self.assertFalse(self.compute.stack())

        # Add port to data network bridge
        for ip in (self.VMs['control'].ip, self.VMs['compute'].ip):
            with settings(host_string=ip):
                run('sudo ovs-vsctl add-port br-eth1 eth1')

        self.assertFalse(self.controller.run_tempest(TEST_LIST_FILE))
Example #16
def deploy(version='master'):
    """Deploys payment code to application server"""
    if not exists('/opt/paysys'):
        setup()

    sudo('mkdir /opt/paysys/.ssh; chmod 0700 /opt/paysys/.ssh')
    put('./files/id_deploy', '/opt/paysys/.ssh/id_rsa', mode=0400, use_sudo=True)
    sudo('chown -R %s. /opt/paysys/.ssh' % app_user)

    with settings(sudo_user=app_user), cd('/opt/paysys'):
        sudo('ssh-keyscan github.com >> ~/.ssh/known_hosts')
        # remove existing dir or git won't clone into it
        sudo('rm -rf /opt/paysys/current/')
        sudo('git clone [email protected]:dgaedcke/paymentSystem.git /opt/paysys/current')
        with cd('/opt/paysys/current'):
            sudo('git checkout %s' % version)
            with virtualenv():
                sudo('pip install -r requirements/common.txt')
        with cd('/opt/paysys/current/source'):
            with virtualenv():
                sudo('pip install --editable .')
        if not exists('/opt/paysys/rules/__init__.py'):
            sudo('/bin/cp -r /opt/paysys/current/rules-initial/* /opt/paysys/rules/')
        sudo('rm -rf /opt/paysys/current/source/rules')
        sudo('ln -sf /opt/paysys/rules /opt/paysys/current/source/rules')

    put('./files/supervisor-payment.conf', '/etc/supervisor.d/payment.conf', use_sudo=True)
    sudo('rm -rf /opt/paysys/current/source/web/static/.cache')

    with settings(sudo_user=app_user), cd('/opt/paysys/current/source/web/static'):
        sudo('bower install --config.interactive=false -s')

    put('./files/local_settings.py', '/opt/paysys/current/source/configuration/local_settings.py', use_sudo=True)
    restartAll()
Example #17
def configure():
    """
    Configure PureFTP
    """
    with sudo():
        # Echo configurations
        setup_config()

        for user in blueprint.get('users'):
            username, password = user['username'], user['password']
            if 'homedir' in user:
                user_home = user['homedir']
            else:
                user_home = os.path.join(ftp_root, username)

            passwd_path = '/etc/pure-ftpd/pureftpd.passwd'
            with settings(warn_only=True):
                if files.exists(passwd_path) and run('pure-pw show {}'.format(
                                                     username)).return_code == 0:
                    continue
            debian.mkdir(user_home, owner=ftp_user, group=ftp_group)
            prompts = {
                'Password: ': password,
                'Enter it again: ': password
            }
            with settings(prompts=prompts):
                run('pure-pw useradd {} -u {} -g {} -d {}'.format(username, ftp_user, ftp_group,
                                                                  user_home))
        run('pure-pw mkdb')
    restart()
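
The settings(prompts=...) block above (Fabric 1.9+) auto-answers interactive prompts: when a command's output ends with one of the dict keys, Fabric sends the mapped value, so the prompt strings must match exactly, trailing space included. A minimal sketch with hypothetical prompt strings and flags:

from fabric.api import run, settings

def create_ftp_user(username, password):
    answers = {
        'Password: ': password,       # keys must match the prompt exactly
        'Enter it again: ': password,
    }
    with settings(prompts=answers):
        run('pure-pw useradd {0} -u ftp -g ftp -d /srv/ftp/{0}'.format(username))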
Example #18
def postgis_initdb(instance_db):
    """Populate the a database with postgis
        The script init_db.sh is on tyr, but need to create a postgis extension
        on db, so load the sql scripts directly from db server
    """
    if db_has_postgis(instance_db):
        # postgis 2.0 with an old postgres version does not seem to be idempotent, so we do the postgis init only once
        print "instance {} already has postgis, skipping postgis init".format(instance_db)
        return
    # # init_db.sh: create this on the database host because the sql scripts are @localhost
    # # and must be run as the postgres user

    psql_version = get_psql_version()
    if psql_version[0:2] == ["9", "1"]:
        postgis_path = '/usr/share/postgresql/9.1/contrib/postgis-1.5'
        with settings(sudo_user='******'):
            sudo('psql --set ON_ERROR_STOP=1 --dbname={} '
                 '--file {}/postgis.sql'.format(instance_db, postgis_path))
            sudo('psql --set ON_ERROR_STOP=1 --dbname={}'
                ' --file {}/spatial_ref_sys.sql'.format(instance_db, postgis_path))
    elif psql_version[0:2] >= ["9", "3"]:
        with settings(sudo_user='******'):
            sudo('psql -c "CREATE EXTENSION  IF NOT EXISTS postgis;" --dbname={}'.format(instance_db))
    else:
        raise EnvironmentError("Bad version of postgres")
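
The sudo_user setting above makes every sudo() in the block run as that user (the masked value is presumably the postgres account; that's an assumption here). A minimal sketch:

from fabric.api import settings, sudo

def create_postgis_extension(dbname):
    with settings(sudo_user='postgres'):  # assumption: the postgres account
        sudo('psql -c "CREATE EXTENSION IF NOT EXISTS postgis;"'
             ' --dbname={}'.format(dbname))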
Example #19
def create_individual_database(dbname):
    """Create a single database. Used by initial_create_databases
    and copy_databases."""

    dbinfo = config('databases')[dbname]

    if 'postgis' in dbinfo['ENGINE']:
        if confirm("Create database %s on %s with template postgis?" % (
                dbinfo['NAME'], dbinfo['HOST']), default=False):
            print(green("The password required is that of user 'postgres'." +
                        " Often equal to 'postgres'."))
            cmd = ('createdb -h {host} -U postgres ' +
                   '--template=template_postgis --owner={user} {database}')
            # Use warn-only so that the script doesn't halt if the db
            # exists already
            with settings(warn_only=True):
                local(cmd.format(host=dbinfo['HOST'], user=dbinfo['USER'],
                                 database=dbinfo['NAME']))
    else:
        if confirm("Create database %s on %s?" % (
                dbinfo['NAME'], dbinfo['HOST']), default=False):
            print(green("The password required is that of user 'postgres'." +
                        " Often equal to 'postgres'."))
            cmd = ('createdb -h {host} -U postgres ' +
                   '--owner={user} {database}')
            # Use warn-only so that the script doesn't halt if the db
            # exists already
            with settings(warn_only=True):
                local(cmd.format(host=dbinfo['HOST'], user=dbinfo['USER'],
                                 database=dbinfo['NAME']))
Example #20
    def config( self ):
        super( HgServer, self ).config()
        server = self.srv_ctx.server
        with settings( host_string=server.get_user_host_string() ):
            put( '~/.hgignore' )
            put( '~/.hgrc_hooks' )

        hg_file = '/etc/mercurial/hgrc'
        hg_text_list = [ '[trusted]',
                         'users = %s, www-data' % server.user ]

        with settings( host_string=server.get_user_host_string(),
                       password=server.password ):
            for hg_text in hg_text_list:
                fabfiles.append( hg_file, hg_text, use_sudo=True )

# #             term.printDebug( 'repo: %s' % repr( srv_def.repo ) )
            d = { 'w3_user': server.w3_user,
                  'src': '/home/%s' % server.user,
                  'dest': '/home/%s' % server.w3_user }
            term.printDebug( 'd: %s' % repr( d ) )

            cli_sudo_run( 'cp %(src)s/.hgignore %(dest)s' % d,
                          password=server.password )
            cli_sudo_run( 'cp %(src)s/.hgrc_hooks %(dest)s' % d,
                          password=server.password )
            cli_sudo_run( 'chown  %(w3_user)s.  %(dest)s/.hg*' % d,
                          password=server.password )
            with cd( '/var/www' ):
                if not files.exists( '.hgignore',
                                     use_sudo=True ):
                    cli_sudo_run( 'ln -s /home/%(w3_user)s/.hgignore .hgignore' % d,
                                  password=server.password )
Example #21
def build():
    """ Build or update the virtualenv """
    with settings(hide('stdout')):
        print(cyan('\nUpdating venv, installing packages...'))
        # remove non-virtualenv Fabric, because it causes problems
        # TODO: the logic of this will need to be re-thought
        do('sudo pip uninstall Fabric -qy', capture=True)
        do('[ -e venv ] || virtualenv venv --no-site-packages')
        # annoyingly, pip prints errors to stdout (instead of stderr), so we
        # have to check the return code and output only if there's an error.
        with settings(warn_only=True):
            # upgrade pip so we can take advantage of fancy new features
            pupgrade = do('venv/bin/pip install pip --upgrade')
            if pupgrade.failed:
                print(red(pupgrade))
                abort("pip upgrade unsuccessful %i" % pupgrade.return_code)
            # http://www.pip-installer.org/en/latest/cookbook.html#fast-local-installs
            do('mkdir -p pyarchives', capture=True)
            do('venv/bin/pip install -v --download pyarchives -r requirements.txt --upgrade')
            # if webassets==dev exists, rename it
            # if os.path.exists("pyarchives/master"):
                # os.rename("pyarchives/master", "pyarchives/webassets-dev.tar.gz")
            pip = do(
                'venv/bin/pip install --no-index --find-links=file://vagrant/pyarchives -r requirements.txt --upgrade',
                capture=True)
        if pip.failed:
            print(red(pip))
            abort("pip exited with return code %i" % pip.return_code)
Example #22
def setup_ufw_rules():
    """
    Set up ufw app rules from application templates and the UFW_RULES setting

    """
    
    # current rules
    current_rules = server_state('ufw_rules')
    if current_rules:
        current_rules = set(current_rules)
    else:
        current_rules = set()
    role = env.role_lookup[env.host_string]
    firewall_rules = set(env.firewall_rules[role])
    if not env.overwrite and firewall_rules == current_rules:
        return
    if env.verbosity:
        print 'CONFIGURING FIREWALL'
    
    delete_rules = current_rules - firewall_rules
    for rule in delete_rules:
        with settings(warn_only=True):
            if env.verbosity:
                print 'ufw delete', rule
            sudo('ufw delete %s'% rule)
    new_rules = firewall_rules - current_rules        
    for rule in new_rules:
        with settings(warn_only=True):
            if env.verbosity:
                print 'ufw', rule
            sudo('ufw %s'% rule)
    set_server_state('ufw_rules', list(firewall_rules))

    output = sudo('ufw reload')
    if env.verbosity:
        print output
Example #23
def deploy(warn=True):
    """
    Deploy to remote environment.
    """
    with settings(host_string=remote()):

        if warn:
            # Display confirmation
            print('\nYou are about to deploy current branch ' + yellow('%(branch)s' % env, bold=True) + ' to ' + yellow('%(host_string)s' % env, bold=True) + '.')
            if not confirm('Continue?', default=False):
                abort('User aborted')

        # git-pull
        local('git pull origin %(branch)s' % env)

        # Initialise remote git repo
        run('git init %(remote_path)s' % env)
        run('git --git-dir=%(remote_path)s/.git config receive.denyCurrentBranch ignore' % env)
        local('cat %s/deployment/hooks/post-receive.tpl | sed \'s/\$\$BRANCH\$\$/%s/g\' > /tmp/post-receive.tmp' % (os.path.dirname(__file__), env.branch))
        put('/tmp/post-receive.tmp', '%(remote_path)s/.git/hooks/post-receive' % env)
        run('chmod +x %(remote_path)s/.git/hooks/post-receive' % env)

        # Add server to the local git repo config (as a git remote)
        with settings(hide('warnings'), warn_only=True):
            local('git remote rm %(branch)s' % env)
        local('git remote add %(branch)s ssh://%(user)s@%(host_string)s:%(port)s%(remote_path)s' % env)

        # Push to origin
        print(cyan('\nDeploying code...'))
        local('git push origin %(branch)s' % env)
        local('GIT_SSH=fabfile/deployment/ssh git push %(branch)s %(branch)s' % env)
Example #24
def deploy_linode():
    with settings(warn_only=True):
        # Check for remote dir
        run('mkdir %s' % remote_path)

    # TODO: use restart instead of stop and start to lower downtime
    with settings(warn_only=True):
        run('cd %(remote_path)s; forever stop main.js' %
            {'remote_path': remote_path})

    run('cd %(remote_path)s; rm -rf *' % {'remote_path': remote_path})

    put('%s/bundle.tar.gz' % local_path, remote_path)

    run('cd %(remote_path)s; tar -xmf bundle.tar.gz' %
        {'remote_path': remote_path})
    run('cd %(remote_path)s; mv bundle/* ./' % {'remote_path': remote_path})

    run('cd %(remote_path)s; rm -rf bundle' % {'remote_path': remote_path})
    run('cd %(remote_path)s; rm bundle.tar.gz' % {'remote_path': remote_path})

    # Recompile fibers for remote box
    run('cd %(remote_path)s/server; rm -rf node_modules/fibers' %
        {'remote_path': remote_path})
    sudo('cd %(remote_path)s/server; npm install [email protected] --production' %
         {'remote_path': remote_path})

    # Recompile xml2js for remote box.
    # This can be removed, changed, or repeated as needed
    run('cd %(remote_path)s; mkdir node_modules' %
        {'remote_path': remote_path})
    sudo('cd %(remote_path)s/; npm install xml2js' %
         {'remote_path': remote_path})

    start_app()
Example #25
def install_mysql():
    """Install and configure a standard mysql5.1 server"""
    env.datadir = "/usr/local/var/db/mysql"
    env.root_password = '******'
    
    stop_mysql()
    
    local("PACKAGESITE=%s; export PACKAGESITE" % env.packagesite)
    pkgsite = "PACKAGESITE=%s; export PACKAGESITE" % env.packagesite
    with settings(hide('warnings'), warn_only=True):
        local("%s;pkg_add -r mysql-server-5.1.34.tbz" % pkgsite)
    
    ##TODO: Move this file to a more appropriate svn repo    
    local("svn cat %s/build_system/trunk/install/my.cnf > /etc/my.cnf" % env.svn_repo)
    local("rm -rf %s" % env.datadir)
    local("mkdir -p %s" % env.datadir)
    local("chown mysql:mysql %s" % env.datadir)
    local("/usr/local/bin/mysql_install_db --user=mysql --datadir=%s" % env.datadir)
    _process_conf("/etc/rc.conf", "mysql_enable=YES")
    _process_conf("/etc/rc.conf", 'mysql_dbdir=/usr/local/var/db/mysql')
    
    local("/usr/local/etc/rc.d/mysql-server start")
    
    ## Setup root access
    with settings(hide('warnings'), warn_only=True):
        local("/usr/local/bin/mysqladmin -u root password '%s'" % env.root_password)
    
    ## TODO: REPLACE WITH file in svn
    local('''mysql -u root -pqwerty1 -e "GRANT ALL ON *.* TO 'pbrian'@'10.137.0.0/255.255.0.0' IDENTIFIED BY 'devpass';"''')
    local('''mysql -u root -pqwerty1 -e "GRANT ALL ON *.* TO 'backup'@'10.137.0.0/255.255.0.0' IDENTIFIED BY 'backpass';"''')
    local('''mysql -u root -pqwerty1 -e "GRANT ALL ON *.* TO 'robc'@'10.137.0.0/255.255.0.0' IDENTIFIED BY 'devpass';"''')
    local("mysqladmin flush-privileges -u root -p%s" % env.root_password)
    
    start_mysql()
    local("unset PACKAGESITE")
Example #26
def exists(path, use_sudo=False, verbose=False):
    """
    Return True if given path exists on the current remote host.

    If ``use_sudo`` is True, will use `sudo` instead of `run`.

    `exists` will, by default, hide all output (including the run line, stdout,
    stderr and any warning resulting from the file not existing) in order to
    avoid cluttering output. You may specify ``verbose=True`` to change this
    behavior.
    """
    runner = tasks_runner_factory.get_fabric_method(super=use_sudo)
    if runner.__class__ == DevelopmentMethod or runner.__class__ == DevelopmentSuperMethod:
        func = partial(runner.execute, capture=True) #bind capture
        cmd = '[ -f %s ] && echo "Found" || echo "Not found"' % path
    else:
        func = runner.execute
        cmd = 'test -e "$(echo %s)"' % path

    # If verbose, run normally
    if verbose:
        with settings(warn_only=True):
            return not func(cmd).failed
    # Otherwise, be quiet
    with settings(hide('everything'), warn_only=True):
        return not func(cmd).failed
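
The quiet probe at the end is a common Fabric idiom: warn_only stops a non-zero exit from aborting, hide('everything') suppresses the run line and output, and .failed converts the exit status to a boolean. A minimal standalone sketch:

from fabric.api import hide, run, settings

def remote_path_exists(path):
    with settings(hide('everything'), warn_only=True):
        return not run('test -e "%s"' % path).failed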
Example #27
    def run_tempest(self, *args, **kwargs):
        logger.info('Run tempest tests')

        test_list_path = kwargs.get('test_list_path')
        all_plugin = kwargs.get('all_plugin', False) is True
        env_args = kwargs.get('env_args', {})
        testr_args = ' '.join(args)
        with settings(host_string=self.host_string):
            if test_list_path:
                temp_path = '/tmp/tempest_tests.txt'
                put(test_list_path, temp_path)
                testr_args += ' --load-list="{tests_list}"'.format(
                    tests_list=temp_path)

            with cd(self._tempest_path), settings(warn_only=True):
                envs = ''
                for k, v in env_args.items():
                    envs += "%s=%s " % (k, v)

                if not envs:
                    cmd = 'tox'
                else:
                    cmd = 'env %s tox' % envs.strip()

                if all_plugin:
                    cmd += ' -eall-plugin'
                else:
                    cmd += ' -eall'

                cmd += ' -- {0}'.format(testr_args)

                # Run tempest
                res = run(cmd)
                logger.info(res)
        return res.failed
Example #28
def start_webservers():
    """
    Start apache2 and start/reload nginx
    """
    with settings(warn_only=True):
        if env.verbosity:
            print env.host, "STARTING apache2"
        a = sudo("/etc/init.d/apache2 start")
        if env.verbosity:
            print "", a

    if a.failed:
        print "ERROR: /etc/init.d/apache2 start failed"
        print env.host, a
        sys.exit(1)
    if env.verbosity:
        # Reload used to fail on Ubuntu but at least in 10.04 it works
        print env.host, "RELOADING nginx"
    with settings(warn_only=True):
        s = run("/etc/init.d/nginx status")
        if "running" in s:
            n = sudo("/etc/init.d/nginx reload")
        else:
            n = sudo("/etc/init.d/nginx start")
    if env.verbosity:
        print " *", n
    return True
Example #29
def install_sysv_init_script(nsd, nuser, cfgfile):
    """
    Install the init script for an operational deployment of RASVAMT.
    The init script targets the old System V init system.
    On a systemd-enabled system we use the update-rc.d tool to enable the
    script under systemd (rather than the System V chkconfig tool used
    otherwise). The script is prepared to deal with both tools.
    """
    with settings(user=env.AWS_SUDO_USER):

        print(red("Initialising deployment"))

        sudo('usermod -a -G {} ec2-user'.format(env.APP_USER))
        sudo('mkdir -p /etc/supervisor/')
        sudo('mkdir -p /etc/supervisor/conf.d/')

        sudo('cp {0}/fabfile/init/sysv/nginx.conf /etc/nginx/.'.
            format(APP_source_dir()))
        # copy nginx and supervisor conf files
        sudo('cp {0}/fabfile/init/sysv/rasvama.conf /etc/supervisor/conf.d/.'.
            format(APP_source_dir()))

        # create the DB
        with settings(user=env.APP_USER):
            virtualenv('cd {0}/db; python create_db.py'.format(env.APP_SRC_DIR))

        #check if nginx is running else
        print(red("Server setup and ready to deploy"))
        #Think we have

    success("Init scripts installed")
Example #30
 def test_just_ssh_config(self):
     """
     No env.key_filename + valid ssh_config = ssh value
     """
     with settings(use_ssh_config=True, ssh_config_path=support("ssh_config")):
         for val in ["", []]:
             with settings(key_filename=val):
                 eq_(key_filenames(), ["foobar.pub"])
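
With use_ssh_config enabled, Fabric resolves hostnames, usernames, ports and IdentityFile entries from an OpenSSH config file whenever env.key_filename is empty, which is what the test above asserts. A minimal sketch pointing at a hypothetical config path:

from fabric.api import run, settings

def run_via_ssh_config(cmd):
    with settings(use_ssh_config=True, ssh_config_path='~/.ssh/config'):
        run(cmd)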
Example #31
 def put(self, local_path, remote_path, use_sudo, mirror_local_mode, mode,
     local_is_path, temp_dir, callback=None):
     from fabric.api import sudo, hide
     pre = self.ftp.getcwd()
     pre = pre if pre else ''
     if local_is_path and self.isdir(remote_path):
         basename = os.path.basename(local_path)
         remote_path = posixpath.join(remote_path, basename)
     if output.running:
         print("[%s] put: %s -> %s" % (
             env.host_string,
             _format_local(local_path, local_is_path),
             posixpath.join(pre, remote_path)
         ))
     # When using sudo, "bounce" the file through a guaranteed-unique file
     # path in the default remote CWD (which, typically, the login user will
     # have write permissions on) in order to sudo(mv) it later.
     if use_sudo:
         target_path = remote_path
         hasher = hashlib.sha1()
         hasher.update(env.host_string)
         hasher.update(target_path)
         remote_path = posixpath.join(temp_dir, hasher.hexdigest())
     # Read, ensuring we handle file-like objects correct re: seek pointer
     putter = self.ftp.put
     if not local_is_path:
         old_pointer = local_path.tell()
         local_path.seek(0)
         putter = self.ftp.putfo
     rattrs = putter(local_path, remote_path,
             self._create_paramiko_callback(str(local_path), callback))
     if not local_is_path:
         local_path.seek(old_pointer)
     # Handle modes if necessary
     if (local_is_path and mirror_local_mode) or (mode is not None):
         lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
         # Cast to octal integer in case of string
         if isinstance(lmode, basestring):
             lmode = int(lmode, 8)
         lmode = lmode & 07777
         rmode = rattrs.st_mode
         # Only bitshift if we actually got an rmode
         if rmode is not None:
             rmode = (rmode & 07777)
         if lmode != rmode:
             if use_sudo:
                 # Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv
                 # command. (The target path has already been cwd-ified
                 # elsewhere.)
                 with settings(hide('everything'), cwd=""):
                     sudo('chmod %o \"%s\"' % (lmode, remote_path))
             else:
                 self.ftp.chmod(remote_path, lmode)
     if use_sudo:
         # Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
         # (The target path has already been cwd-ified elsewhere.)
         with settings(hide('everything'), cwd=""):
             sudo("mv \"%s\" \"%s\"" % (remote_path, target_path))
         # Revert to original remote_path for return value's sake
         remote_path = target_path
     return remote_path
Example #32
    def test_rsyslog_messages_in_db_through_contrail_logs(self):
        """Tests related to rsyslog."""
        result = True
        if len(self.inputs.compute_ips) < 1:
            self.logger.warn(
                "Minimum 1 compute nodes are needed for this test to run")
            self.logger.warn("Exiting since this test can't be run.")
            return True

        # get a collector-less compute node for the test,
        # so that we can test remote syslog messages.
        try:
            list_of_collector_less_compute = \
                list(set(self.inputs.compute_ips) -
                     set(self.inputs.collector_ips))
            if not list_of_collector_less_compute:
                self.logger.error(
                    "Colud not get a collector less compute node for the test."
                )
                return False
            comp_node_ip = list_of_collector_less_compute[0]
        except Exception as e:
            self.logger.error(
                "Colud not get a collector less compute node for the test.")
            self.logger.exception("Got exception as %s" % (e))

        # bring up rsyslog client-server connection with udp protocol.
        restart_collector_to_listen_on_port(self, self.inputs.collector_ips[0])
        restart_rsyslog_client_to_send_on_port(self, comp_node_ip,
                                               self.inputs.collector_ips[0])

        # send 10 syslog messages and verify through contrail logs. There might
        # be some loss, but a few messages should get through; otherwise the test fails.

        # copy test files to the compute node.
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[self.inputs.cfgm_ips[0]]['username'],
             self.inputs.cfgm_ips[0]),
                password=self.inputs.host_data[
                    self.inputs.cfgm_ips[0]]['password'],
                warn_only=True,
                abort_on_prompts=False):
            host_node = {
                'username': self.inputs.host_data[comp_node_ip]['username'],
                'password': self.inputs.host_data[comp_node_ip]['password'],
                'ip': comp_node_ip
            }
            path = os.getcwd() + '/serial_scripts/rsyslog/mylogging.py'
            copy_file_to_server(host_node, path, '~/', 'mylogging.py')
            path = os.getcwd() + '/serial_scripts/rsyslog/message.txt'
            copy_file_to_server(host_node, path, '~/', 'message.txt')

        # send 10 messages with delay.
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[comp_node_ip]['username'], comp_node_ip),
                password=self.inputs.host_data[comp_node_ip]['password'],
                warn_only=True,
                abort_on_prompts=False):
            cmd = "chmod 777 ~/mylogging.py"
            run('%s' % (cmd), pty=True)
            cmd = "~/mylogging.py send_10_log_messages_with_delay"
            run('%s' % (cmd), pty=True)

        # verify through contrail logs.
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[self.inputs.collector_ips[0]]['username'],
             self.inputs.collector_ips[0]),
                password=self.inputs.host_data[
                    self.inputs.collector_ips[0]]['password'],
                warn_only=True,
                abort_on_prompts=False):
            cmd = "contrail-logs --last 2m --message-type Syslog | "
            cmd = cmd + "grep 'Test Syslog Messages being sent.' | wc -l"
            output = run('%s' % (cmd), pty=True)
            if int(output) == 0:
                self.logger.error(
                    "No syslog messages in contrail-logs.Seems to be an issue")
                return False
            elif int(output) < 7:
                self.logger.info(
                    "Remote syslog message test connection setup passed.")
                self.logger.warn(
                    "There is 30% message loss. There might be an issue.")
            else:
                self.logger.info(
                    "Remote syslog message test connection setup passed.")
                self.logger.info(
                    "Remote syslog message test over UDP connection passed.")

        # change rsyslog client server connection to tcp.
        update_rsyslog_client_connection_details(
            self,
            node_ip=comp_node_ip,
            server_ip=self.inputs.cfgm_ips[0],
            protocol='tcp',
            restart=True)

        # send 10 log messages without any delay.
        # no message should be lost in a tcp connection.
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[comp_node_ip]['username'], comp_node_ip),
                password=self.inputs.host_data[comp_node_ip]['password'],
                warn_only=True,
                abort_on_prompts=False):
            cmd = "~/mylogging.py send_10_log_messages"
            run('%s' % (cmd), pty=True)

        # verify through contrail logs.
        time.sleep(2)  # for database sync.
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[self.inputs.collector_ips[0]]['username'],
             self.inputs.collector_ips[0]),
                password=self.inputs.host_data[
                    self.inputs.collector_ips[0]]['password'],
                warn_only=True,
                abort_on_prompts=False):
            cmd = "contrail-logs --last 2m --message-type Syslog | "
            cmd = cmd + "grep 'Test Syslog Messages being sent without delay.' "
            cmd = cmd + "| wc -l"
            output = run('%s' % (cmd), pty=True)
            if int(output) != 10:
                self.logger.error(
                    "Seeing message loss in tcp which is unexpected.")
                return False
            else:
                self.logger.info("Remote syslog message test over TCP passed.")

            # verify 'category' query of contrail logs.
            cmd = "contrail-logs --last 3m --category cron | "
            cmd = cmd + "grep 'Test Syslog Messages being sent without delay.' "
            cmd = cmd + "| wc -l"
            output = run('%s' % (cmd), pty=True)
            if int(output) != 10:
                self.logger.error("'category' based query FAILED.")
                return False
            else:
                self.logger.info("'category' based query PASSED.")

        # send syslog messages of all facilities and severities and verify.
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[comp_node_ip]['username'], comp_node_ip),
                password=self.inputs.host_data[comp_node_ip]['password'],
                warn_only=True,
                abort_on_prompts=False):
            cmd = "~/mylogging.py send_messages_of_all_facility_and_severity"
            run('%s' % (cmd), pty=True)

        # verify all facilities and severities through contrail logs.
        time.sleep(2)  # for database sync.
        result_flag = 0
        list_of_facility = [
            'LOG_KERN', 'LOG_USER', 'LOG_MAIL', 'LOG_DAEMON', 'LOG_AUTH',
            'LOG_NEWS', 'LOG_UUCP', 'LOG_LOCAL0', 'LOG_CRON', 'LOG_SYSLOG',
            'LOG_LOCAL1'
        ]
        list_of_severity = [
            'LOG_EMERG', 'LOG_ALERT', 'LOG_CRIT', 'LOG_ERR', 'LOG_WARNING',
            'LOG_NOTICE', 'LOG_INFO', 'LOG_DEBUG'
        ]

        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[self.inputs.collector_ips[0]]['username'],
             self.inputs.collector_ips[0]),
                password=self.inputs.host_data[
                    self.inputs.collector_ips[0]]['password'],
                warn_only=True,
                abort_on_prompts=False):
            cmd = "contrail-logs --last 2m --message-type Syslog | "
            cmd = cmd + "grep 'Test Message from' > ~/result.txt "
            run('%s' % (cmd), pty=True)
            for each_facility in list_of_facility:
                for each_severity in list_of_severity:
                    cmd = "cat ~/result.txt | grep 'Test Message from " + \
                        str(each_facility) + " with severity " + \
                        str(each_severity) + ".' | wc -l"
                    output = run('%s' % (cmd), pty=True)
                    if int(output) != 1:
                        self.logger.error(
                            "Syslog message with facility %s and severity %s was not received"
                            % (each_facility, each_severity))
                        result_flag = 1
                    else:
                        self.logger.info(
                            "Syslog message with facility %s and severity %s was received"
                            % (each_facility, each_severity))

        if result_flag != 0:
            self.logger.error(
                "Error in transmitting or receiving some syslog facilities and severities"
            )
            return False

        # verify 'level' query of contrail logs.
        bug_1353624_fix = False
        if bug_1353624_fix:
            with settings(host_string='%s@%s' %
                          (self.inputs.host_data[self.inputs.collector_ips[0]]
                           ['username'], self.inputs.collector_ips[0]),
                          password=self.inputs.host_data[
                              self.inputs.collector_ips[0]]['password'],
                          warn_only=True,
                          abort_on_prompts=False):
                for each_severity in list_of_severity:
                    cmd = "contrail-logs --last 4m --level " + \
                        str(each_severity) + " | wc -l"
                    output = run('%s' % (cmd), pty=True)
                    if int(output) < 1:
                        self.logger.error(
                            "Syslog message with severity %s was not found." %
                            (each_severity))
                        result_flag = 1
                    else:
                        self.logger.info(
                            "Syslog message with severity %s was found." %
                            (each_severity))

            if result_flag != 0:
                self.logger.error(
                    "Error in transmitting or receiving some syslog severities."
                )
                return False

        # send 100 messages greater than 1024 bytes with a delay of 1 sec
        # between each message. This delay factor is expected to be brought
        # down through bug fix.
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[comp_node_ip]['username'], comp_node_ip),
                password=self.inputs.host_data[comp_node_ip]['password'],
                warn_only=True,
                abort_on_prompts=False):
            cmd = "~/mylogging.py send_messages_grater_than_1024_bytes"
            run('%s' % (cmd), pty=True, timeout=120)

        # verify all 100 messages of 1074 bytes are received.
        time.sleep(2)  # for database sync.
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[self.inputs.collector_ips[0]]['username'],
             self.inputs.collector_ips[0]),
                password=self.inputs.host_data[
                    self.inputs.collector_ips[0]]['password'],
                warn_only=True,
                abort_on_prompts=False):
            cmd = "contrail-logs --last 3m --message-type Syslog | "
            cmd = cmd + "grep 'This is a 1074 byte message' | wc -l"
            output = run('%s' % (cmd), pty=True)
            if int(output) != 100:
                self.logger.error(
                    "Failed to receive all the messages greater than 1024 bytes over a tcp connection."
                )
                return False
            else:
                self.logger.info(
                    "Successfully received all the messages greater than 1024 bytes over a tcp connection."
                )

        # set up all nodes to send syslog messages to a single collector and
        # verify syslog messages are written into the db properly with the
        # node name tags as expected.
        for each_node_ip in self.inputs.host_ips:
            update_rsyslog_client_connection_details(
                self,
                node_ip=each_node_ip,
                server_ip=self.inputs.collector_ips[0],
                protocol='tcp',
                restart=True)

        # copy test files to all the nodes and send remote syslog test message.
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[self.inputs.cfgm_ips[0]]['username'],
             self.inputs.cfgm_ips[0]),
                password=self.inputs.host_data[
                    self.inputs.cfgm_ips[0]]['password'],
                warn_only=True,
                abort_on_prompts=False):
            for each_node_ip in self.inputs.host_ips:
                host_node = {
                    'username':
                    self.inputs.host_data[each_node_ip]['username'],
                    'password':
                    self.inputs.host_data[each_node_ip]['password'],
                    'ip': each_node_ip
                }
                path = os.getcwd() + '/serial_scripts/rsyslog/mylogging.py'
                copy_file_to_server(host_node, path, '~/', 'mylogging.py')
                path = os.getcwd() + '/serial_scripts/rsyslog/message.txt'
                copy_file_to_server(host_node, path, '~/', 'message.txt')

        for each_node_ip in self.inputs.host_ips:
            with settings(
                    host_string='%s@%s' %
                (self.inputs.host_data[each_node_ip]['username'],
                 each_node_ip),
                    password=self.inputs.host_data[each_node_ip]['password'],
                    warn_only=True,
                    abort_on_prompts=False):
                cmd = "chmod 777 ~/mylogging.py"
                run('%s' % (cmd), pty=True)
                cmd = "~/mylogging.py send_test_log_message"
                run('%s' % (cmd), pty=True)
                # time.sleep(0.5)

        # verify syslog messages from each node through contrail logs.
        result_flag = 0
        with settings(
                host_string='%s@%s' %
            (self.inputs.host_data[self.inputs.collector_ips[0]]['username'],
             self.inputs.collector_ips[0]),
                password=self.inputs.host_data[
                    self.inputs.collector_ips[0]]['password'],
                warn_only=True,
                abort_on_prompts=False):
            cmd = "contrail-logs --last 2m --message-type Syslog | grep 'Test Syslog Messages from different nodes.'"
            output = run('%s' % (cmd), pty=True)
            for each_host in self.inputs.host_names:
                search_pattern = ' ' + each_host + ' '
                if search_pattern in output:
                    self.logger.info(
                        "Syslog message from host %s received successfully." %
                        (each_host))
                else:
                    self.logger.error(
                        "Syslog message from host %s was not received." %
                        (each_host))
                    result_flag = 1

        if result_flag != 0:
            self.logger.error(
                "Error in transmitting or receiving some syslog messages")
            return False

        return True
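
Every remote step in this test repeats the same four-argument settings(...) block; since settings() just returns a context manager, the boilerplate can be factored into a helper. A sketch of that refactor, assuming the test's host_data layout (the helper name is hypothetical):

from fabric.api import settings

def node_session(host_data, ip):
    """Build a settings() context for one node from the test's host_data dict."""
    return settings(
        host_string='%s@%s' % (host_data[ip]['username'], ip),
        password=host_data[ip]['password'],
        warn_only=True,
        abort_on_prompts=False)

# usage inside the test:
#     with node_session(self.inputs.host_data, comp_node_ip):
#         run('~/mylogging.py send_test_log_message', pty=True)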
Example #33
    def update_default_quota_list(self,
                                  subnet=-1,
                                  virtual_network=-1,
                                  floating_ip=-1,
                                  logical_router=-1,
                                  security_group_rule=-1,
                                  virtual_machine_interface=-1,
                                  security_group=-1):
        contrail_api_file_list = []

        # Copy the contrail-api.conf to /tmp/ and restore it later

        for cfgm_ip in self.inputs.cfgm_ips:
            api_file_name = get_random_name('contrail-api')
            contrail_api_file_list.append(api_file_name)
            issue_cmd = "cp " + contrail_api_conf + " /tmp/" + \
                api_file_name
            output = self.inputs.run_cmd_on_server(
                cfgm_ip, issue_cmd, self.inputs.host_data[cfgm_ip]['username'],
                self.inputs.host_data[cfgm_ip]['password'])

        self.addCleanup(self.restore_default_quota_list,
                        contrail_api_file_list)

        # Fetch the contrail-api.conf from all config nodes to active cfgm's
        # /tmp/

        api_file_list = []
        api_file_list.append(contrail_api_conf)
        for cfgm_ip in self.inputs.cfgm_ips[1:]:
            with settings(
                    host_string='%s@%s' %
                (self.inputs.host_data[cfgm_ip]['username'], cfgm_ip)):
                api_conf_file = get_random_name('contrail-api-remote')
                api_file_list.append('/tmp/' + api_conf_file)
                get(contrail_api_conf, '/tmp/' + api_conf_file)

        # Edit the contrail-api.conf files adding quota sections

        for api_conf in api_file_list:
            api_conf_h = open(api_conf, 'a')
            config = ConfigParser.ConfigParser()
            config.add_section('QUOTA')
            config.set('QUOTA', 'subnet', subnet)
            config.set('QUOTA', 'virtual_network', virtual_network)
            config.set('QUOTA', 'floating_ip', floating_ip)
            config.set('QUOTA', 'logical_router', logical_router)
            config.set('QUOTA', 'security_group', security_group)
            config.set('QUOTA', 'security_group_rule', security_group_rule)
            config.set('QUOTA', 'virtual_machine_interface',
                       virtual_machine_interface)
            config.write(api_conf_h)
            api_conf_h.close()

        # Put back updated contrail-api.conf file to respective cfgm's remove
        # temp files

        count = 1
        for cfgm_ip in self.inputs.cfgm_ips[1:]:
            with settings(
                    host_string='%s@%s' %
                (self.inputs.host_data[cfgm_ip]['username'], cfgm_ip)):
                put(api_file_list[count], contrail_api_conf)
                issue_cmd = "rm -rf " + api_file_list[count]
                output = self.inputs.run_cmd_on_server(
                    cfgm_ip, issue_cmd,
                    self.inputs.host_data[cfgm_ip]['username'],
                    self.inputs.host_data[cfgm_ip]['password'])
                count = count + 1

        # Restart contrail-api service on all cfgm nodes

        for cfgm_ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-api', [cfgm_ip])

        cs_obj = ContrailStatusChecker(self.inputs)
        clusterstatus, error_nodes = cs_obj.wait_till_contrail_cluster_stable()
        assert clusterstatus, ('Hash of error nodes and services : %s' %
                               (error_nodes))
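
For reference, a sketch of the [QUOTA] section this helper appends to each contrail-api.conf, using the method's defaults (in Contrail, -1 conventionally means no limit):

[QUOTA]
subnet = -1
virtual_network = -1
floating_ip = -1
logical_router = -1
security_group = -1
security_group_rule = -1
virtual_machine_interface = -1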
Example #34
0
    def get_ring_description(self):
        with settings(host_string=env.hosts[0]):
            with hide('output'):
                ring_description = sudo(self.nodetool_path + ' ring')
        return ring_description
Example #35
0
def execute(task, *args, **kwargs):
    """
    Execute ``task`` (callable or name), honoring host/role decorators, etc.

    ``task`` may be an actual callable object, or it may be a registered task
    name, which is used to look up a callable just as if the name had been
    given on the command line (including :ref:`namespaced tasks <namespaces>`,
    e.g. ``"deploy.migrate"``.

    The task will then be executed once per host in its host list, which is
    (again) assembled in the same manner as CLI-specified tasks: drawing from
    :option:`-H`, :ref:`env.hosts <hosts>`, the `~fabric.decorators.hosts` or
    `~fabric.decorators.roles` decorators, and so forth.

    ``host``, ``hosts``, ``role``, ``roles`` and ``exclude_hosts`` kwargs will
    be stripped out of the final call, and used to set the task's host list, as
    if they had been specified on the command line like e.g. ``fab
    taskname:host=hostname``.

    Any other arguments or keyword arguments will be passed verbatim into
    ``task`` (the function itself -- not the ``@task`` decorator wrapping your
    function!) when it is called, so ``execute(mytask, 'arg1',
    kwarg1='value')`` will (once per host) invoke ``mytask('arg1',
    kwarg1='value')``.

    :returns:
        a dictionary mapping host strings to the given task's return value for
        that host's execution run. For example, ``execute(foo, hosts=['a',
        'b'])`` might return ``{'a': None, 'b': 'bar'}`` if ``foo`` returned
        nothing on host `a` but returned ``'bar'`` on host `b`.

        In situations where a task execution fails for a given host but overall
        progress does not abort (such as when :ref:`env.skip_bad_hosts
        <skip-bad-hosts>` is True) the return value for that host will be the
        error object or message.

    .. seealso::
        :ref:`The execute usage docs <execute>`, for an expanded explanation
        and some examples.

    .. versionadded:: 1.3
    .. versionchanged:: 1.4
        Added the return value mapping; previously this function had no defined
        return value.
    """
    my_env = {'clean_revert': True}
    results = {}
    # Obtain task
    is_callable = callable(task)
    if not (is_callable or _is_task(task)):
        # Assume string, set env.command to it
        my_env['command'] = task
        task = crawl(task, state.commands)
        if task is None:
            abort("%r is not callable or a valid task name" % (task, ))
    # Set env.command if we were given a real function or callable task obj
    else:
        dunder_name = getattr(task, '__name__', None)
        my_env['command'] = getattr(task, 'name', dunder_name)
    # Normalize to Task instance if we ended up with a regular callable
    if not _is_task(task):
        task = WrappedCallableTask(task)
    # Filter out hosts/roles kwargs
    new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
    # Set up host list
    my_env['all_hosts'] = task.get_hosts(hosts, roles, exclude_hosts,
                                         state.env)

    parallel = requires_parallel(task)
    if parallel:
        # Import multiprocessing if needed, erroring out usefully
        # if it can't.
        try:
            import multiprocessing
        except ImportError:
            import traceback
            tb = traceback.format_exc()
            abort(tb + """
    At least one task needs to be run in parallel, but the
    multiprocessing module cannot be imported (see above
    traceback.) Please make sure the module is installed
    or that the above ImportError is fixed.""")
    else:
        multiprocessing = None

    # Get pool size for this task
    pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size)
    # Set up job queue in case parallel is needed
    queue = multiprocessing.Queue() if parallel else None
    jobs = JobQueue(pool_size, queue)
    if state.output.debug:
        jobs._debug = True

    # Call on host list
    if my_env['all_hosts']:
        # Attempt to cycle on hosts, skipping if needed
        for host in my_env['all_hosts']:
            try:
                results[host] = _execute(task, host, my_env, args, new_kwargs,
                                         jobs, queue, multiprocessing)
            except NetworkError as e:
                results[host] = e
                # Backwards compat test re: whether to use an exception or
                # abort
                if not state.env.use_exceptions_for['network']:
                    func = warn if state.env.skip_bad_hosts else abort
                    error(e.message, func=func, exception=e.wrapped)
                else:
                    raise

            # If requested, clear out connections here and not just at the end.
            if state.env.eagerly_disconnect:
                disconnect_all()

        # If running in parallel, block until job queue is emptied
        if jobs:
            err = "One or more hosts failed while executing task '%s'" % (
                my_env['command'])
            jobs.close()
            # Abort if any children did not exit cleanly (fail-fast).
            # This prevents Fabric from continuing on to any other tasks.
            # Otherwise, pull in results from the child run.
            ran_jobs = jobs.run()
            for name, d in iteritems(ran_jobs):
                if d['exit_code'] != 0:
                    if isinstance(d['results'], BaseException):
                        error(err, exception=d['results'])
                    else:
                        error(err)
                results[name] = d['results']

    # Or just run once for local-only
    else:
        with settings(**my_env):
            results['<local-only>'] = task.run(*args, **new_kwargs)
    # Return what we can from the inner task executions
    return results
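
A minimal usage sketch of the API described in the docstring above; the task body and host names are hypothetical:

from fabric.api import task, run, execute

@task
def uptime():
    return run('uptime')

@task
def check_all():
    # Host kwargs are stripped out of the call and used to build the host list.
    results = execute(uptime, hosts=['web1', 'web2'])
    # results maps each host string to the task's return value,
    # e.g. {'web1': '... load average ...', 'web2': '... load average ...'}.
    for host, value in results.items():
        print('%s -> %s' % (host, value))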
Example #36
0
def reset_database(host):
    manage_dot_py = _get_manage_dot_py(host)
    with settings(host_string=f'ubuntu@{host}'):
        run(f'{manage_dot_py} flush --noinput')  # replace for LiveServerTestCase on remote
Example #37
0
    def upload_cluster_backups(self, snapshot, incremental_backups):
        logging.info('Uploading backups')
        with settings(parallel=True, pool_size=self.connection_pool_size):
            execute(self.upload_node_backups, snapshot, incremental_backups)
Example #38
0
    def upload_resources(self,
                         user,
                         project_name,
                         working_root=None,
                         static_prefix=None):
        """
        upload static resource files if they exist.

        only works for django+nginx projects (nginx started as the nginx user).

        cabric will upload:

            - static    django resource directory
            - assets    webpack resource directory

        .. todo::
            use remote path to validate install

        :return:
        """

        remote_root = self.get_remote_project_path(user, project_name)

        working_root = working_root or os.getcwd()
        django_manage = os.path.join(working_root, 'manage.py')

        if not os.path.exists(django_manage):
            self.warn(
                "local root is not a django project,skip upload resources")
            return

        with settings(warn_only=True):
            if run("test -f %s/manage.py" % remote_root).failed:
                self.warn("deploy project is not django project,"
                          "skip upload resources")
                return

        try:
            nginx_home = get_home('nginx')
        except ValueError:
            self.warn("remote server only support nginx "
                      "and must use nginx user start,"
                      "skip deploy static resources...")
            return

        static_prefix = static_prefix or ''
        nginx_static_root = os.path.join(nginx_home, static_prefix, 'static')

        # collect static files by user
        # fabric_local('python manage.py collectstatic --noinput')

        with settings(warn_only=True):
            run('test -e {0} || mkdir -p {0}'.format(nginx_static_root))

        static_root_list = [
            os.path.join(working_root, 'static'),
            os.path.join(working_root, 'assets')
        ]

        for v in static_root_list:
            if os.path.exists(v):
                put(v, nginx_static_root)
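
A hedged invocation sketch for the method above; the deployer object and argument values are hypothetical:

deployer.upload_resources(user='www', project_name='mysite',
                          static_prefix='mysite')
# With these arguments, the local ./static and ./assets directories
# (when present) are uploaded under <nginx home>/mysite/static remotely.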
Example #39
0
def reset_database(host):
    manage_dot_py = _get_manage_dot_py(host)
    with settings(host_string=f'joachimhagege@{host}'):
        run(f'{manage_dot_py} flush --noinput')
Example #40
0
def reset_database(host, port):
    manage_dot_py = _get_manage_dot_py(host)
    with settings(host_string=f'slane@{host}:{port}'):
        run(f'{manage_dot_py} flush --noinput')
Example #41
0
def killfirefox():
    with settings(warn_only=True):
        run("killall firefox")
Example #42
0
    def fixup_vhost0_interface_configs(self):
        if self.pdist in ['centos', 'fedora', 'redhat']:
            ## make ifcfg-vhost0
            with open('%s/ifcfg-vhost0' % self._temp_dir_name, 'w') as f:
                f.write('''#Contrail vhost0
DEVICE=vhost0
ONBOOT=yes
BOOTPROTO=none
IPV6INIT=no
USERCTL=yes
IPADDR=%s
NETMASK=%s
NM_CONTROLLED=no
#NETWORK MANAGER BUG WORKAROUND
SUBCHANNELS=1,2,3
''' % (self.vhost_ip, self.netmask))
                # Don't set gateway and DNS on vhost0 if on non-mgmt network
                if not self.multi_net:
                    if self.gateway:
                        f.write('GATEWAY=%s\n' %( self.gateway ) )
                    dns_list = self.get_dns_servers(self.dev)
                    for i, dns in enumerate(dns_list):
                        f.write('DNS%d=%s\n' % (i+1, dns))
                    domain_list = self.get_domain_search_list()
                    if domain_list:
                        f.write('DOMAIN="%s"\n'% domain_list)

                prsv_cfg = []
                mtu = self.get_if_mtu(self.dev)
                if mtu:
                    dcfg = 'MTU=%s' % str(mtu)
                    f.write(dcfg + '\n')
                    prsv_cfg.append(dcfg)
                f.flush()
            if self.dev != 'vhost0':
                with settings(warn_only=True):
                    local("sudo mv %s/ifcfg-vhost0 /etc/sysconfig/network-scripts/ifcfg-vhost0" % (self._temp_dir_name))
                    local("sync")
                ## make ifcfg-$dev
                if not os.path.isfile(
                        '/etc/sysconfig/network-scripts/ifcfg-%s.rpmsave' % self.dev):
                    with settings(warn_only=True):
                        local("sudo cp /etc/sysconfig/network-scripts/ifcfg-%s /etc/sysconfig/network-scripts/ifcfg-%s.rpmsave" % (self.dev, self.dev))
                self._rewrite_ifcfg_file('%s/ifcfg-%s' % (self._temp_dir_name, self.dev), self.dev, prsv_cfg)

                if self.multi_net:
                    self.migrate_routes(self.dev)

                with settings(warn_only=True):
                    local("sudo mv %s/ifcfg-%s /etc/contrail/" % (self._temp_dir_name, self.dev))

                    local("sudo chkconfig network on")
                    local("sudo chkconfig supervisor-vrouter on")
        # end self.pdist == centos | fedora | redhat
        # setup lbaas prereqs
        self.setup_lbaas_prereq()

        if self.pdist in ['Ubuntu']:
            self._rewrite_net_interfaces_file(self.dev, self.mac, self.vhost_ip, self.netmask, self.gateway,
                        self._args.vmware, self._args.vmware_vmpg_vswitch_mtu,
                        self._args.vmware_datanic_mtu)
        # end self.pdist == ubuntu

        else: # of if self.dev and self.dev != 'vhost0'
            if not os.path.isfile("/etc/contrail/contrail-vrouter-agent.conf"):
                if os.path.isfile("/opt/contrail/contrail_installer/contrail_config_templates/agent_xml2ini.py"):
                    local("sudo python /opt/contrail/contrail_installer/contrail_config_templates/agent_xml2ini.py")
Example #43
0
def reset_database(host, user):
    manage_dot_py = _get_manage_dot_py(host)
    with settings(host_string=f'{user}@{host}'):
        run(f'{manage_dot_py} flush --noinput')
Example #44
0
def create_session_on_server(host, user, email):
    manage_dot_py = _get_manage_dot_py(host)
    with settings(host_string=f'{user}@{host}'):
        session_key = run(f'{manage_dot_py} create_session {email}')
        return session_key.strip()
Example #45
0
    def upload_deploy_key(self, private_key, remote_user, project_name):
        """
        upload deploy key

        :param string private_key: private key local path,
            default is ~/.ssh/.deploies/`github`.rsa
        :param string remote_user: remote user name to deploy as
        :param string project_name: a project name
        :param string github: github repo name
        :param bool force_renew: try to replace the deploy
                    key when using auto-generation
        :param int key_length: must be a legal ssh key length value,
                    default is 8192


        .. note::

            if you use github and want to use the
            auto-generated private-key feature,
            there are two ways to do this:

                - set an access token in your ~/.gitconfig file
                - disable github two-factor authentication and
                    input your username and password.

            currently, we use `<remote_user>@cabric` as our deploy key name,
            so if you upload a key for some other purpose,
            don't use `@cabric` as the key suffix.


            if a github deploy key already exists and you want to
            replace it, you must set the `--fresh-new' option.


            cabric allows each machine to deploy multiple github projects,
            but disallows deploying two projects with the same name
            under one user.

            if you still want to do this:
                - set the github value if you use it.
                - deploy them under different remote users.


        .. note::

            currently, this only works on linux.

        :return:
        """

        if not os.path.exists(private_key):
            self.error("deploy key `%s' is not exists,please set it." %
                       private_key)

        if os.path.exists(private_key):
            self.print_message("upload deploy key...")
            remote_key = self.get_remote_key(remote_user, project_name)
            remote_key_root = os.path.dirname(remote_key)

            run('test -e {0} || mkdir -p {0}'.format(remote_key_root),
                remote_user)
            with settings(warn_only=True):
                run('chmod 700 -Rf {}'.format(remote_key_root), remote_user)

            fabric_put(private_key, remote_key)
            run('chmod 600 -f {}'.format(remote_key))
            run('chown {1} -f {0}'.format(remote_key, remote_user))
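
A hypothetical call, assuming the cabric defaults described in the docstring above:

import os

deployer.upload_deploy_key(
    private_key=os.path.expanduser('~/.ssh/.deploies/github.rsa'),  # hypothetical key path
    remote_user='www',
    project_name='mysite',
)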
Example #46
0
    def __install_tools(self):
        """
        Installs the spark-master-discovery init script and its companion spark-tools. The latter
        is a Python package distribution that's included in cgcloud-spark as a resource. This is
        in contrast to the cgcloud agent, which is a standalone distribution.
        """
        tools_dir = install_dir + '/tools'
        admin = self.admin_account()
        sudo(fmt('mkdir -p {tools_dir}'))
        sudo(fmt('chown {admin}:{admin} {tools_dir}'))
        run(fmt('virtualenv --no-pip {tools_dir}'))
        run(fmt('{tools_dir}/bin/easy_install pip==1.5.2'))

        with settings(forward_agent=True):
            with self._project_artifacts('spark-tools') as artifacts:
                pip(use_sudo=True,
                    path=tools_dir + '/bin/pip',
                    args=concat('install', artifacts))
        sudo(fmt('chown -R root:root {tools_dir}'))

        spark_tools = "SparkTools(**%r)" % dict(user=user,
                                                shared_dir=self._shared_dir(),
                                                install_dir=install_dir,
                                                ephemeral_dir=ephemeral_dir,
                                                persistent_dir=persistent_dir,
                                                lazy_dirs=self.lazy_dirs)

        self.lazy_dirs = None  # make sure it can't be used anymore once we are done with it

        self._register_init_script(
            "sparkbox",
            heredoc("""
                description "Spark/HDFS master discovery"
                console log
                start on (local-filesystems and net-device-up IFACE!=lo)
                stop on runlevel [!2345]
                pre-start script
                for i in 1 2 3; do if {tools_dir}/bin/python2.7 - <<END
                import logging
                logging.basicConfig( level=logging.INFO )
                from cgcloud.spark_tools import SparkTools
                spark_tools = {spark_tools}
                spark_tools.start()
                END
                then exit 0; fi; echo Retrying in 60s; sleep 60; done; exit 1
                end script
                post-stop script
                {tools_dir}/bin/python2.7 - <<END
                import logging
                logging.basicConfig( level=logging.INFO )
                from cgcloud.spark_tools import SparkTools
                spark_tools = {spark_tools}
                spark_tools.stop()
                END
                end script"""))

        script_path = "/usr/local/bin/sparkbox-manage-slaves"
        put(remote_path=script_path,
            use_sudo=True,
            local_path=StringIO(
                heredoc("""
            #!{tools_dir}/bin/python2.7
            import sys
            import logging
            # Prefix each log line to make it more obvious that it's the master logging when the
            # slave calls this script via ssh.
            logging.basicConfig( level=logging.INFO,
                                 format="manage_slaves: " + logging.BASIC_FORMAT )
            from cgcloud.spark_tools import SparkTools
            spark_tools = {spark_tools}
            spark_tools.manage_slaves( slaves_to_add=sys.argv[1:] )""")))
        sudo(fmt("chown root:root {script_path} && chmod 755 {script_path}"))
Example #47
0
def disable_site():
    """Disables the site"""
    require('environment', provided_by=[ubuntu, fedora])

    with settings(hide('stdout', 'stderr')):
        _switch_site(False)
Example #48
0
    def get(self,
            remote_path,
            local_path,
            use_sudo,
            local_is_path,
            rremote=None,
            temp_dir=""):
        from fabric.api import sudo, hide

        # rremote => relative remote path, so get(/var/log) would result in
        # this function being called with
        # remote_path=/var/log/apache2/access.log and
        # rremote=apache2/access.log
        rremote = rremote if rremote is not None else remote_path
        # Handle format string interpolation (e.g. %(dirname)s)
        path_vars = {
            'host': env.host_string.replace(':', '-'),
            'basename': os.path.basename(rremote),
            'dirname': os.path.dirname(rremote),
            'path': rremote
        }

        if local_is_path:
            # Fix for issue #711 and #1348 - escape %'s as well as possible.
            format_re = r'(%%(?!\((?:%s)\)\w))' % '|'.join(path_vars.keys())
            escaped_path = re.sub(format_re, r'%\1', local_path)
            local_path = os.path.abspath(escaped_path % path_vars)

            # Ensure we give ssh.SFTPCLient a file by prepending and/or
            # creating local directories as appropriate.
            dirpath, filepath = os.path.split(local_path)
            if dirpath and not os.path.exists(dirpath):
                os.makedirs(dirpath)
            if os.path.isdir(local_path):
                local_path = os.path.join(local_path, path_vars['basename'])

        if output.running:
            print("[%s] download: %s <- %s" %
                  (env.host_string, _format_local(local_path,
                                                  local_is_path), remote_path))
        # Warn about overwrites, but keep going
        if local_is_path and os.path.exists(local_path):
            msg = "Local file %s already exists and is being overwritten."
            warn(msg % local_path)

        # When using sudo, "bounce" the file through a guaranteed-unique file
        # path in the default remote CWD (which, typically, the login user will
        # have write permissions on) in order to sudo(cp) it.
        if use_sudo:
            target_path = remote_path
            hasher = hashlib.sha1()
            hasher.update(env.host_string)
            hasher.update(target_path)
            target_path = posixpath.join(temp_dir, hasher.hexdigest())
            # Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
            # (The target path has already been cwd-ified elsewhere.)
            with settings(hide('everything'), cwd=""):
                sudo('cp -p "%s" "%s"' % (remote_path, target_path))
                # The user should always own the copied file.
                sudo('chown %s "%s"' % (env.user, target_path))
                # Only root and the user has the right to read the file
                sudo('chmod %o "%s"' % (0400, target_path))
                remote_path = target_path

        try:
            # File-like objects: reset to file seek 0 (to ensure full overwrite)
            # and then use Paramiko's getfo() directly
            getter = self.ftp.get
            if not local_is_path:
                local_path.seek(0)
                getter = self.ftp.getfo
            getter(remote_path, local_path)
        finally:
            # try to remove the temporary file after the download
            if use_sudo:
                with settings(hide('everything'), cwd=""):
                    sudo('rm -f "%s"' % remote_path)

        # Return local_path object for posterity. (If mutated, caller will want
        # to know.)
        return local_path
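
The %(...)s interpolation handled above is what lets callers template the local path when using fabric.api.get; a short sketch with hypothetical host and paths:

from fabric.api import get

# On host web1:22 this downloads /var/log/syslog to ./logs/web1-22/syslog
# ('host' has ':' replaced with '-', matching path_vars above).
get('/var/log/syslog', local_path='logs/%(host)s/%(basename)s')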
Example #49
0
    def clear_cluster_snapshot(self, snapshot):
        logging.info('Clearing snapshots')
        with settings(parallel=True, pool_size=self.connection_pool_size):
            execute(self.clear_node_snapshot, snapshot)
Example #50
0
def prepare_hidden_files():
    with settings(hide('warnings'), warn_only=True):
        sudo(
            'cp {basepath}/secret_facegame_settings.py {project_root}/facegame/settings/'
            .format(**env))
Example #51
0
    def start_cluster_backup(self, snapshot, incremental_backups=False):
        logging.info('Creating snapshots')
        with settings(parallel=True, pool_size=self.connection_pool_size):
            execute(self.node_start_backup, snapshot, incremental_backups)
Example #52
0
def ownership():
    sudo('chown -fR {owner} {basepath}/media'.format(**env))
    sudo('chown -fR {owner} {basepath}'.format(**env))
    with settings(hide('warnings'), warn_only=True):
        sudo('chown -fR {owner} {basepath}/sqlite.db'.format(**env))
Example #53
0
def create_device_dirs(device):
    from fabric.api import env
    with settings(warn_only=True, user='******'):
        run("mkdir /builds/%s" % device)
    with settings(user='******'):
        run("chown cltbld.cltbld /builds/%s" % device)
Example #54
0
def remove_old_containers():
    with settings(warn_only=True):
        run('docker ps -aq| xargs docker rm')
Example #55
0
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None:  # Since queue is only set for parallel
        name = local_env['host_string']

        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the tasks' return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, env):
            state.env.update(env)

            def submit(result):
                queue.put({'name': name, 'result': result})

            try:
                key = normalize_to_string(state.env.host_string)
                state.connections.pop(key, "")
                submit(task.run(*args, **kwargs))
            except BaseException as e:  # We really do want to capture everything
                # SystemExit implies use of abort(), which prints its own
                # traceback, host info etc -- so we don't want to double up
                # on that. For everything else, though, we need to make
                # clear what host encountered the exception that will
                # print.
                if e.__class__ is not SystemExit:
                    sys.stderr.write(
                        "!!! Parallel execution exception under host %r:\n" %
                        name)
                    submit(e)
                # Here, anything -- unexpected exceptions, or abort()
                # driven SystemExits -- will bubble up and terminate the
                # child process.
                raise

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name,
            'env': local_env,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
    # Handle serial execution
    else:
        with settings(**local_env):
            return task.run(*args, **kwargs)
Example #56
0
def remove_old_images():
    with settings(warn_only=True):
        run('docker image prune -f')
Example #57
0
def run_cmd_through_node(host_string,
                         cmd,
                         password=None,
                         gateway=None,
                         gateway_password=None,
                         with_sudo=False,
                         timeout=120,
                         as_daemon=False,
                         raw=False,
                         cd=None,
                         warn_only=True,
                         logger=None):
    """ Run command on remote node through another node (gateway).
        This is useful to run commands on VMs through compute node
    Args:
        host_string: host_string on which the command to run
        password: Password
        cmd: command
        gateway: host_string of the node through which host_string will connect
        gateway_password: Password of gateway hoststring
        with_sudo: use Sudo
        timeout: timeout
        cd: change directory to provided parameter
        as_daemon: run in background
        raw: If raw is True, will return the fab _AttributeString object itself without removing any unwanted output
    """
    logger = logger or contrail_logging.getLogger(__name__)
    fab_connections.clear()
    kwargs = {}
    if as_daemon:
        cmd = 'nohup ' + cmd + ' &'
        kwargs['pty'] = False

    if cd:
        cmd = 'cd %s; %s' % (cd, cmd)

    (username, host_ip) = host_string.split('@')

    if username == 'root':
        with_sudo = False

    shell = '/bin/bash -l -c'

    if username == 'cirros':
        shell = '/bin/sh -l -c'

    _run = safe_sudo if with_sudo else safe_run

    #with hide('everything'), settings(host_string=host_string,
    with settings(host_string=host_string,
                  gateway=gateway,
                  warn_only=warn_only,
                  shell=shell,
                  disable_known_hosts=True,
                  abort_on_prompts=False):
        env.forward_agent = True
        gateway_hoststring = gateway if re.match(r'\w+@[\d\.]+:\d+',
                                                 gateway) else gateway + ':22'
        node_hoststring = host_string if re.match(
            r'\w+@[\d\.]+:\d+', host_string) else host_string + ':22'
        if password:
            env.passwords.update({node_hoststring: password})
            # If gateway_password is not set, guess same password
            # (if key is used, it will be tried before password)
            if not gateway_password:
                env.passwords.update({gateway_hoststring: password})

        if gateway_password:
            env.passwords.update({gateway_hoststring: gateway_password})
            if not password:
                env.passwords.update({node_hoststring: gateway_password})

        logger.debug(cmd)
        tries = 1
        output = None
        while tries > 0:
            try:
                output = _run(cmd, timeout=timeout, **kwargs)
            except CommandTimeout:
                pass
            if (output) and ('Fatal error' in output):
                tries -= 1
                time.sleep(5)
            else:
                break
        # end while

        if not raw:
            real_output = remove_unwanted_output(output)
        else:
            real_output = output
        return real_output
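
A hedged usage sketch for the helper above; all host strings and passwords are hypothetical:

# Run 'uptime' on a VM that is only reachable through its compute node,
# which acts as the SSH gateway.
output = run_cmd_through_node(
    host_string='cirros@169.254.0.5',
    cmd='uptime',
    password='vm-password',
    gateway='root@10.0.0.2',
    gateway_password='node-password',
)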
Example #58
0
    def run(txt, prompts):
        with settings(prompts=prompts):
            # try to fulfil the OutputLooper interface, only want to test
            # _get_prompt_response. (str has a method upper)
            ol = OutputLooper(str, 'upper', None, list(txt), None)
            return ol._get_prompt_response()
Example #59
0
    def __init__(self, args_str=None):
        #print sys.argv[1:]
        self._args = None
        if not args_str:
            args_str = ' '.join(sys.argv[1:])
        self._parse_args(args_str)

        if self._args.storage_setup_mode == 'unconfigure':
            return

        NOVA_CONF = '/etc/nova/nova.conf'
        LIBVIRTD_CONF = '/etc/libvirt/libvirtd.conf'
        LIBVIRTD_TMP_CONF = '/tmp/libvirtd.conf'
        LIBVIRTD_CENTOS_BIN_CONF = '/etc/sysconfig/libvirtd'
        LIBVIRTD_UBUNTU_BIN_CONF = '/etc/default/libvirt-bin'
        LIBVIRTD_TMP_BIN_CONF = '/tmp/libvirtd.tmp'

        for hostname, entries, entry_token in zip(
                self._args.storage_hostnames, self._args.storage_hosts,
                self._args.storage_host_tokens):
            if entries != self._args.storage_master:
                with settings(host_string='root@%s' % (entries),
                              password=entry_token):
                    if self._args.add_storage_node:
                        if self._args.add_storage_node != hostname:
                            continue
                    run('openstack-config --set %s DEFAULT live_migration_flag VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE'
                        % (NOVA_CONF))
                    run('openstack-config --set %s DEFAULT vncserver_listen 0.0.0.0'
                        % (NOVA_CONF))
                    run('cat %s | sed s/"#listen_tls = 0"/"listen_tls = 0"/ | sed s/"#listen_tcp = 1"/"listen_tcp = 1"/ | sed s/\'#auth_tcp = "sasl"\'/\'auth_tcp = "none"\'/ > %s'
                        % (LIBVIRTD_CONF, LIBVIRTD_TMP_CONF),
                        shell='/bin/bash')
                    run('cp -f %s %s' % (LIBVIRTD_TMP_CONF, LIBVIRTD_CONF))
                    libvirtd = run('ls %s 2>/dev/null |wc -l' %
                                   (LIBVIRTD_CENTOS_BIN_CONF))
                    if libvirtd != '0':
                        run('cat %s | sed s/"#LIBVIRTD_ARGS=\"--listen\""/"LIBVIRTD_ARGS=\"--listen\""/ > %s'
                            %
                            (LIBVIRTD_CENTOS_BIN_CONF, LIBVIRTD_TMP_BIN_CONF),
                            shell='/bin/bash')
                        run('cp -f %s %s' %
                            (LIBVIRTD_TMP_BIN_CONF, LIBVIRTD_CENTOS_BIN_CONF))
                        run('service openstack-nova-compute restart')
                        run('service libvirtd restart')

                    libvirtd = run('ls %s 2>/dev/null |wc -l' %
                                   (LIBVIRTD_UBUNTU_BIN_CONF))
                    if libvirtd != '0':
                        libvirt_configured = run(
                            'cat %s |grep "\-d \-l"| wc -l' %
                            (LIBVIRTD_UBUNTU_BIN_CONF))
                        if libvirt_configured == '0':
                            run('cat %s | sed s/"-d"/"-d -l"/ > %s' %
                                (LIBVIRTD_UBUNTU_BIN_CONF,
                                 LIBVIRTD_TMP_BIN_CONF),
                                shell='/bin/bash')
                            run('cp -f %s %s' % (LIBVIRTD_TMP_BIN_CONF,
                                                 LIBVIRTD_UBUNTU_BIN_CONF))
                            run('service nova-compute restart')
                            run('service libvirt-bin restart')
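
The sed pipeline above effectively flips three settings in /etc/libvirt/libvirtd.conf, leaving lines like:

listen_tls = 0
listen_tcp = 1
auth_tcp = "none"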
Example #60
0
def reset_database(host, name='server'):  # name - the user name on the server
    '''reset the database'''
    manage_dot_py = _get_manage_dot_py(host)
    with settings(host_string=f'{name}@{host}'):  # set the host string via the settings() context manager
        run(f'{manage_dot_py} flush --noinput')  # inside the context manager we invoke commands (here, run) as if we were in fabfile.py