Example #1
def getMeasures():
    with hide('everything'):
        timestamp = datetime.datetime.utcnow().replace(tzinfo=utc)
        now = timestamp - datetime.timedelta(seconds=timestamp.second, microseconds=timestamp.microsecond)
        env.skip_bad_hosts = True
        env.parallel = False
        env.timeout = 2
        env.connection_attempts = 1
        servers = Server.objects.filter(is_measuring=False).select_related()
        for server in servers:
            try:
                server.is_measuring = True
                server.save(force_update=True)
                logger.info("Getting measures for server '%s'" % server.hostname.encode('utf-8'))
                probes = server.probes.all()
                if len(probes) == 0:
                    continue
                env.hosts = [server.ip]
                env.user = server.username
                env.password = server.password
                with hide('running', 'stdout', 'stderr', 'user'):
                    value = execute(launch_command, probes)
                    outputs = value[server.ip]
                    for probe in probes:
                        if probe.graph_type.name in ['pie', 'text']:
                            Measure.objects.filter(server=server, probe=probe).delete()
                        Measure.objects.create(timestamp=now, server=server, probe=probe, value=outputs[probe.id])
                    server.state = outputs[-1]
            except Exception as e:
                logger.exception(e)
            finally:
                # the original is truncated here; presumably the measuring flag is
                # cleared so the server can be picked up again on the next run
                server.is_measuring = False
                server.save(force_update=True)
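
A note on the pattern shared by every snippet in this collection: hide() silences selected Fabric 1.x output groups ('running', 'stdout', 'stderr', 'warnings', or the catch-all 'everything') only for the duration of the with-block, while captured results stay usable. A minimal sketch with a hypothetical host and command:

from fabric.api import env, hide, run

env.hosts = ['server.example.com']  # hypothetical host

def disk_usage():
    # 'running' hides the "[host] run: ..." echo and 'stdout' hides the command's
    # output; the captured return value is still available to the caller.
    with hide('running', 'stdout'):
        output = run('df -h /')
    return output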
Example #2
    def handle(self, *args, **options):

        self.style = no_style()
        # manage.py execution-specific variables: verbosity
        # 0 = No output at all,
        # 1 = woven output only,
        # 2 = Fabric outputlevel = everything except debug.
        state.env.verbosity = int(options.get('verbosity', 1))

        # show_traceback = options.get('traceback', False)
        set_env.no_domain = True
        state.env.INTERACTIVE = options.get('interactive')
        if int(state.env.verbosity) < 2:
            with hide('warnings', 'running', 'stdout', 'stderr'):
                set_env()
        else:
            set_env()
        if not state.env.PIP_REQUIREMENTS:
            req_files = glob('req*')
        else:
            req_files = state.env.PIP_REQUIREMENTS
        dist_dir = os.path.join(os.getcwd(), 'dist')
        if not os.path.exists(dist_dir):
            os.mkdir(dist_dir)
        for r in req_files:
            bundle = ''.join([r.split('.')[0], '.zip'])
            command = 'pip bundle -r %s %s/%s' % (r, dist_dir, bundle)
            if state.env.verbosity:
                print command
            if int(state.env.verbosity) < 2:
                with hide('warnings', 'running', 'stdout', 'stderr'):
                    local(command)
            else:
                local(command)
Example #3
    def _grab_sosreports(self, all_hosts):
        """
        Create local directory
        for downloaded sosreports
        """
        timestamp = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S') 
        directory = 'sosreport-' + timestamp
        message = 'Creating directory ' + directory + " on localhost"
        self.info(message)
        with hide('everything'):
            mkdir_output = self.create_localdir(directory)
        if re.search('created', mkdir_output):
            self.success('localhost: Directory ' + directory
                         + ' created successfully')
        else:
            self.failure('localhost: Directory ' + directory
                         + ' creation failed')
        """
        Remove old sosreports lying around in /tmp
        """
        with hide('everything'):
            rm_output = self.run_command_on_hosts('rm -rf /tmp/sosreport*',
                                                  hosts=all_hosts)
        for host in all_hosts:
            if not rm_output[host]:
                self.success(host + ':old sosreports successfully removed')
            else:
                self.failure(host + ':old sosreports failed to be removed')

        with hide('everything'):
            """
            Execute sosreport on all hosts
            """
            sosreport_output = self.execute_sosreports_on_hosts(hosts=all_hosts)
        for host in all_hosts:
            """
            Confirm sosreport ran successfully;
            Download sosreport to local client
            """
            hostname = host.replace(".", '')
            sosfile = 'sosreport-' + hostname
            for output in sosreport_output[host].split('\n'):
                sosreport_file = re.search(sosfile, output, re.I)
                if sosreport_file:
                    self.success(host + ':sosreport finished - '
                                + output)
                    remote_path = output.strip()
                    local_file = remote_path.split('/')[2]
                    format_host = host.replace(".", "_")
                    local_path = directory + "/" + format_host + "-" + local_file
                    with hide('everything'):
                        cp_output = self.get_file_on_host(remote_path,
                                                          local_path,
                                                          host=host)
                    if cp_output:
                        self.success(host + ':sosreport downloaded - '
                                    + local_path)
                    else:
                        self.failure(host + ':sosreport failed to download - '
                                    + local_path)
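
The helpers used above (create_localdir, run_command_on_hosts, execute_sosreports_on_hosts, get_file_on_host) are methods of the surrounding class and are not shown; a minimal, assumed sketch of the download helper built on Fabric's get(), matching how cp_output is tested above:

from fabric.api import get, settings

def get_file_on_host(remote_path, local_path, host):
    # Assumed shape of the helper: fetch one file from one host and report
    # success as a truthy value, which is how the caller checks cp_output.
    with settings(host_string=host):
        result = get(remote_path, local_path)
    return result.succeeded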
Example #4
def put_dir_with_sudo(local_path, remote_path):
    # TODO: implement remote_path=None & return remote_path

    with lcd(local_path):
        source_basename = os.path.basename(local_path)

        print('Uploading {0} to {1}:{2}…'.format(local_path, env.host_string,
              remote_path), end='')
        sys.stdout.flush()

        save_file_name = '../{0}-copy-{1}.tar.gz'.format(source_basename,
                                                         generate_random_name())
        while os.path.exists(save_file_name):
            save_file_name = '../{0}-copy-{1}.tar.gz'.format(
                source_basename, generate_random_name())

        with hide('running', 'stdout', 'stderr'):
            local("tar -czf '{0}' . ".format(save_file_name))

        remote_dirname, remote_basename = remote_path.rsplit(os.sep, 1)

        with hide('running', 'stdout', 'stderr'):
            put(save_file_name, remote_dirname, use_sudo=True)
            local('rm -f "{0}"'.format(save_file_name))

        with cd(remote_dirname):
            with hide('running', 'stdout', 'stderr'):
                sudo('mkdir -p "{0}"'.format(remote_basename))

                with cd(remote_basename):
                    sudo('tar -xzf "{0}"'.format(save_file_name))
                    sudo('rm -f "{0}"'.format(save_file_name))

            print(' done.')
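
put_dir_with_sudo() relies on a generate_random_name() helper that is not shown; any unique-suffix generator satisfies the retry loop above, for example this assumed stand-in:

import uuid

def generate_random_name():
    # Stand-in only: the caller just needs a suffix unlikely to collide, and it
    # retries anyway if the resulting archive name already exists.
    return uuid.uuid4().hex[:8]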
Example #5
def commit(env='development', message='', push='n', test='y'):
    """
    Run tests, add, commit and push files for the project and extra apps.
    Notice this adds all changed files to the git index. This can be replaced by manual git commands if more granularity is needed.
    """

    project_settings = get_settings()
    projects = build_projects_vars()
    project = projects[env]

    if env != 'production':
        print "========================================================"
        print "COMMIT IN %s..." % env.upper()
        # TODO testing before committing
        #run_tests(env)
        for app in project_settings.EXTRA_APPS:
            if app[env]['dir'][:len(project['dir'])] == project['dir']:
                print "\nThe application %s is inside the project directory, no need to commit separately." % app['name']
            else:
                with settings(hide('warnings'), warn_only=True):
                    print "\nCommitting changes for application %s in %s." % (app['name'], app[env]['dir'])
                    local("cd %s && git add . && git commit -m '%s'" % (app[env]['dir'], message))
                    if push == 'y':
                        local("cd %s && git push" % app[env]['dir'])

        with settings(hide('warnings'), warn_only=True):
            print "\nCommitting changes in the directory project %s." % project['dir']
            local("cd %s && git add . && git commit -m '%s'" % (project['dir'], message))
            if push == 'y':
                local("cd %s && git push" % project['dir'])
        print "========================================================"
Example #6
def scm_get_info(scm_type, scm_ref=None, directory=False):

    scm_info = None

    if not scm_ref:
        scm_ref = scm_get_ref(scm_type, True)

    if not directory:
        directory = "."

    if scm_type.lower() == "svn":
        with lcd(directory):
            with hide("running"):
                xml = local("svn info --xml", capture=True)
                dom = minidom.parseString(xml)
                scm_info = {
                    "type": scm_type,
                    "rev": dom.getElementsByTagName("entry")[0].getAttribute("revision"),
                    "url": dom.getElementsByTagName("url")[0].firstChild.wholeText,
                    "branch": scm_ref,
                }

    elif scm_type.lower() == "git":
        with lcd(directory):
            with hide("running"):
                revision = local("git describe --always", capture=True)
                repo = local("git remote -v | grep fetch", capture=True)
                scm_info = {"type": scm_type, "rev": revision, "url": repo, "branch": scm_ref}

    return scm_info
Example #7
def start(name=None, ephemeral=False, environment=None, daemonize=True,
    command=None, **kwargs):
    """
    Starts a Container

    :param name: Name of container
    :param ephemeral: Disregard changes after stop (default: False)
    :param environment: Environment variables (list of KEY=VALUE strings)
    :param daemonize: Run as a daemon (default: True)
    :param command: Command to run (optional)

    """
    if not name:
        raise StandardError('You must specify a name')
    cmd = 'lxc-start -n {0} -c /tmp/{0}.lxc.console'.format(name)
    if ephemeral:
        cmd = 'lxc-start-ephemeral -o {0}'.format(name)
    if daemonize:
        cmd += ' -d'
    if command:
        cmd += ' -- {0}'.format(command)
    with hide('stdout',):
        sudo('nohup {0} > /dev/null 2>&1'.format(cmd))
    if environment:
        with cd(os.path.join(LXC_PATH, name)), hide('stdout'):
            env = '\n'.join(environment)
            sudo('echo \"{0}\" >> ./rootfs/etc/environment'.format(
                env))
    log.info('{0} started'.format(name))
Example #8
def config_couchdb():
    if files.exists('/etc/cozy/couchdb.login'):
        # CouchDB has an old admin
        with hide('running', 'stdout'):
            # Recover old password
            logins = sudo('cat /etc/cozy/couchdb.login')
            logsCouchDB = logins.split('\r\n')
            # Add new admin
            couch_admin_path = "@127.0.0.1:5984/_config/admins/"
            run('curl -X PUT http://%s:%s%s%s -d \'\"%s\"\'' %
                    (logsCouchDB[0], logsCouchDB[1], couch_admin_path, username, password))
            # Delete old admin
            run('curl -X DELETE http://%s:%s@127.0.0.1:5984/_config/admins/%s' %
                (username, password, logsCouchDB[0]))
            sudo('rm -rf /etc/cozy/couchdb.login')
    else:
        # CouchDB does not have an admin yet
        # Create admin
        with hide('running', 'stdout'):
            couch_admin_path = "127.0.0.1:5984/_config/admins/"
            run('curl -X PUT http://%s%s -d \'\"%s\"\'' %
                    (couch_admin_path, username, password))
        sudo('mkdir -p /etc/cozy')
    # Create file to keep admin's password
    require.files.file(path='/etc/cozy/couchdb.login',
        contents=username + "\n" + password,
        use_sudo=True,
        owner='cozy-data-system',
        mode='700'
    )
    print(green("CouchDB 1.3.0 successfully configured"))
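
config_couchdb() mixes plain Fabric calls with fabtools-style helpers; the imports it appears to assume, plus hypothetical username/password globals for the new admin, would look roughly like this (an inference from the names used above, not part of the original snippet):

from fabric.api import hide, run, sudo
from fabric.colors import green
from fabric.contrib import files
from fabtools import require

username = 'cozy-admin'  # hypothetical value
password = 's3cret'      # hypothetical value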
Example #9
 def _check_packages(self, all_hosts, packages):
     for package in packages:
         with hide('everything'):
             """
             Use rpm --query --all to confirm package
             exists; if not, issue a warning and install
             missing package
             """
             rpm_output = self.run_command_on_hosts('rpm ' 
                          + '--query --all ' + package,
                          hosts=all_hosts)
         for host in all_hosts:
             if re.search(package, rpm_output[host]):
                 self.success(host + ':Package found - ' + package)
             else:
                 self.warning(host + ':Package not installed - '
                             + package + '; Installing ' + package)
                 with hide('everything'):
                     yum_output = self.run_command_on_host('yum install '
                                  + ' --assumeyes --quiet --nogpgcheck ' + package,
                                  host=host)
                 if not yum_output:
                     self.success(host + ':Package installed - ' + package)
                 else:
                     self.failure(host + ':Package failed to install - ' + package)
Example #10
    def _install_multicast_test_prereq(self, java_components):
        """
        Install prerequisite package(s) for test to
        determine if environment allows multicast UDP connections
        between Eucalyptus Java components
 
        :param java_components: list of Eucalyptus Java components
        """
        packages = ["iperf"]
        for package in packages:
            with hide("everything"):
                """
                Use rpm --query --all to confirm package
                exists; if not, issue a warning and install
                missing package
                """
                rpm_output = self.run_command_on_hosts("rpm " + "--query --all " + package, java_components)
            for host in java_components:
                if re.search(package, rpm_output[host]):
                    self.success(host + ":Package found - " + package)
                else:
                    self.warning(host + ":Package not installed - " + package + "; Installing " + package)
                    with hide("everything"):
                        yum_output = self.run_command_on_host(
                            "yum install " + " --assumeyes --quiet --nogpgcheck " + package, host=host
                        )
                    if not yum_output:
                        self.success(host + ":Package installed - " + package)
                    else:
                        self.failure(host + ":Package failed to install - " + package)
Example #11
def install_mysql():
    """Install and configure a standard mysql5.1 server"""
    env.datadir = "/usr/local/var/db/mysql"
    env.root_password = '******'
    
    stop_mysql()
    
    local("PACKAGESITE=%s; export PACKAGESITE" % env.packagesite)
    pkgsite = "PACKAGESITE=%s; export PACKAGESITE" % env.packagesite
    with settings(hide('warnings'), warn_only=True):
        local("%s;pkg_add -r mysql-server-5.1.34.tbz" % pkgsite)
    
    ##TODO: Move this file to a more appropriate svn repo    
    local("svn cat %s/build_system/trunk/install/my.cnf > /etc/my.cnf" % env.svn_repo)
    local("rm -rf %s" % env.datadir)
    local("mkdir -p %s" % env.datadir)
    local("chown mysql:mysql %s" % env.datadir)
    local("/usr/local/bin/mysql_install_db --user=mysql --datadir=%s" % env.datadir)
    _process_conf("/etc/rc.conf", "mysql_enable=YES")
    _process_conf("/etc/rc.conf", 'mysql_dbdir=/usr/local/var/db/mysql')
    
    local("/usr/local/etc/rc.d/mysql-server start")
    
    ## Setup root access
    with settings(hide('warnings'), warn_only=True):
        local("/usr/local/bin/mysqladmin -u root password '%s'" % env.root_password)
    
    ## TODO: REPLACE WITH file in svn
    local('''mysql -u root -pqwerty1 -e "GRANT ALL ON *.* TO 'pbrian'@'10.137.0.0/255.255.0.0' IDENTIFIED BY 'devpass';"''')
    local('''mysql -u root -pqwerty1 -e "GRANT ALL ON *.* TO 'backup'@'10.137.0.0/255.255.0.0' IDENTIFIED BY 'backpass';"''')
    local('''mysql -u root -pqwerty1 -e "GRANT ALL ON *.* TO 'robc'@'10.137.0.0/255.255.0.0' IDENTIFIED BY 'devpass';"''')
    local("mysqladmin flush-privileges -u root -p%s" % env.root_password)
    
    start_mysql()
    local("unset PACKAGESITE")
Example #12
def get_config(package_name, save_as=None):
	try:
		config = json.loads(sys.stdin.read())[0]
	except Exception as e:
		print e
		return

	c_map = {
		'p' : int(config['HostConfig']['PortBindings']['22/tcp'][0]['HostPort']),
		'o' : "PubkeyAuthentication=no",
		'u' : config['Config']['User'],
		'h' : str(config['Config']['WorkingDir']),
		'a' : package_name
	}

	package_config = None

	cmd = "ssh -f -o %(o)s -p %(p)d %(u)s@localhost 'source ~/.bash_profile && cd %(h)s/%(a)s/lib/Annex && python unveillance_annex.py -config'" % (c_map)
	
	with settings(hide('everything'), warn_only=True):	
		sentinel_found = False
		for line in local(cmd, capture=True).splitlines():
			if re.match(r'THE FOLLOWING LINES MAKE FOR YOUR FRONTEND CONFIG', line):
				sentinel_found = True
				continue

			if not sentinel_found:
				continue

			try:
				if line[0] == '{' and line[-1] == '}':
					package_config = line
					break
			except Exception as e:
				continue

	if package_config is not None:
		package_config = json.loads(package_config)

	s = package_config['server_port']
	m = package_config['server_message_port']

	package_config.update({
		'annex_remote_port' : c_map['p'],
		'server_port' : int(config['HostConfig']['PortBindings']['%d/tcp' % s][0]['HostPort']),
		'server_message_port' : int(config['HostConfig']['PortBindings']['%d/tcp' % m][0]['HostPort']),
		'server_force_ssh' : True,
		'server_user' : c_map['u']
	})

	i = "%s.%s" % (config['Config']['Image'].replace(":", "-"), config['Config']['Hostname'])

	if not os.path.exists("configs"):
		with settings(hide('everything'), warn_only=True):
			local("mkdir configs")

	with open("configs/%s.json" % i, 'wb+') as C:
		C.write(json.dumps(package_config))

	print i
Example #13
def update_version_photos_photos():
    # Install photos
    sudo('cozy-monitor install photos -b feature/photos')
    # Restart all apps
    sudo('cozy-monitor restart data-system')
    sudo('cozy-monitor restart home')
    sudo('cozy-monitor restart proxy')
    sudo('cozy-monitor restart contacts')
    sudo('cozy-monitor restart photos')
    # Check Contacts
    with hide('running', 'stdout'):
        result = run('curl http://localhost:9114')
    result = result.find('Digidisk - Contacts')
    if result == -1:
        print colored('Contact installing failed', 'red')
    else:
        # Check Photos
        with hide('running', 'stdout'):
            result = run('curl http://localhost:9119')
        result = result.find('Digidisk - Photos')
        if result == -1:
            print colored('Photo installing failed', 'red')
        else:
            run('cozy-monitor status')
            print colored('Stack successfully updated', 'green')
Example #14
def ensure_ssh_key_added(key_files):
    need_adding = set(os.path.abspath(os.path.expanduser(p))
                      for p in key_files)
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True):
        # First check already added keys
        res = local("ssh-add -l", capture=True)
        if res.succeeded:
            for line in res.splitlines():
                m = SSH_KEY_LIST_RE.match(line)
                if not m:
                    continue
                path = os.path.abspath(os.path.expanduser(m.group('key_file')))
                need_adding.discard(path)

    with settings(hide('warnings', 'running', 'stdout', 'stderr')):
        # Next add missing keys
        if need_adding:
            key_string = ' '.join(need_adding)
            start_ssh_agent = ("eval `ssh-agent` && echo $SSH_AUTH_SOCK && "
                               "ssh-add %s") % key_string
            info_agent = local(start_ssh_agent, capture=True).splitlines()
            os.environ["SSH_AGENT_PID"] = info_agent[0].split()[-1]
            os.environ["SSH_AUTH_SOCK"] = info_agent[1]
            return False
        else:
            return True
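
ensure_ssh_key_added() depends on a module-level SSH_KEY_LIST_RE that is not shown; a plausible, purely hypothetical pattern for `ssh-add -l` lines of the form "2048 SHA256:... /home/me/.ssh/id_rsa (RSA)" would be:

import re

# Hypothetical definition; the caller only requires a named 'key_file' group.
SSH_KEY_LIST_RE = re.compile(r'^\d+\s+\S+\s+(?P<key_file>\S+)\s+\([^)]+\)\s*$')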
Example #15
    def _install_conn_tool(self, all_hosts):
        """
        Install iperf on all components in order to
        perform network connectivity tests.

        :param all_hosts: set of all Eucalyptus components
        """
        self.info('Confirm iperf has been installed on'
                  + ' all components')
        packages = ['iperf']
        for package in packages:
            with hide('everything'):
                """
                Use rpm --query --all to confirm package
                exists; if not, issue a warning and install
                missing package
                """
                rpm_output = self.run_command_on_hosts('rpm '
                                                       + '--query --all '
                                                       + package,
                                                       all_hosts)
            for host in all_hosts:
                if re.search(package, rpm_output[host]):
                    self.success(host + ':Package found - ' + package)
                else:
                    self.warning(host + ':Package not installed - '
                                + package + '; Installing ' + package)
                    with hide('everything'):
                        yum_output = self.run_command_on_host('yum install '
                                     + ' --assumeyes --quiet --nogpgcheck ' + package,
                                     host=host)
                    if not yum_output:
                        self.success(host + ':Package installed - ' + package)
                    else:
                        self.failure(host + ':Package failed to install - ' + package) 
Example #16
def configure_rabbitmq():
    '''Add user and host to RabbitMQ server using credentials in settings_local_*.py

    '''
    puts("Configuring RabbitMQ")

    # create user unless it already exists
    with settings(hide('commands')):
        user_list = sudo("rabbitmqctl -q list_users")
    #TODO: change to check only the first column
    if django_settings.BROKER_USER not in user_list:
        sudo("rabbitmqctl add_user {} {}"
             .format(django_settings.BROKER_USER, django_settings.BROKER_PASSWORD))
    else:
        puts("User '{}' already exists".format(django_settings.BROKER_USER))

    # create host unless it already exists
    with settings(hide('commands'), warn_only=True):
        host_list = sudo("rabbitmqctl -q list_vhosts")
    if django_settings.BROKER_HOST not in host_list:
        sudo("rabbitmqctl add_vhost {}".format(django_settings.BROKER_HOST))
    else:
        puts("Host '{}' already exists".format(django_settings.BROKER_HOST))

    # set permissions for the user
    sudo("rabbitmqctl set_permissions -p {} {} '.*' '.*' '.*'"
         .format(django_settings.BROKER_HOST, django_settings.BROKER_USER))
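
For the TODO above, a hedged sketch of a first-column check, assuming `rabbitmqctl -q list_users` prints one tab-separated "name<TAB>[tags]" row per user:

def rabbitmq_user_exists(user_list, username):
    # Compare only the name column of each row instead of substring-matching
    # the whole output (which could also match tags or similarly named users).
    names = set()
    for line in user_list.splitlines():
        if line.strip():
            names.add(line.split('\t', 1)[0].strip())
    return username in names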
Example #17
def install_packages(update=True):
    update = to_boolean(update)
    if update:
        with hide('stdout'):
            sudo('apt-get update')
    print "Installing software"
    with hide('stdout'):
        sudo('apt-get install -y {0}'.format(' '.join(packages_to_install)))
Example #18
def get_lxc_ip(name=None):
    # get lxc-ip script if doesn't exist
    out = run('test -e /usr/local/bin/lxc-ip', quiet=True,
        warn_only=True)
    if out.return_code != 0:
        with hide('stdout'):
            sudo('wget {0} -O /usr/local/bin/lxc-ip ; chmod +x /usr/local/bin/lxc-ip'.format(
                LXC_IP_LINK))
    with hide('stdout'):
        out = sudo('/usr/local/bin/lxc-ip -n {0}'.format(name))
    return out
Example #19
    def init(self):
        # create baseuser
        from golive.stacks.stack import config

        env.user = config['INIT_USER']
        DebianPackageMixin.init(self, update=True)

        env.project_name = config['PROJECT_NAME']
        user = config['USER']

        info("SUDO: configure for user %s" % env.user)
        with settings(warn_only=True):
            # create user
            self.execute(self._useradd)
            # add to sudo
            self.append_with_inituser("/etc/sudoers", "%s ALL=NOPASSWD: ALL" % user, user=env.user)


        pip_cache_dir = "/var/cache/pip"
        debug("PIP: Create cache dir %s" % pip_cache_dir)
        with settings(warn_only=True):
            with hide("warnings"):
                self.sudo("mkdir %s" % pip_cache_dir)
                self.sudo("chmod 777 %s" % pip_cache_dir)

        # create rc file
        with settings(warn_only=True):
            self.execute(sudo, "touch /home/%s/.golive.rc" % user)
            self.execute(sudo, "chmod 600 /home/%s/.golive.rc" % user)
            self.execute(sudo, "chown %s:%s /home/%s/.golive.rc" % (user, user, user))
            self.append("/home/%s/.bashrc" % user, ". .golive.rc")
            self.append("/home/%s/.bash_profile" % user, ". .golive.rc")

        # setup ssh pub-auth for user
        pubkey_file = config['PUBKEY']
        info("PUBKEY: Put %s to authorized_keys2" % pubkey_file)
        with settings(warn_only=True):
            with hide("warnings"):
                self.sudo("mkdir /home/%s/.ssh/" % user)
            self.sudo("chmod 700 /home/%s/.ssh/" % user)
            self.sudo("chown %s:%s /home/%s/.ssh/" % (user, user, user))
            self.sudo("touch /home/%s/.ssh/authorized_keys2" % user)
            self.sudo("chmod 600 /home/%s/.ssh/authorized_keys2" % user)
            self.sudo("chown %s:%s /home/%s/.ssh/authorized_keys2" % (user, user, user))

        self.append("/home/%s/.ssh/authorized_keys2" % user, self.readfile(os.path.expanduser(pubkey_file)))

        env.user = config['USER']

        # set base variables
        from golive.stacks.stack import environment
        for host in environment.hosts:
            args = (config['ENV_ID'], 'HOST', host, True)
            environment.stack.do("set_var", full_args=args)
Example #20
def clean_openvswitch(bridge_name='ovsbr0', internal_bridge_name='ovsbr-int'):
    check_valid_os()
    print(':: Cleaning Open vSwitch on {}'.format(env.host_string))
    with settings(warn_only=True), hide('stdout', 'running', 'warnings'):
        hostname = run('hostname -s | md5sum | head -c 8')
        tep_name = 'tep-{}'.format(hostname)
        gre_name = 'gre-{}'.format(hostname)
        sudo('ovs-vsctl del-port {} {}'.format(bridge_name, tep_name))
        sudo('ovs-vsctl del-port {} {}'.format(bridge_name, gre_name))
    with settings(warn_only=True), hide('stdout', 'running', 'warnings'):
        sudo('ovs-vsctl del-br {}'.format(bridge_name))
        sudo('ovs-vsctl del-br {}'.format(internal_bridge_name))
Example #21
def clean_openvswitch(bridge_name="ovsbr0", internal_bridge_name="ovsbr-int"):
    check_valid_os()
    print(":: Cleaning Open vSwitch on {}".format(env.host_string))
    with settings(warn_only=True), hide("stdout", "running", "warnings"):
        hostname = run("hostname -s | md5sum | head -c 8")
        tep_name = "tep-{}".format(hostname)
        gre_name = "gre-{}".format(hostname)
        sudo("ovs-vsctl del-port {} {}".format(bridge_name, tep_name))
        sudo("ovs-vsctl del-port {} {}".format(bridge_name, gre_name))
    with settings(warn_only=True), hide("stdout", "running", "warnings"):
        sudo("ovs-vsctl del-br {}".format(bridge_name))
        sudo("ovs-vsctl del-br {}".format(internal_bridge_name))
Example #22
def revert_commit():
    """Revert project to a previous commit"""
    return_code = 0
    if not fabfile_settings.validate_setting(REVERT_COMMIT):
        print('Error revert commit key not valid')
        exit(1)
    if not files.exists(PROJECT_PATH):
        with hide('running'):
            run('mkdir -p %s' % (PROJECT_PATH))
    with hide('running', 'stdout'):
        put(DEPLOY_SCRIPT_LOCAL, DEPLOY_SCRIPT_PATH, mode=0755) 
        res = run('bash %s -p %s -u %s -b %s -r %s' % (DEPLOY_SCRIPT_PATH, PROJECT_PATH, PROJECT_URL, BRANCH, REVERT_COMMIT))
        print ''
        if 'Error' in res:
            print('[%s]: Failed' % (env.host_string))
            print res
            print 'Deploy cancelled.'
            return_code = 1
        elif fabfile_settings.deploy_ready_confirm(env.host_string, HOSTS_FILE) and fabfile_settings.wait_all_host_ready(env.hosts, HOSTS_FILE, WAIT_DEPLOY_TIMEOUT):
            print('[%s]: Success' % (env.host_string))
            print res
            print ''
            for cmd in COMMAND:
                with cd(ORIGIN_PATH):
                    print('Run: %s' % (cmd))
                    output = run('%s' % (cmd))
                    if output:
                        print output
            res_c = run('bash %s -p %s -u %s -b %s -c' % (DEPLOY_SCRIPT_PATH, PROJECT_PATH, PROJECT_URL, BRANCH))
            print res_c
            DEPLOY_END_TIME = datetime.now()
            deploy_total_time = (DEPLOY_END_TIME - DEPLOY_START_TIME).seconds
            if deploy_total_time < 5:
                time.sleep(5)
            else:
                time.sleep(3)
            if env.host_string == env.hosts[0]:
                msg_subject = 'Deploying product ' + PROJECT_NAME
                msg_body = '[' + DEPLOY_DATETIME + ']: Successfully deployed ' + PROJECT_NAME + ' on ' + str(len(env.hosts)) + ' servers ' + str(env.hosts) + ' in ' + str(deploy_total_time) + 's' + '\n\n' + 'Deloyment log:' + '\n' + res_c
                fabfile_settings.notify_deployment(subject=msg_subject, body=msg_body, sysadmin=NOTIFY_SYSADMIN, devaddrs=NOTIFY_DEV_ADDRS)
            print 'Total time: ' + str(deploy_total_time) + 's'
        else:
            if files.exists(LOCK_FILE_PATH):
                with hide('running', 'stdout'):
                    run('rm %s' % (LOCK_FILE_PATH))
            print('[%s]: Failed' % (env.host_string))
            print 'Deploy cancelled.'
            return_code = 1
        fabfile_settings.deploy_finish_confirm(HOSTS_FILE)
        print('=======================================================================')
        exit(return_code)
Example #23
def setup_openvswitch(bridge_name='ovsbr0', internal_bridge_name='ovsbr-int',
        tep_network='172.24.1.0'):
    check_valid_os()
    print(':: Configuring Open vSwitch on {}'.format(env.host_string))
    with settings(warn_only=True), hide('stdout', 'running', 'warnings'):
        out = run('which ovs-vsctl')
        if out == '':
            execute(install_openvswitch)
        sudo('ovs-vsctl add-br {}'.format(bridge_name))
        sudo('ovs-vsctl add-br {}'.format(internal_bridge_name))
        hostname = run('hostname -s | md5sum | head -c 8')
        host_ip = run("ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'")
        tep_name = 'tep-{}'.format(hostname)
        tep_ip = '{}.{}'.format('.'.join(tep_network.split('.')[0:2]),
                host_ip.split('.')[-1])
        gre_name = 'gre-{}'.format(hostname)
        sudo('ovs-vsctl add-port {0} {1} -- set interface {1} type=internal'.format(
            bridge_name, tep_name))
        sudo('ifconfig {} {} netmask 255.255.255.0'.format(
            tep_name, tep_ip))
        tep_ips = []
        host_ips = []
        current_host = env.host_string
        # loop through hosts to get tep_ips
        # i'm sure this isn't efficient but this is the best way i could get
        # with the way fabric handles hosts
        for host in env.hosts:
            env.host_string = host
            host_ip = run("ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'")
            host_ips.append(host_ip)
            tep_ip = '{}.{}'.format('.'.join(tep_network.split('.')[0:-1]),
                    host_ip.split('.')[-1])
            tep_ips.append(tep_ip)
        env.host_string = current_host
    # loop through all hosts and setup the GRE tunnels
    current_host = env.host_string
    for host in env.hosts:
        env.host_string = host
        with settings(warn_only=True), hide('stdout', 'running', 'warnings'):
            hostname = run('hostname -s | md5sum | head -c 8')
            host_ip = run("ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'")
            gre_name = 'gre-{}'.format(hostname)
            tep_ip = '{}.{}'.format('.'.join(tep_network.split('.')[0:-1]),
                    host_ip.split('.')[-1])
            for ip in host_ips:
                if ip != host_ip:
                    with settings(warn_only=True), hide('stdout', 'running', 'warnings'):
                        sudo('ovs-vsctl add-port {0} {1} -- set interface {1} type=gre \
                                options:remote_ip={2}'.format(
                                    internal_bridge_name, gre_name, ip))
    env.host_string = current_host
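
The loops above rebind env.host_string by hand, as the inline comment admits; a hedged alternative sketch uses execute(), which runs a task once per host and returns a {host: result} mapping (same ifconfig pipeline as above):

from fabric.api import execute, hide, run, settings

def _eth0_ip():
    # Same pipeline the function above uses to read a host's eth0 address.
    return run("ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'")

def collect_host_ips(hosts):
    # execute() manages the per-host connections itself, so env.host_string never
    # needs to be rebound manually; the return value is {host_string: ip}.
    with settings(hide('stdout', 'running', 'warnings'), warn_only=True):
        return execute(_eth0_ip, hosts=hosts)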
Example #24
def commit(commit_message, push='n'):
    """ add, commit and push files for the project and extra apps"""
    for app in EXTRA_APPS:
        with settings(hide('warnings'), warn_only=True):
            print "Committing changes to %s " % app['app_dir']
            local("cd %s && git add . && git commit -m '%s'" % (app['app_dir'], commit_message))
            if push == 'y':
                local("cd %s && git push" % app['app_dir'])
            
    with settings(hide('warnings'), warn_only=True):
        print "Committing changes to %s " % PROJECT_DIR_STAGING
        local("cd %s && git add . && git commit -m '%s'" % (PROJECT_DIR_STAGING, commit_message))
        if push == 'y':
            local("cd %s && git push" % PROJECT_DIR_STAGING)
Example #25
File: node.py Project: stevec7/gpfs
def get_gpfs_software_levels(node):
    """
    Get the GPFS software levels for the following packages:
    gpfs.base
    gpfs.docs
    gpfs.gpl
    gpfs.gplbin
    gpfs.msg.en_US

    @param node: node to check software levels on
    @type node: string

    @return software: dict of software levels
    @rtype: dict
    """

    software_levels = {}

    f = StringIO.StringIO()
    env.host_string = str(node)
    env.output_prefix = ''

    # first, get the normal packages
    with hide('running'):
        run('rpm -q gpfs.base gpfs.gpl gpfs.docs gpfs.msg.en_US --queryformat "%{name} %{version}-%{release}\\n"', stdout=f)


    for line in f.getvalue().splitlines():
        key = line.split()[0]
        value = line.split()[1]
        software_levels[key] = value

    # the gpfs.gplbin package is harder
    # 
    # need to get the kernel version, and then do some crappy grepping
    #
    # the problem is that there could be more than one gpfs.gplbin rpm
    #   installed, so do something with that as well...
    f.truncate(0)

    with hide('running'):
        run('rpm -qa | grep gpfs.gplbin | grep `uname -r` | xargs -I\'{}\' rpm -q \'{}\' --queryformat "%{name} %{version}-%{release}\\n"', 
            stdout=f)

    for line in f.getvalue().splitlines():
        key = line.split()[0]
        value = line.split()[1]
        software_levels[key] = value

    return software_levels
Example #26
def create_user():
    """Create system user/group with home directory"""
    user = env.user
    with settings(hide('running')):
        local('echo "Switching to power user \'{}\'"'.format(env.poweruser))
    with settings(user=env.poweruser, warn_only=True):
        with settings(hide('everything')):
            group_exists = run('getent group {}'.format(env.group)).succeeded
            user_exists = run('getent passwd {}'.format(user)).succeeded
        if not group_exists:
            sudo('/usr/sbin/groupadd {}'.format(env.group))
        if not user_exists:
            # home dir is created in this step, so do not remove it afterwards
            sudo('/usr/sbin/useradd -m -g {0} {1}'.format(env.group, user))
            sudo('/usr/bin/passwd {}'.format(user))
Example #27
def enable_apt_repositories(prefix, url, version, repositories):
    """ adds an apt repository """
    with settings(hide('warnings', 'running', 'stdout'),
                  warn_only=False, capture=True):
        sudo('apt-add-repository "%s %s %s %s"' % (prefix,
                                                   url,
                                                   version,
                                                   repositories))
        with hide('running', 'stdout'):
            output = sudo("DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update")
        if 'Some index files failed to download' in output:
            raise SystemExit(1)
        else:
            # if we didn't abort above, we should return True
            return True
Example #28
 def _create_keypair(self, key_name):
     username = self.inputs.host_data[self.cfgm_ip]['username']
     password = self.inputs.host_data[self.cfgm_ip]['password']
     try:
         # Check whether the rsa.pub and keypair matches
         # On pre icehouse novaclient #1223934 observed so get() fails
         # keypair = self.obj.keypairs.get(keypair=key_name)
         keypairs = [x for x in self.obj.keypairs.list() if x.id == key_name]
         if not keypairs:
             raise novaException.NotFound('keypair not found')
         pkey_in_nova = keypairs[0].public_key.strip()
         with settings(host_string='%s@%s' % (username, self.cfgm_ip),
                 password=password, warn_only=True, abort_on_prompts=True):
             with hide('everything'):
                 if exists('.ssh/id_rsa.pub'):
                     output = get('.ssh/id_rsa.pub', '/tmp/')
                     pkey_in_host = open('/tmp/id_rsa.pub', 'r').read().strip()
                     if pkey_in_host == pkey_in_nova:
                         self.logger.debug('Not creating keypair since it exists')
                         return True
         self.logger.error('Keypair and rsa.pub doesnt match.')
         raise Exception('Keypair and rsa.pub doesnt match.'
                         ' Seems rsa keys are updated outside of test env.'
                         ' Delete nova keypair and restart the test')
     except novaException.NotFound:
         pass
     with hide('everything'):
         with settings(
             host_string='%s@%s' % (username, self.cfgm_ip),
                 password=password, warn_only=True, abort_on_prompts=True):
             rsa_pub_arg = '.ssh/id_rsa'
             self.logger.debug('Creating keypair %s' % (key_name)) 
             if exists('.ssh/id_rsa.pub'):  # If file exists on remote m/c
                 self.logger.debug('Public key exists. Getting public key') 
                 get('.ssh/id_rsa.pub', '/tmp/')
             else:
                 self.logger.debug('Making .ssh dir')
                 run('mkdir -p .ssh')
                 self.logger.debug('Removing id_rsa*')
                 run('rm -f .ssh/id_rsa*')
                 self.logger.debug('Creating key using : ssh-keygen -f -t rsa -N') 
                 run('ssh-keygen -f %s -t rsa -N \'\'' % (rsa_pub_arg))
                 self.logger.debug('Getting the created keypair')
                 get('.ssh/id_rsa.pub', '/tmp/')
             self.logger.debug('Reading public key')
             pub_key = open('/tmp/id_rsa.pub', 'r').read()
             self.obj.keypairs.create(key_name, public_key=pub_key)
     return True
Example #29
    def _verify_os_proc(self, all_hosts):
        """
        Verifies supported OS, correct chip architecture and 
        recommended minimum number of processors on all cloud components.  
  
        :param all_hosts: a set of Eucalyptus cloud components
        """
        self.info("Operating System and Processor verification on all hosts")
        os_output = "cat /etc/system-release"
        with hide("everything"):
            os_version = self.run_command_on_hosts(os_output, all_hosts)

        os_search_string = "(CentOS|Red).*(" + str(self.os_version) + ".\w+)"
        for host in all_hosts:
            if re.search(os_search_string, os_version[host]):
                self.success(host + ": Correct OS Version")
            else:
                self.failure(host + ": Incorrect OS Version")

        arch_output = "uname -m"
        with hide("everything"):
            arch_version = self.run_command_on_hosts(arch_output, all_hosts)

        for host in all_hosts:
            if re.search("x86_64", arch_version[host]):
                self.success(host + ": Correct chip architecture")
            else:
                self.failure(host + ": Incorrect chip architecture")

        cpu_output = "cat /proc/cpuinfo | grep processor"
        cputype_output = 'cat /proc/cpuinfo | grep "model name"'
        with hide("everything"):
            cpu_count = self.run_command_on_hosts(cpu_output, all_hosts)
            cpu_type = self.run_command_on_hosts(cputype_output, all_hosts)

        for host in all_hosts:
            cpus = re.findall("processor", cpu_count[host])
            if len(cpus) >= 2:
                self.success(host + ": Passed minimum number of" + " processors requirement")
            else:
                self.failure(host + ": Failed minimum number of" + " processors requirement")

            proc_type = re.findall("(model).*([Intel|AMD].*)\w+", cpu_type[host])

            if len(cpus) == len(proc_type):
                self.success(host + ": Passed requirement of " + "Intel/AMD processor support")
            else:
                self.failure(host + ": Failed requirement of " + "Intel/AMD processor support")
Example #30
File: fabfile.py Project: bsu/GWM2
def install_dependencies_pip():
    """
    Install all dependencies available from pip
    """
    require('environment', provided_by=[dev, prod])
    create_virtualenv()

    # if this is a development install then filter out anything we have a
    # git repo for.
    pips_ = PIP_INSTALL.copy()
    if env.environment == 'development':
        map(pips_.pop, [k for k in GIT_INSTALL if k in PIP_INSTALL])
    
    if not pips_:
        print 'No pip packages left to install'
        return

    with lcd(env.doc_root):
        #XXX create temp requirements file text from list of requirements
        #    it will be destroyed after install is complete
        requirements = '\n'.join([''.join(p) for p in pips_.items()])
        with settings(hide('running')):
            local("echo '%s' > requirements.txt" % requirements)

        local('pip install -E %(virtualenv)s -r requirements.txt' % env)
        local('rm requirements.txt')
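
PIP_INSTALL and GIT_INSTALL are module-level mappings that are not shown; the ''.join(p) above implies name-to-specifier pairs, so a hypothetical shape might be:

# Hypothetical example only: ''.join(('django', '==1.3')) yields the
# requirements.txt line "django==1.3"; an empty specifier pins nothing.
PIP_INSTALL = {
    'django': '==1.3',
    'south': '',
}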
Example #31
def create_automatic_migration():
    """
    Runs south schemamigration on all apps returned by "get_apps_to_migrate".
    Each schema migration is run with "--auto" flag
    """
    with cd(env.SRC_PATH):
        apps = run('%s fabfiles/django_scripts/get_apps_to_migrate.py' %
                   env.PYTHON_BIN).split('\n')
        with settings(hide('warnings'), warn_only=True):
            for app in apps:
                output = sudo('%s manage.py schemamigration %s --auto' %
                              (env.PYTHON_BIN, app.strip()))

                # Raise any error other than nothing seems to have changed
                if output.failed:
                    if not 'Nothing seems to have changed' in output:
                        raise Exception(
                            'Error when running automated schema migration')
Example #32
    def local_changes_test(self):

        with settings(hide('warnings', 'running', 'stdout', 'stderr')):

            git_sync = self.get_git_sync()
            git_sync.run_initial_sync()

            with open("/vagrant/scratch/one.txt", "a") as file_one:
                file_one.write("Now is the winter of our discontent.")
                file_one.close()

            git_sync.run_sync()

            execute(
                self.check_local_change_on_server,
                self,
                hosts=[self.host],
            )
Example #33
 def check_x_files(tomcat_path):
     """
     check whether all the .sh files in ${tomcat_path}/bin/ are executable
     Args:
         tomcat_path (string): tomcat path
     """
     cmd = 'ls %s/bin|grep .sh' % tomcat_path
     with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                   warn_only=True):
         rec = run(cmd)
     if rec.return_code != 0:
         return False
     xfiles = rec.splitlines()
     for xfile in xfiles:
         cmd = 'test -x %s/bin/%s' % (tomcat_path, xfile)
         if run(cmd).return_code != 0:
             return False
     return True
Example #34
def repo_deb_add(package, dist='ubuntu16.04'):
    '''upload and add package to an apt repo, defaults to ubuntu16.04

    Example:
    % fab repo_deb_add:myfoo.deb,dist=ubuntu16.04
    '''

    if not os.path.isfile(os.path.expanduser(package)):
        abort('could not upload {0}: file not found'.format(package))

    with hide('commands'):
        put(package, '{0}/archive/{1}'.format(env.repo_deb_root, dist))
        package = package.split('/')[-1]
        run('/usr/bin/aptly -config=/etc/aptly-{0}.conf repo add -force-replace {0} {1}/archive/{0}/{2}'
            .format(dist, env.repo_deb_root, package))

    if republish(dist):
        print green('added {0} to repo {1}'.format(package, dist))
Example #35
File: node.py Project: stevec7/gpfs
def mount_filesystem(filesystem, node):
    """Mount GPFS filesystems on a given node
    
    @param filesystem: filesystem to mount, 'all' mounts all eligible
        GPFS filesystems on that node
    @type filesystem: string
    @param node: node to run command on
    @type node: string

    @return NOTHING
    """

    env.host_string = node

    with hide('everything'):
        run("mmmount %s" % filesystem)

    return
Example #36
    def test_parallel_network_error(self, error_mock):
        """
        network error should call error
        """

        network_error = NetworkError('Network message')
        fabric.state.env.warn_only = False

        @parallel
        @hosts('127.0.0.1:2200', '127.0.0.1:2201')
        def task():
            raise network_error

        with hide('everything'):
            execute(task)
        error_mock.assert_called_with('Network message',
                                      exception=network_error.wrapped,
                                      func=fabric.utils.abort)
Example #37
    def _apt_install_remote_deb(self, version=None):
        if not version:
            self._apt_install_remote_deb_latest()

        else:
            print blue('installing %s %s ' % (self._service_name, version))
            with hide('stdout'):
                sudo('apt-get update')
            sudo(
                'apt-get install '
                '-y '
                '--force-yes '
                '-o Dpkg::Options::="--force-confold" '
                '%s=%s'  % (self._service_name, version)
            )

        jar_name = run('readlink %s.jar' % self._rpath("current", self._service_name))
        print green('apt installed new jar: %s' % jar_name)
Example #38
def compile(upgrade="", package=None):
    """Update list of requirements"""
    if upgrade and package:
        abort("Can only specify one of `upgrade` or `package`")
    if package:
        puts(blue("Upgrading spec for {}".format(package)))
    elif upgrade:
        puts(blue("Upgrading all package specs"))
    _pre_check()
    upgrade = (upgrade.lower() in {'true', 'upgrade', '1', 'yes', 'up'})
    with hide('running', 'stdout'):
        puts(green("Updating requirements"), show_prefix=True)
        for file in REQ_DIR.glob('*.in'):
            puts(blue("  - {}".format(file.name.replace(".in", ""))))
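            # note: no space between the first two placeholders below, so when a
            # package is given the pieces concatenate into pip-compile's
            # --upgrade-package flag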
            local('pip-compile --no-index {}{} --rebuild {}'.format(
                '--upgrade' if upgrade or package else '',
                '-package {}'.format(package) if package else '',
                file.relative_to(HERE)))
Example #39
def teardown():
    env.warn_only = True
    with hide('stdout', 'warnings'):
        print(':: Tearing down Shipyard Redis')
        sudo('docker kill shipyard_redis')
        sudo('docker rm shipyard_redis')
        print(':: Tearing down Shipyard Load Balancer')
        sudo('docker kill shipyard_lb')
        sudo('docker rm shipyard_lb')
        print(':: Tearing down Shipyard Router')
        sudo('docker kill shipyard_router')
        sudo('docker rm shipyard_router')
        print(':: Tearing down Shipyard DB')
        sudo('docker kill shipyard_db')
        sudo('docker rm shipyard_db')
        print(':: Tearing down Shipyard')
        sudo('docker kill shipyard')
        sudo('docker rm shipyard')
Example #40
    def _upload_template_obeys_lcd(self, jinja, mirror):
        template_content = {True: '{{ varname }}s', False: '%(varname)s'}

        template_dir = 'template_dir'
        template_name = 'template.txt'
        if not self.exists_locally(self.path(template_dir)):
            os.mkdir(self.path(template_dir))

        self.mkfile(os.path.join(template_dir, template_name),
                    template_content[jinja])

        remote = '/configfile.txt'
        var = 'foobar'
        with hide('everything'):
            with lcd(self.path(template_dir)):
                upload_template(template_name,
                                remote, {'varname': var},
                                mirror_local_mode=mirror)
Example #41
def main(args):

    state = tree()
    env.hosts = [
        args.destnode,
    ]
    env.use_hostbased = True
    cluster = GPFSCluster(state)

    # this builds a complete GPFS cluster state defaultdict
    with settings(hide('running'), output_prefix='', warn_only=True):
        execute(cluster.build_cluster_state)
        execute(cluster.get_managers)
        execute(cluster.get_all_kernel_and_arch)
        execute(cluster.get_all_gpfs_baserpm)

    # write all of this to a json dump
    json.dump(state, open(args.jsonfile, 'w'))
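
main() builds its state with a tree() helper that is not shown; the usual recursive-defaultdict recipe (an assumption, though it matches the nested state later dumped to JSON) is:

from collections import defaultdict

def tree():
    # Arbitrarily nested keys can be assigned without creating intermediate dicts
    # first, e.g. state['nodes'][host]['gpfs']['baserpm'] = '...'.
    return defaultdict(tree)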
Example #42
 def put_file_to_remote(self, file_name, target_path, **kwargs):
     """
     Upload one or more files to a remote host from local host
     :param file_name: path and files to be copied into server
     :param target_path: path where will be put the file
     :param hide_run: show message or not (True or False)
     :param use_sudo:  superuser privileges (True | False)
     :param mode: to specify an exact mode (chmod)
     """
     hide_run = kwargs.get(HIDE, self.hide)
     sudo_run = kwargs.get(SUDO, self.sudo)
     mode = kwargs.get(MODE, None)
     if hide_run:
         with hide('running', 'stdout', 'stderr'):
             put(local_path=file_name,
                 remote_path=target_path,
                 use_sudo=sudo_run,
                 mode=mode)
Example #43
def authorize_ssh_keys():
    keyfile = '.ssh/authorized_keys'
    keydir = os.path.join(
        os.path.dirname(__file__),
        'ssh-keys',
    )
    keys = []
    for filename in os.listdir(keydir):
        if filename.startswith('.'):
            continue
        if not filename.endswith('.pub'):
            continue
        keys.extend(
            line.rstrip('\n') for line in file(os.path.join(keydir, filename)))
    with hide('running'):
        for key in keys:
            run('grep -q "%s" %s || echo "%s" >> %s' %
                (key, keyfile, key, keyfile))
Example #44
def RunCommand(cfg, host, cmd):
    key = Path(cfg.configpath).joinpath(cfg.keypath)
    key = Path(key).joinpath(cfg.keyname)
    if cfg.log_level == "DEBUG":
        with settings(show('everything'),
                      user='******',
                      host_string=host,
                      key_filename=key.as_posix(),
                      warn_only=True):
            results = fabric_run(cmd)
    else:
        with settings(hide('everything'),
                      user='******',
                      host_string=host,
                      key_filename=key.as_posix(),
                      warn_only=True):
            results = fabric_run(cmd)
    return results
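
The two branches above differ only in show() versus hide(); a hedged refactor sketch (the masked user argument is omitted) that picks the output context once:

from fabric.api import hide, settings, show
from fabric.api import run as fabric_run

def run_command(cfg, host, cmd, key_filename):
    # Choose the output context from the log level, then share one settings() call.
    output_ctx = show('everything') if cfg.log_level == "DEBUG" else hide('everything')
    with settings(output_ctx,
                  host_string=host,
                  key_filename=key_filename,
                  warn_only=True):
        return fabric_run(cmd)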
Example #45
def generatemedia(mode="dev", optimize=0):
    """generates the media for production"""
    settings(mode="prod")
    local("python manage.py generatemedia", capture=False)
    #optimize the pngs using optipng and jpgs using jpegtran
    #optimizations are in place and lossless
    if optimize:
        puts("Optimizing images...")
        with hide('running', 'stdout', 'stderr'):
            for root, dirs, files in os.walk("_generated_media"):
                for f in files:
                    fname = os.path.join(root, f)
                    if f.endswith(".png"):
                        local("optipng -quiet -o7 %s" % (fname))
                    elif f.endswith(".jpg") or f.endswith(".jpeg"):
                        local("jpegtran -copy none -optimize -outfile %s %s" %
                              (fname, fname))
    settings(mode=mode)
Example #46
def bootstrap(tag='master'):
    """Bootstrap the deployment using the specified branch"""
    require('environment', provided_by=[production, staging])
    print(MOTD_PROD if _is_prod() else MOTD_STAG)
    msg = colors.red('\n%(project_path)s exists. '
                     'Do you want to continue anyway?' % env)

    if (not exists('%(project_path)s' % env)
            or confirm(msg, default=False)):
        with settings(hide('stdout', 'stderr')):
            _init_directories()
            _init_virtualenv()
            _git_clone_tag(tag=tag)
            _install_requirements()
            update_config(tag=tag)  # upload new config files
            enable_site()
    else:
        sys.exit('\nAborting.')
Example #47
 def service_wrapper(self, cmd, print_output=True, warn_only=False):
     """
     [advanced]\t'start', 'stop', 'restart', or get the 'status' of your service
     """
     allowed = ('start', 'stop', 'restart', 'status')
     if cmd not in allowed:
         print red('unknown command, try one of %s' % ','.join(allowed))
         return
     with hide('status', 'running', 'stdout'):
         if print_output:
             print blue('executing init:%s' % cmd)
         out = sudo("SYSTEMD_PAGER='' service %s %s" %
                    (self._service_name, cmd),
                    warn_only=warn_only,
                    quiet=not print_output)
         if print_output:
             print green(out)
         return out
Example #48
    def coordinator_config():
        config_path = os.path.join(REMOTE_CONF_DIR, CONFIG_PROPERTIES)
        config_host = env.roledefs['coordinator'][0]
        try:
            data = StringIO()
            with settings(host_string='%s@%s' % (env.user, config_host)):
                with hide('stderr', 'stdout'):
                    temp_dir = run('mktemp -d /tmp/prestoadmin.XXXXXXXXXXXXXX')
                try:
                    get(config_path, data, use_sudo=True, temp_dir=temp_dir)
                finally:
                    run('rm -r %s' % temp_dir)

            data.seek(0)
            return PrestoConfig.from_file(data, config_path, config_host)
        except:
            _LOGGER.info('Could not find Presto config.')
            return PrestoConfig(None, config_path, config_host)
Example #49
    def test_should_set_all_hosts(self):
        """
        should set env.all_hosts to its derived host list
        """
        hosts = ['a', 'b']
        roledefs = {'r1': ['c', 'd']}
        roles = ['r1']
        exclude_hosts = ['a']

        def command():
            self.assertEqual(set(env.all_hosts), set(['b', 'c', 'd']))

        task = Fake(callable=True, expect_call=True).calls(command)
        with settings(hide('everything'), roledefs=roledefs):
            execute(task,
                    hosts=hosts,
                    roles=roles,
                    exclude_hosts=exclude_hosts)
Example #50
def port_is_open():
    """
    Determine if the default port and user is open for business.
    """
    with settings(hide('aborts'), warn_only=True ):
        try:
            if env.verbosity:
                print "Testing node for previous installation on port %s:"% env.port
            distribution = lsb_release()
        except KeyboardInterrupt:
            if env.verbosity:
                print >> sys.stderr, "\nStopped."
            sys.exit(1)
        except: #No way to catch the failing connection without catchall? 
            return False
        if distribution.distributor_id != 'Ubuntu':
            print env.host, 'WARNING: Woven has only been tested on Ubuntu >= 10.04. It may not work as expected on',distribution.description
    return True
Example #51
def install_os_updates(distribution, force=False):
    """ installs OS updates """
    if ('centos' in distribution or 'rhel' in distribution
            or 'redhat' in distribution):
        bookshelf2.logging_helpers.log_green('installing OS updates')
        sudo("yum -y --quiet clean all")
        sudo("yum group mark convert")
        sudo("yum -y --quiet update")

    if ('ubuntu' in distribution or 'debian' in distribution):
        with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                      warn_only=False,
                      capture=True):
            sudo("DEBIAN_FRONTEND=noninteractive apt-get update")
            if force:
                sudo("apt-get -y upgrade --force-yes")
            else:
                sudo("apt-get -y upgrade")
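One way this task might be driven, assuming Fabric 1.x, that `install_os_updates` above is in scope, and that the role name, host, and distribution string are placeholders:

from fabric.api import env, execute

env.roledefs = {'app': ['deploy@203.0.113.10']}

# Positional and keyword arguments after the task are forwarded to it;
# roles= selects which hosts execute() fans out to.
execute(install_os_updates, 'ubuntu 16.04', force=True, roles=['app'])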
Example #52
0
    def file_transfer(self, type, node_file, local_file):
        with settings(hide('everything'), host_string='%s@%s' % (
            self.username, self.ip), password=self.password, warn_only=True,
            abort_on_prompts=False):
            if type == "get":
                result = get(node_file, local_file)
                self.logger.debug(result)
                if result.failed:
                    self.logger.warn('Failed to get %s(as %s) from %s' % (
                        node_file, local_file, self.ip))
                return result.succeeded
            if type == "put":
                result = put(node_file, local_file)
                self.logger.debug(result)
                if result.failed:
                    self.logger.error('Failed to upload %s(as %s) to %s' % (
                        node_file, local_file, self.ip))
                return result.succeeded
Example #53
0
    def _prepare_fs(self, cookbook_repo, branch, debug, update_repo):
        ChefManager.install_chef_dk()
        ChefManager.create_chef_repo()
        with hide(*self.hidden_outputs):
            local('if [ ! -d eucalyptus-cookbook ]; then '
                  'git clone '
                  '{0} eucalyptus-cookbook;'
                  'fi'.format(cookbook_repo))
            if update_repo:
                print green('updating eucalyptus-cookbook')
                local(
                    'cd eucalyptus-cookbook; git checkout {0};'.format(branch))
                local('cd eucalyptus-cookbook; git pull origin {0};'.format(
                    branch))
        ChefManager.download_cookbooks('eucalyptus-cookbook/Berksfile',
                                       os.path.join(self.chef_repo_dir +
                                                    '/cookbooks'),
                                       debug=debug)
Example #54
0
    def run_cmd_on_server(self,
                          server_ip,
                          issue_cmd,
                          username=None,
                          password=None,
                          pty=True):
        if server_ip in self.host_data.keys():
            if not username:
                username = self.host_data[server_ip]['username']
            if not password:
                password = self.host_data[server_ip]['password']
        with hide('everything'):
            with settings(host_string='%s@%s' % (username, server_ip),
                          password=password,
                          warn_only=True,
                          abort_on_prompts=False):
                output = run('%s' % (issue_cmd), pty=pty)
                return output
Example #55
0
def install_controller_dev():
    '''
    Install Cozy Controller Application Manager. Daemonize with supervisor.
    '''
    require.nodejs.package('cozy-controller')
    require.supervisor.process('cozy-controller',
                               command='cozy-controller -c -u --per 755',
                               environment='NODE_ENV="development"',
                               user='******')
    supervisor.restart_process('cozy-controller')

    time.sleep(5)
    with hide('running', 'stdout'):
        result = run('curl -X GET http://127.0.0.1:9002/')
    if result != '{"message":"No drones specified"}':
        print_failed("cozy-controller")

    print(green('Cozy Controller successfully started'))
Example #56
0
def mysql_conf():
    """Set up .my.cnf file for passwordless MySQL operation."""
    require('environment', provided_by=[production, staging])

    print('\n\nSetting up MySQL password configuration...')

    conf_filename = '~/.my.cnf'

    if (not exists(conf_filename) or
        confirm('\n%s already exists. Do you want to overwrite it?'
                % conf_filename, default=False)):

        with settings(hide('stdout', 'stderr')):
            upload_template('deploy/my.cnf', conf_filename, context=env)
            run('chmod 600 %s' % conf_filename)

    else:
        abort('\nAborting.')
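The `deploy/my.cnf` template itself isn't shown; for reference, passwordless operation usually boils down to a `[client]` section in `~/.my.cnf`. A hedged sketch that writes one inline instead of using `upload_template` (the credentials are placeholders):

from StringIO import StringIO

from fabric.api import settings, hide, put

def write_my_cnf(user, password):
    # A [client] section lets mysql/mysqldump run without prompting.
    content = '[client]\nuser = %s\npassword = %s\n' % (user, password)
    with settings(hide('stdout', 'stderr')):
        put(StringIO(content), '~/.my.cnf', mode=0o600)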
Example #57
0
def bootstrap():
    """Creates initial directories and virtualenv"""
    require('environment', provided_by=[production, staging])

    if (exists('%(project_path)s' % env) and \
        confirm('%(project_path)s already exists. Do you want to continue?' \
                % env, default=False)) or not exists('%(project_path)s' % env):

        print('Bootstrapping initial directories...')

        with settings(hide('stdout', 'stderr')):
            _init_directories()
            _init_virtualenv()
            _clone_repo()
            _checkout_repo()
            _install_requirements()
    else:
        print('Aborting.')
Example #58
0
def compile(upgrade="", package=None):
    """Update list of requirements"""

    if upgrade and package:
        abort("Can only specify one of `upgrade` or `package`")
    if package:
        puts(blue("Upgrading spec for {}".format(package)))
    elif upgrade:
        puts(blue("Upgrading all package specs"))
    _pre_check()
    upgrade = (upgrade.lower() in {"true", "upgrade", "1", "yes", "up"})
    with hide("running", "stdout"):
        puts(green("Updating requirements"), show_prefix=True)
        fab_compile_requirements_file(REQ_DIR / "base.in", upgrade, package)
        fab_compile_requirements_file(REQ_DIR / "dev.in", upgrade, package)
        fab_compile_requirements_file(REQ_DIR / "tests.in", upgrade, package)
        fab_compile_requirements_file(REQ_DIR / "blockchain.in", upgrade, package)
        fab_compile_requirements_file(REQ_DIR / "pandapower.in", upgrade, package)
Example #59
0
def getMeasures():
    try:
        timestamp = datetime.datetime.utcnow().replace(tzinfo=utc)
        now = timestamp - datetime.timedelta(seconds=timestamp.second, microseconds=timestamp.microsecond)
        env.skip_bad_hosts = True
        env.timeout = 10
        env.connection_attempts = 1
        servers = Server.objects.filter(is_measuring=False)
        if servers:
            env.hosts = ["%s@%s" % (server.username, server.ip) for server in servers]
            env.passwords = dict([("%s@%s" % (server.username, server.ip), server.password) for server in servers])
            with hide('everything'):
                execute(launch_command, now)
    except:
        total_time = datetime.datetime.utcnow().replace(tzinfo=utc) - timestamp
        duration = float(int((total_time.seconds * 1000000) + total_time.microseconds) / 1000000.0)
        CronLog.objects.create(timestamp=timestamp, action="sensors", server=None, success=False, duration=duration, message=traceback.format_exc())
    return 0
Example #60
0
    def check_nofile():
        """check if nofile is set to 1048576"""
        cmd = 'grep 1048576 /etc/security/limits.conf | grep nofile'
        with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                      warn_only=True):
            res = run(cmd)
        if res.return_code != 0:
            return False
        # fabric automatically converts newline in outputs (but not file contents)
        # thus "os.linesep" must be used here.
        lines = res.split(os.linesep)
        cnt = 0
        for line in lines:
            if not line.startswith("#"):
                cnt += 1
        if cnt == 2:
            return True
        return False
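The count of two non-comment lines corresponds to a soft and a hard `nofile` entry. A companion sketch, assuming Fabric 1.x, that puts those entries in place; the wildcard domain is an assumption, so adjust it to the real user if needed.

from fabric.api import settings, hide
from fabric.contrib.files import append

def set_nofile_limit():
    # The two lines check_nofile() expects to find in limits.conf.
    lines = [
        '* soft nofile 1048576',
        '* hard nofile 1048576',
    ]
    with settings(hide('warnings', 'running', 'stdout', 'stderr')):
        # append() only adds lines that are not already present.
        append('/etc/security/limits.conf', lines, use_sudo=True)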