Example #1
def enable(program, do_reload=True):
    """
    Enable program.

    :param program: Program to enable
    :param do_reload: Reload supervisor
    :return: Got enabled?
    """
    enabled = False
    program = program if program.endswith(".conf") or program == "default" else "{}.conf".format(program)

    with sudo():
        available_program = os.path.join(programs_available_path, program)
        if not files.exists(available_program):
            warn("Invalid program: {}".format(program))
        else:
            with cd(programs_enabled_path):
                if not files.exists(program):
                    info("Enabling program: {}", program)
                    with silent():
                        debian.ln(available_program, program)
                        enabled = True
                    if do_reload:
                        reload()

    return enabled
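
A brief usage sketch for the task above, illustrative only: it assumes enable() is importable as a Fabric task and that a celery.conf exists under programs-available (both are assumptions, not part of the original example).

# Illustrative only: enable a supervisor program from another task.
# 'celery' is a hypothetical program name; enable() appends '.conf' itself.
def setup_worker():
    if enable('celery'):
        info('celery.conf symlinked into programs-enabled; supervisor reloaded')
    else:
        info('celery.conf is missing or was already enabled')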
Example #2
def ssh_config(host_string=None):
    """
    Return ssh configuration dict for current env.host_string host value.

    Memoizes the loaded SSH config file, but not the specific per-host results.

    This function performs the necessary "is SSH config enabled?" checks and
    will simply return an empty dict if not. If SSH config *is* enabled and the
    value of env.ssh_config_path is not a valid file, it will abort.

    May give an explicit host string as ``host_string``.
    """
    from fabric.state import env
    dummy = {}
    if not env.use_ssh_config:
        return dummy
    if '_ssh_config' not in env:
        try:
            conf = ssh.SSHConfig()
            path = os.path.expanduser(env.ssh_config_path)
            with open(path) as fd:
                conf.parse(fd)
                env._ssh_config = conf
        except IOError:
            warn("Unable to load SSH config file '%s'" % path)
            return dummy
    host = parse_host_string(host_string or env.host_string)['host']
    return env._ssh_config.lookup(host)
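
A short, hedged usage sketch for the helper above; the host string and values below are made up, and it assumes Fabric 1.x with SSH config support switched on.

# Illustrative only: resolve per-host SSH options through the memoized config.
from fabric.state import env

env.use_ssh_config = True
env.ssh_config_path = '~/.ssh/config'  # Fabric's default path, shown explicitly

options = ssh_config('deploy@web1.example.com:2222')  # hypothetical host string
real_hostname = options.get('hostname', 'web1.example.com')
key_files = options.get('identityfile')  # a list in newer paramiko; may be absent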
Example #3
def notify_start(title, revision=None, changes=None, max_changes=8):
    """
    Send a message to slack about the start of a deployment

    :return str: plaintext message part
    """
    from .project import github_link

    summary = _deploy_summary(title, revision)
    summary["color"] = "warning"

    if changes:
        try:
            base_url = github_link()
            log_template = u'`<{base_url}/commit/{rev}|{rev}>` {msg}'
            formatted_changes = [log_template.format(base_url=base_url, rev=rev, msg=msg)
                                 for rev, msg in changes[:max_changes]]

            if len(changes) > max_changes:
                formatted_changes.append('(+ {} more)'.format(len(changes[max_changes:])))

            summary['fields'].append({
                'title': 'Changes',
                'value': u'\n'.join(formatted_changes),
                'short': False
            })

        except Exception:
            warn("Changelog formatting failed, skipping")

    slack.notify(None, summary)
Example #4
def syntax_check():
    """Greps the codebase for the patterns defined in settings.SYNTAX_CHECK."""
    with fab_settings(warn_only=True):
        for file_type in settings.SYNTAX_CHECK:
            needs_to_abort = False
            # egrep exits with code 1 when nothing matches, so we need to
            # allow 1 as a successful exit code in our env
            if 1 not in env.ok_ret_codes:
                env.ok_ret_codes.append(1)
            output = local(
                'find -name "{}" -print'.format(file_type),
                capture=True,
            )
            files = output.split()
            for file in files:
                if any(s in file for s in settings.SYNTAX_CHECK_EXCLUDES):
                    continue
                result = local('egrep -i -n "{0}" {1}'.format(
                    settings.SYNTAX_CHECK[file_type], file),
                               capture=True)
                if result:
                    warn(
                        red("Syntax check found in '{0}': {1}".format(
                            file, result)))
                    needs_to_abort = True
            if needs_to_abort:
                abort(
                    red('There have been errors. Please fix them and run'
                        ' the check again.'))
            else:
                puts(green('Syntax check found no errors. Very good!'))
Example #5
    def link_release(self, release_dir=None):
        release_dir = self.getNextReleaseIfReady(release_dir)
        if release_dir:
            run("rm -f %s" % self.settings.instance_code_dir)
            run("ln -s %s %s" % (release_dir, self.settings.instance_code_dir))
        else:
            warn("Release is not ready yet.")
Example #6
def print_results(results):
    sio = StringIO()
    try:
        csvwriter = csv.writer(sio)
        csvwriter.writerow([' '] + list(results.keys()))
        for tohost in results.keys():
            row = []
            for colheader in results.keys():
                try:
                    row.append(results[tohost][colheader]['tput'])
                except (AttributeError, KeyError):
                    # data point doesn't exist due to self-test or failure
                    row.append('X')
                except Exception as e:
                    # something else wrong with the data point,
                    # log error and mark output
                    warn("Error parsing datapoint for dest %s from %s" % (tohost,
                                                                          colheader))
                    warn("Got exception: %s" % e)
                    row.append('Error')

            csvwriter.writerow([tohost] + row)
        print(sio.getvalue())
    finally:
        sio.close()
Example #7
def stage_latest():
    """
    Copy version to latest within local cdn repository
    """
    version = CONFIG['version']
    
    if not version:   
        tags = _get_tags()
        puts('This project has the following tags:')
        puts(tags)
    
        while True:
            version = prompt("Which version to stage as 'latest'? ").strip()        
            if not version in tags:
                warn('You must enter an existing version')
            else:
                break
    
    print('stage_latest: %s' % version)
    
    # Make sure version has been staged
    version_cdn_path = join(env.cdn_path, version)
    if not os.path.exists(version_cdn_path): 
        abort("Version '%s' has not been staged" % version)
      
    # Stage version as latest           
    latest_cdn_path = join(env.cdn_path, 'latest')
    _clean(latest_cdn_path)
    _copy(version_cdn_path, latest_cdn_path)
Example #8
def stage_latest():
    """
    Copy version to latest within local cdn repository
    """
    if 'version' in CONFIG:
        version = CONFIG['version']
    else:
        tags = _get_tags()
        puts('This project has the following tags:')
        puts(tags)
    
        while True:
            version = prompt("Which version to stage as 'latest'? ").strip()        
            if not version in tags:
                warn('You must enter an existing version')
            else:
                break
    
    print('stage_latest: %s' % version)
    
    # Make sure version has been staged
    version_cdn_path = join(env.cdn_path, version)
    if not os.path.exists(version_cdn_path): 
        abort("Version '%s' has not been staged" % version)
      
    # Stage version as latest           
    latest_cdn_path = join(env.cdn_path, 'latest')
    _clean(latest_cdn_path)
    copy([{"src": version_cdn_path, "dst": latest_cdn_path}])
Example #9
    def provision(self):
        '''
        Installs `RabbitMQ <http://www.rabbitmq.com/>`_ and dependencies.
        This method should be called upon if overridden in base classes, or RabbitMQ won't work properly on the remote server.

        Example:
        ::

            from provy.core import Role
            from provy.more.centos import HgRole

            class MySampleRole(Role):
                def provision(self):
                    self.provision_role(RabbitMqRole)
        '''
        with self.using(YumRole) as role:
            role.ensure_up_to_date()
            role.ensure_package_installed('rabbitmq-server')

        # Start rabbitmq at startup, TODO: add chkconfig role
        self.execute('chkconfig --add rabbitmq-server', stdout=False,
                     sudo=True)
        self.execute('chkconfig rabbitmq-server on', stdout=False, sudo=True)

        # Make sure rabbit is running:
        if not self.is_process_running('rabbitmq-server'):
            self.execute(
                'service rabbitmq-server start', stdout=False, sudo=True,
            )

        if self.user_exists('guest'):
            warn('It is advisable to delete the guest user or change the'
                 ' password to something private, particularly if your broker'
                 ' is accessible publicly.')
Example #10
    def provision(self):
        '''
        Installs `RabbitMQ <http://www.rabbitmq.com/>`_ and dependencies.
        This method should be called upon if overridden in base classes, or RabbitMQ won't work properly on the remote server.

        Example:
        ::

            from provy.core import Role
            from provy.more.debian import HgRole

            class MySampleRole(Role):
                def provision(self):
                    self.provision_role(RabbitMqRole)
        '''
        with self.using(AptitudeRole) as role:
            role.ensure_up_to_date()
            role.ensure_package_installed('rabbitmq-server')

        # Start rabbitmq at startup, TODO: add update-rc.d role
        self.execute('update-rc.d rabbitmq-server defaults', stdout=False,
                     sudo=True)
        self.execute('update-rc.d rabbitmq-server enable', stdout=False, sudo=True)

        # Make sure rabbit is running:
        if not self.is_process_running('rabbitmq-server'):
            self.execute(
                'service rabbitmq-server start', stdout=False, sudo=True,
            )

        if self.user_exists('guest'):
            warn(GUEST_USER_WARNING)
Example #11
def stage_latest(version=''):
    """
    Copy version to local cdn repository
    """
    if not version:
        tags = _get_tags()
        puts('This project has the following tags:')
        puts(tags)
        
        while True:
            version = prompt("Which version to stage as 'latest'? ").strip()
        
            if not version in tags:
                warn('You must enter an existing version')
            else:
                break
     
    # Make sure version has been staged
    version_cdn_path = join(env.cdn_path, version)
    if not os.path.exists(version_cdn_path):
        abort("Version '%s' has not been staged" % version)
      
    # Stage version as latest           
    latest_cdn_path = join(env.cdn_path, 'latest')
    _clean(latest_cdn_path)
    _copy(version_cdn_path, latest_cdn_path)
Example #12
def reset(branch, repository_path=None, **kwargs):
    """
    Fetch, reset, clean and checkout repository branch.

    :return: commit
    """
    commit = None

    if not repository_path:
        repository_path = debian.pwd()

    with cd(repository_path):
        name = os.path.basename(repository_path)
        info('Resetting git repository: {}@{}', name, branch)

        with silent('warnings'):
            commands = [
                'git fetch origin',  # Fetch branches and tags
                'git reset --hard HEAD',  # Make hard reset to HEAD
                'git clean -fdx',  # Remove untracked files pyc, xxx~ etc
                'git checkout HEAD',  # Checkout HEAD
                'git reset refs/remotes/origin/{} --hard'.format(branch)  # Reset to branch
            ]
            output = run(' && '.join(commands))

        if output.return_code != 0:
            warn('Failed to reset repository "{}", probably permission denied!'.format(name))
        else:
            output = output.split(os.linesep)[-1][len('HEAD is now at '):]
            commit = output.split()[0]
            info('HEAD is now at: {}', output)

    return commit
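
A minimal sketch of how a deploy task might consume reset(); the task name and path are illustrative, and it assumes the info() and abort() helpers used elsewhere in these examples are importable.

# Illustrative only: pin the remote checkout to origin/<branch> and report the commit.
def update_source(branch='master', path='/srv/app/src'):
    commit = reset(branch, repository_path=path)
    if commit is None:
        abort('Could not reset {} to origin/{}'.format(path, branch))
    info('Deployed commit {} on branch {}', commit, branch)
    return commit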
Example #13
def upload_config():
    # TODO improve
    if exists('production.ini'):
        put('production.ini', '/opt/kibra/', use_sudo=True, mode=0o400)
        sudo('chown kibra:kibra /opt/kibra/production.ini')
    else:
        warn('no production.ini found')
Example #14
def enable(program, do_reload=True):
    """
    Enable program.

    :param program: Program to enable
    :param do_reload: Reload supervisor
    :return: Got enabled?
    """
    enabled = False
    program = program if program.endswith(
        '.conf') or program == 'default' else '{}.conf'.format(program)

    with sudo():
        available_program = os.path.join(programs_available_path, program)
        if not files.exists(available_program):
            warn('Invalid program: {}'.format(program))
        else:
            with cd(programs_enabled_path):
                if not files.exists(program):
                    info('Enabling program: {}', program)
                    with silent():
                        debian.ln(available_program, program)
                        enabled = True
                    if do_reload:
                        reload()

    return enabled
Example #15
def enable(site, do_reload=True):
    """
    Enable site

    :param site: Site to enable
    :param do_reload: Reload nginx service
    :return: Got enabled?
    """
    enabled = False
    site = site if site.endswith(
        '.conf') or site == 'default' else '{}.conf'.format(site)

    with sudo():
        available_site = os.path.join(sites_available_path, site)
        if not files.exists(available_site):
            warn('Invalid site: {}'.format(site))
        else:
            with cd(sites_enabled_path):
                if not files.exists(site):
                    info('Enabling site: {}', site)
                    with silent():
                        debian.ln(available_site, site)
                        enabled = True
                    if do_reload:
                        reload()

    return enabled
Example #16
    def get_missing_prerequisites(self, packages):
        prerequisites = list(self.get_prerequisites(packages))

        for pkgline in prerequisites:
            (name, pkgtype, platform, resource, url, opt1, opt2) = packageline_split(pkgline)

            if opt1 in ("unzipflat", "unzipflatroot", "unzipflatsecond"):
                if not len(list(which(opt2))):
                    yield pkgline

            elif opt1 in ['exec', 'msiexec']:
                full_path = None
                warn("Information: searching C:\\ for %s, this may take a while" % opt2)
                for root, dirs, files in os.walk("C:\\"):
                    for file in files:
                        if file == opt2:
                            full_path = os.path.join(root, file)
                            warn("Information: found %s at %s" % (opt2, full_path))
                            break
                    if full_path:
                        break
                if not full_path:
                    yield pkgline
            else:
                raise NotImplementedError
Example #17
def syntax_check():
    """Greps the codebase for the patterns defined in settings.SYNTAX_CHECK."""
    with fab_settings(warn_only=True):
        for file_type in settings.SYNTAX_CHECK:
            needs_to_abort = False
            # egrep exits with code 1 when nothing matches, so we need to
            # allow 1 as a successful exit code in our env
            if 1 not in env.ok_ret_codes:
                env.ok_ret_codes.append(1)
            output = local(
                'find -name "{}" -print'.format(file_type),
                capture=True,
            )
            files = output.split()
            for file in files:
                if any(s in file for s in settings.SYNTAX_CHECK_EXCLUDES):
                    continue
                result = local('egrep -i -n "{0}" {1}'.format(
                    settings.SYNTAX_CHECK[file_type], file), capture=True)
                if result:
                    warn(red("Syntax check found in '{0}': {1}".format(
                        file, result)))
                    needs_to_abort = True
            if needs_to_abort:
                abort(red('There have been errors. Please fix them and run'
                          ' the check again.'))
            else:
                puts(green('Syntax check found no errors. Very good!'))
Example #18
def configure():
    for mount_point, config in blueprint.get('', {}).items():
        if 'filesystem' in config:
            debian.mount(mount_point, **config)
        else:
            warn('Mount point {} not configured with filesystem, skipping'.
                 format(mount_point))
Example #19
def enable(conf, weight, do_restart=True):
    """
    Enable logstash input/output provider

    :param conf: Input or output provider config file
    :param weight: Weight of provider
    :param do_restart: Restart service
    :return: Got enabled?
    """
    enabled = False
    conf = conf if conf.endswith(".conf") else "{}.conf".format(conf)

    with sudo():
        available_conf = os.path.join(conf_available_path, conf)
        if not files.exists(available_conf):
            warn("Invalid conf: {}".format(conf))
        else:
            with cd(conf_enabled_path):
                weight = str(weight).zfill(2)
                conf = "{}-{}".format(weight, conf)
                if not files.exists(conf):
                    info("Enabling conf: {}", conf)
                    with silent():
                        debian.ln(available_conf, conf)
                        enabled = True
                    if do_restart:
                        restart("server")

    return enabled
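
Typical usage is just a pair of calls; the provider names and weights below are illustrative and not taken from the original project.

# Illustrative only: lower weights sort first in conf-enabled.
enable('syslog-input', 10, do_restart=False)  # symlinked as 10-syslog-input.conf
enable('elasticsearch-output', 90)            # symlinked as 90-elasticsearch-output.conf, then restart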
Example #20
def get_pip_list_task(**kwargs):
    prepare_env(**kwargs)
    get_pip_list()
    result = run(
        'mysql -u root -pcc3721b edc -Bse \'select prev_batch_id from edc_sync_incomingtransaction LIMIT 1;\'')
    if 'ERROR' in result:
        warn(f'{env.host}: bad DB')
Example #21
def create_copy_for_next():
    """Copy the current version to "next" so that we can do stuff like
    the VCS update and virtualenv update without taking the site offline"""
    # TODO: check if next directory already exists
    # if it does maybe there was an aborted deploy, or maybe someone else is
    # deploying.  Either way, stop and ask the user what to do.
    if files.exists(env.next_dir):
        utils.warn('The "next" directory already exists.  Maybe a previous '
                   'deploy failed, or maybe another deploy is in progress.')
        continue_anyway = prompt(
            'Would you like to continue anyway '
            '(and delete the current next dir)? [no/yes]',
            default='no',
            validate='^no|yes$')
        if continue_anyway.lower() != 'yes':
            utils.abort(
                "Aborting deploy - try again when you're certain what to do.")
        sudo_or_run('rm -rf %s' % env.next_dir)

    # if this is the initial deploy, the vcs_root_dir won't exist yet. In that
    # case, don't create it (otherwise the checkout code will get confused).
    if files.exists(env.vcs_root_dir):
        # cp -a - amongst other things this preserves links and timestamps
        # so the compare that bootstrap.py does to see if the virtualenv
        # needs an update should still work.
        sudo_or_run('cp -a %s %s' % (env.vcs_root_dir, env.next_dir))
Example #22
def _process_addons(op, name='instance', collector=None):
    stack = []
    stack.append(('local', 'mkdir -p addons'))
    stack.append(('lcd', 'addons'))
    addons = config.get(name, 'addons')
    for addon_spec in addons.splitlines():
        mo = SPEC.match(addon_spec)
        addon_name, _, subdir = [g.strip() for g in mo.groups()]
        dest = os.path.normpath(os.path.join(addon_name, subdir))
        stack.append(('local', 'mkdir -p ' + addon_name))
        if not config.has_option('repo', addon_name):
            warn(
                "'%s' is not found in section 'repo', but added to addons_path anyway."
                % addon_name)
            addons_path_adder(None, os.path.join(ROOT, dest))
        else:
            repos = config.get('repo', addon_name)
            repo_spec = [l for l in repos.splitlines() if l]
            multiline = len(repo_spec) > 1
            for spec in repo_spec:
                process_repo(op,
                             name,
                             addon_name,
                             spec,
                             stack,
                             collector=collector,
                             multiline=multiline)
            stack.append((('lcd', dest), ('local', 'pwd', addons_path_adder)))
    return stack
Example #23
def deploy():
    '''Sync code from here to the servers'''
    global env
    global SITE_NAME

    # Two separate calculations because Mac has HOME=/Users/swaroop and
    # Linux has HOME=/home/swaroop and therefore cannot use the same dirname.
    local_dir = os.path.join(os.getenv('HOME'), 'web', SITE_NAME, 'private',
                             SITE_NAME)
    remote_dir = os.path.join('/home', os.getlogin(), 'web', SITE_NAME,
                              'private', SITE_NAME)
    _transfer_files(local_dir, env.host + ':' + remote_dir, ssh_port=env.port)
    sudo('apache2ctl graceful')
    try:
        urllib2.urlopen('http://' + env.host_string)
    except urllib2.HTTPError as x:
        warn(
            colors.red(
                "Failed! Code deployment was a disaster. Apache is throwing {0}."
                .format(x)))
        showlogs()
        return
    puts(
        colors.magenta('Success! The {0} server has been updated.'.format(
            env.host_string)))
Example #24
def deploy(override_prompt=False):
    """
    Deploy the project.
    """

    require('stage', provided_by=(stg, prod))

    latest_build = _get_latest_build()
    current_build = _get_current_build()

    if current_build == 'unknown':
        msg = ('Either this is the first time deploying or the destination is in an unknown state. '
               'Either way, deploying now is a safe operation and this message is only for your own information.')
        warn('Unable to find a deployed build on the node. %s' % msg)

    print('Build currently deployed:', current_build)
    print('Build available for deploying:', latest_build.split('/')[1])
    print()

    if not override_prompt:
        continue_prompt = confirm('Ready to deploy?')
        if not continue_prompt:
            abort('Not ready to deploy')
    if latest_build.split('/')[1] == current_build:
        warn('You are about to deploy the exact same build again')
        dupe_deploy_prompt = confirm('Are you sure you want to deploy the same build again?')
        if not dupe_deploy_prompt:
            abort('Not deploying duplicate build')
    download_build(s3_bucket, latest_build, temp_dir)
Example #25
def clone(url, branch=None, repository_path=None, **kwargs):
    """
    Clone repository and branch.

    :param url: Git url to clone
    :param branch: Branch to checkout
    :param repository_path: Destination
    :param kwargs: Not used but here for easier kwarg passing
    :return: (destination, got_cloned bool)
    """
    repository = parse_url(url, branch=branch)
    name = repository['name']
    branch = repository['branch']
    cloned = False

    if not repository_path:
        repository_path = os.path.join('.', name)

    if not files.exists(os.path.join(repository_path, '.git')):
        info('Cloning {}@{} into {}', url, branch, repository_path)
        with silent('warnings'):
            cmd = 'git clone -b {branch} {remote} {name}'.format(branch=branch, remote=url, name=name)
            output = run(cmd)
        if output.return_code != 0:
            warn('Failed to clone repository "{}", probably permission denied!'.format(name))
            cloned = None
        else:
            cloned = True
    else:
        info('Git repository already cloned: {}', name)

    return repository_path, cloned
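
clone() pairs naturally with the reset() task from Example #12: clone on the first deploy, reset on every later one. A minimal sketch under that assumption; the URL and path are placeholders.

# Illustrative only: make sure a checkout exists, then sync it to the wanted branch.
def ensure_source(url='git@example.com:acme/app.git', branch='master', path='/srv/app/src'):
    path, cloned = clone(url, branch=branch, repository_path=path)
    if cloned is None:
        abort('Clone failed, check repository permissions')
    if cloned is False:
        # Already cloned on a previous deploy; just reset to origin/<branch>.
        reset(branch, repository_path=path)
    return path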
Example #26
def assert_release_branch(release_branch='master'):
    """ Checks the currently checked out branch is `release_branch`
    """
    branch = get_current_branch()
    if branch != release_branch:
        warn(ASSERT_RELEASE_BRANCH_WARNING.format(release_branch, branch))
        continue_check()
Example #27
    def provision(self):
        '''
        Installs `RabbitMQ <http://www.rabbitmq.com/>`_ and dependencies.
        This method should be called upon if overridden in base classes, or RabbitMQ won't work properly on the remote server.

        Example:
        ::

            from provy.core import Role
            from provy.more.centos import HgRole

            class MySampleRole(Role):
                def provision(self):
                    self.provision_role(RabbitMqRole)
        '''
        with self.using(YumRole) as role:
            role.ensure_up_to_date()
            role.ensure_package_installed('rabbitmq-server')

        # Start rabbitmq at startup, TODO: add chkconfig role
        self.execute('chkconfig --add rabbitmq-server',
                     stdout=False,
                     sudo=True)
        self.execute('chkconfig rabbitmq-server on', stdout=False, sudo=True)

        # Make sure rabbit is running:
        if not self.is_process_running('rabbitmq-server'):
            self.execute(
                'service rabbitmq-server start',
                stdout=False,
                sudo=True,
            )

        if self.user_exists('guest'):
            warn(GUEST_USER_WARNING)
Example #28
def enable(conf, weight, do_restart=True):
    """
    Enable logstash input/output provider

    :param conf: Input or output provider config file
    :param weight: Weight of provider
    :param do_restart: Restart service
    :return: Got enabled?
    """
    enabled = False
    conf = conf if conf.endswith('.conf') else '{}.conf'.format(conf)

    with sudo():
        available_conf = os.path.join(conf_available_path, conf)
        if not files.exists(available_conf):
            warn('Invalid conf: {}'.format(conf))
        else:
            with cd(conf_enabled_path):
                weight = str(weight).zfill(2)
                conf = '{}-{}'.format(weight, conf)
                if not files.exists(conf):
                    info('Enabling conf: {}', conf)
                    with silent():
                        debian.ln(available_conf, conf)
                        enabled = True
                    if do_restart:
                        restart('server')

    return enabled
Example #29
def create_copy_for_next():
    """Copy the current version to "next" so that we can do stuff like
    the VCS update and virtualenv update without taking the site offline"""
    # check if next directory already exists
    # if it does maybe there was an aborted deploy, or maybe someone else is
    # deploying.  Either way, stop and ask the user what to do.
    if files.exists(env.next_dir):
        utils.warn('The "next" directory already exists.  Maybe a previous '
                   'deploy failed, or maybe another deploy is in progress.')
        continue_anyway = prompt('Would you like to continue anyway '
                                 '(and delete the current next dir)? [no/yes]',
                default='no', validate='^no|yes$')
        if continue_anyway.lower() != 'yes':
            utils.abort("Aborting deploy - try again when you're certain what to do.")
        sudo_or_run('rm -rf %s' % env.next_dir)

    # if this is the initial deploy, the vcs_root_dir won't exist yet. In that
    # case, don't create it (otherwise the checkout code will get confused).
    if files.exists(env.vcs_root_dir):
        # cp -a - amongst other things this preserves links and timestamps
        # so the compare that bootstrap.py does to see if the virtualenv
        # needs an update should still work.
        sudo_or_run('cp -a %s %s' % (env.vcs_root_dir_timestamp, env.next_dir))

        # fix the virtualenv
        _fix_virtualenv_paths()
Example #30
def validate_boot_options(options):
    """
    Validate systemd boot options
    """
    if lsb_release() == '14.04':
        none_boot_options_check = boot_options_16
    else:
        none_boot_options_check = boot_options_14
    for option in options.split(','):
        stripped = 0
        if re.search('=', option):
            option = option.split('=')[0]
            stripped = 1
        if option in none_boot_options_check:
            warn(option + " is not a valid boot option for " + lsb_release() + ", check the boot options in your yaml")
            if lsb_release() == '14.04':
                info("Valid boot options are: " + str(boot_options_14))
            else:
                info("Valid boot options are: " + str(boot_options_16))
            exit()
        if stripped == 0:
            if re.search('x-systemd', option):
                option = option.replace("x-systemd.", "")
            if option in boot_options_16_more:
                warn("Missing = for x-systemd." + option)
                exit()
Example #31
def install(project_name):
    print "Installing dad for %s" % project_name
    env.project_name = project_name

    # Copy templates
    if os.path.exists(env.apacheconf_path):
        warn("Warning: apache config directory already exists, skipping.\n")
    else:
        local('mkdir %s' % env.apacheconf_path)
        local('cp %s %s' % (_get_template('apache/example.conf'), env.apacheconf_path))
        local('cp %s %s' % (_get_template('apache/prod.conf'), env.apacheconf_path))
        local('cp %s %s' % (_get_template('apache/demo.conf'), env.apacheconf_path))
        local('cp %s %s' % (_get_template('apache/demo.wsgi'), env.apacheconf_path))
        local('cp %s %s' % (_get_template('apache/prod.wsgi'), env.apacheconf_path))

    if os.path.exists(env.dadconf_path):
        warn("Warning: dad config directory already exists, skipping.\n")
    else:
        local('mkdir %s' % env.dadconf_path)
        # requirements.txt
        local('cp %s %s' % (_get_template('requirements.txt'), env.dadconf_path))
       #for stage in STAGES:
       #    req = _get_template('requirements_%s.txt' % stage)
       #    if files.exists(req):
       #        local('cp %s %s' % (req, env.dadconf_path))

        _template(_get_template('project.yml'), os.path.join(env.dadconf_path, 'project.yml'), {
            'project_name': project_name,
        })
    
    for stage in STAGES:
        dest = os.path.join(os.path.join(env.base_path, project_name), 'settings_%s.py' % stage)
        src  = _get_template_path('settings_%s.py' % stage)
        if not os.path.exists(dest):
            _template(src, dest, { 'project_name': project_name })
Example #32
def enable(site, do_reload=True):
    """
    Enable site

    :param site: Site to enable
    :param do_reload: Reload nginx service
    :return: Got enabled?
    """
    enabled = False
    site = site if site.endswith('.conf') or site == 'default' else '{}.conf'.format(site)

    with sudo():
        available_site = os.path.join(sites_available_path, site)
        if not files.exists(available_site):
            warn('Invalid site: {}'.format(site))
        else:
            with cd(sites_enabled_path):
                if not files.exists(site):
                    info('Enabling site: {}', site)
                    with silent():
                        debian.ln(available_site, site)
                        enabled = True
                    if do_reload:
                        reload()

    return enabled
Example #33
def web_nginx_setup_domain(domain, proto='http', interface='*', upstream_address='', upstream_port=''):
    """ Setup Nginx config file for a domain - Ex: (cmd:<domain>,[protocol],[port])"""
    if 'https' in proto or 'ssl' in proto:
        proto = 'https'
        ssl_crt = '/etc/ssl/nginx/crt/{}.combo.crt'.format(domain)
        ssl_key = '/etc/ssl/nginx/key/{}.key'.format(domain)
        if not files.exists(ssl_crt, use_sudo=True) or not files.exists(ssl_key, use_sudo=True):
            warn('SSL certificate or key not found.\n{}\n{}'.format(ssl_crt, ssl_key))

    cfgdir = os.path.join(os.path.dirname( __file__), '../cfg')
    nginx_avail_dir = '/etc/nginx/sites-available'
    nginx_enabled_dir = '/etc/nginx/sites-enabled'

    localcfg = os.path.expanduser(os.path.join(cfgdir, 'nginx/{}.conf'.format(proto)))
    remotecfg = '{}/{}.{}'.format(nginx_avail_dir, proto, domain)
    sudo('rm -rf ' + remotecfg)
    put(localcfg, remotecfg, use_sudo=True)

    if upstream_address and upstream_port:
        sudo('sed -i "s/upstream_address/{}/g" {}'.format(upstream_address, remotecfg))
        sudo('sed -i "s/upstream_port/{}/g" {}'.format(upstream_port, remotecfg))

    sudo('sed -i "s/public_interface/{}/g" {}'.format(interface, remotecfg))
    sudo('sed -i "s/example\.com/{}/g" {}'.format(domain.replace('.', '\.'), remotecfg))
    sudo('chown -R root:root {}'.format(nginx_avail_dir))
    sudo('chmod -R 755 {}'.format(nginx_avail_dir))
    with cd(nginx_enabled_dir):
        sudo('ln -sf {}'.format(remotecfg))
    time.sleep(2)
    sys_reload_service('nginx')
    sys_etc_git_commit('Setup Nginx Config for Domain {}'.format(domain))
Example #34
def check_presto_version():
    """
    Checks that the Presto version is suitable.

    Returns:
        Error string if applicable
    """
    version = get_presto_version()
    if version in PRESTO_TD_RPM:
        return ""
    try:
        # remove -SNAPSHOT or .SNAPSHOT from the version string
        version = re.sub(r"[-\.]SNAPSHOT", "", version)
        float(version)
        version_number = version.strip().split(".")
        if int(version_number[1]) < PRESTO_RPM_MIN_REQUIRED_VERSION:
            incorrect_version_str = "Presto version is %s, version >= 0.%d " "required." % (
                version,
                PRESTO_RPM_MIN_REQUIRED_VERSION,
            )
            warn(incorrect_version_str)
            return incorrect_version_str
        return ""
    except ValueError:
        not_installed_str = "Presto is not installed."
        warn(not_installed_str)
        return not_installed_str
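
A hedged sketch of gating another task on the check above; the restart command and the task name are assumptions, not part of the original module.

# Illustrative only: refuse to act on a missing or too-old Presto install.
def restart_if_supported():
    error = check_presto_version()
    if error:
        abort(error)                        # the check already warn()ed with details
    sudo('/etc/init.d/presto restart')      # hypothetical restart command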
Example #35
def create_from_disk(
    disk_query, cluster_query=None, template_query='name=Blank',
    memory=2 * GiB, vcpus=2, set_disk_bootable='yes',
    networks=None,
    show=None, headers='yes', ovirt=None
):
    """
    Create oVirt VMs from the given disks. The names of the VMs will be the
    same as the names of the disks. If VMs with the disk names already exist,
    VM creation will be skipped with a warning.

    :param str disk_query: A query to find the disks to use; if multiple disks
                           are found, a VM will be created for each and every
                           disk
    :param str set_disk_bootable: If true mark the disk as bootable

    The cluster_query, template_query, memory, vcpus and networks parameters
    are like the ones for the 'create' task.
    The 'show' and 'headers' parameters are the same as for the 'query' task

    :param ovirtsdk.api.API ovirt: An open oVirt API connection

    :returns: The vms that were created
    :rtype: list
    """
    disks = ovirt.disks.list(query=disk_query)
    vms = []
    for disk in disks:
        if cluster_query is None:
            # If cluster is not specified, ensure we choose a cluster that is
            # attached to the storage domain that hosts the disk
            disk_sd = disk.get_storage_domains().get_storage_domain()[0]
            disk_sd = ovirt.storagedomains.get(id=disk_sd.id)
            cluster_query = "Storage={0}".format(disk_sd.name)
        with hide('user'):
            try:
                vm = create(
                    name=disk.name,
                    cluster_query=cluster_query,
                    template_query=template_query,
                    memory=memory,
                    vcpus=vcpus,
                    networks=networks,
                    ovirt=ovirt
                )
            except oVirtErrors.RequestError as e:
                if e.detail.find('VM name is already in use') < 0:
                    raise
                warn("VM '{0}' already exists".format(disk.name))
                continue
            if set_disk_bootable == 'yes':
                disk.set_bootable(True)
            disk.set_active(True)
            vm.disks.add(disk)
            vms.append(vm)
    oVirtObjectType.all_types['vm'].print_table(
        vms, show=show, headers=headers
    )
    return vms
Example #36
def stop_fcgi():
    'Stop the Django FastCGI daemon'

    if not exists(env.pidfile):
        warn('PID file does not exist (%(pidfile)s), you may have to manually kill the fcgi process' % env)
    else:
        run('kill -term `cat %(pidfile)s`' % env)
        run('rm %(pidfile)s' % env)
Example #37
def authorize_remove(username, publickeyfile):
    ''' args: (remote) username, (local) publickeyfile; remove publickeyfile entries from authorized_keys of user'''
    authorized_file = __get_authorized_file(username)
    keys = __get_authorized_keys(publickeyfile)
    for key in keys:
        if contains_exact(authorized_file, key, use_sudo=True):
            warn("public key exists, commeting it out")
            comment(authorized_file, key, use_sudo=True)
Example #38
def handle_failure(cmd, warn_only):
    if hasattr(cmd, '__name__'):
        cmd = cmd.__name__ + '()'
    message = 'Error running `%s`\n\n%s' % (cmd, indent(format_exc()))
    if warn_only:
        warn(message)
    else:
        abort(message)
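
A short sketch of the call pattern the wrapper above implies: it is invoked from inside an except block so that format_exc() has a live traceback to format. The surrounding task runner is illustrative.

# Illustrative only: run a task callable, downgrading failures to warnings on request.
def run_task(task, warn_only=False):
    try:
        return task()
    except Exception:
        handle_failure(task, warn_only)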
Example #39
def sys_user_delete(username):
    """ Delete new user - Ex: (cmd:<user>)"""
    with settings(warn_only=True):
        if username == 'root':
            warn('Cannot delete root user')
            return
        sudo('pkill -KILL -u {}'.format(username))
        sudo('userdel {}'.format(username))
    sys_etc_git_commit('Deleted user({})'.format(username))
Example #40
def handle_failure(cmd, warn_only):
    if hasattr(cmd, '__name__'):
        cmd = cmd.__name__ + '()'
    message = 'Error running `%s`\n\n%s' % (cmd, indent(format_exc()))
    if warn_only:
        warn(message)
    else:
        abort(message)
Example #41
def custom_cassandra_shutdown():
    """
    Shutdown Cassandra
    """
    if exists(CASSANDRA_PID):
        run("kill $(cat {0}) || true".format(CASSANDRA_PID))
        run("test -e {0} && rm {0}".format(CASSANDRA_PID))
    else:
        warn("Can't stop Cassandra: pid file not found")
Example #42
def start_iperf_server(port=5005):
    # TODO figure out why this doesn't work consistently
    output = sudo("nohup iperf -s -p %s -D" % port)
    try:
        pid = re.compile(REGEX_IPERF_PID_OUTPUT,
                         flags=re.MULTILINE).search(output.stdout).group('pid')
        puts("found pid %s" % pid)
    except AttributeError:
        warn("could not find pid for iperf on %(host)s" % env)
Example #43
def _pre_pip():
    pip_version = run('{0} --version'.format(env.pip_basic_cmd)).split(' ')[1]
    if pip_version >= env.pip_version:
        fastprint(green('pip ready'), end='\n')
    else:
        warn(red('installing pip'))
        run('{0} install --upgrade pip'.format(env.pip_basic_cmd))
        warn(red('upgraded pip'))
        fastprint(green('pip ready'), end='\n')
Example #44
def clean():
    if confirm("Clean will delete any files that are ignored by gitignore\n"
               "and also any files that are not yet tracked by git.\n"
               "Are you sure you want to continue?", default=False):
        warn("Deleting Untracked and Ignored Files, you have been WARNED!")
        local("git clean -d -f")
        local("mkdir -p build/iphone")

        puts("Project is now clean.")
    else:
        warn("CLEAN IS CANCELLED.")
Example #45
def configure(config_files, path):
    dist_files = get_dist_files(path)
    new_config_files = []
    for f in dist_files:
        # copy dist file to installation
        sudo('cp %s %s' % (f, f.split('.dist')[0]))
        new_config_files.append(f.split('.dist')[0])
    if not config_files:
        config_files = new_config_files
        old_config = False
    else:
        old_config = True
    for f in config_files:
        name = f.split('/')[-1]
        for d in dist_files:

            if name in d and old_config:
                # check if there are differences in between new
                # dist files and old ones
                try:
                    run('diff %s %s.dist' % (d, f))
                except:
                    # if diff has an output
                    same_dist = False
                else:
                    same_dist = True

                if not same_dist:
                    same_dist = confirm('Dist files differ. Apply old %s?' %
                                        ' '.join(config_files))
                # copy file from previous installation if it exists
                # and the user tells you to (in case of differences)
                if same_dist and exists(f):
                    warn('Copying old %s' % f.split('/')[-1])
                    sudo('cp %s %s' % (f, d.split('.dist')[0]))
                else:
                    warn('Copying %s' % d)
                    sudo('cp %s %s' % (d, d.split('.dist')[0]))
    for f in config_files:
        if not confirm(
                'Please make sure %s is configured!' % f.split('.dist')[0]):
            return False
    with cd(path):
        while True:
            # we have to execute the following commands in order
            # to move on. In case they fail we have to do some more
            # editing in settings py probably.
            try:
                operations.try_to_execute('./manage.py syncdb --noinput')
                operations.try_to_execute('./manage.py migrate')
                operations.try_to_execute(
                    './manage.py collectstatic --noinput')
            except Exception as e:
                if not confirm('%s. Retry?' % e):
                    return False
            else:
                return True
Example #46
def custom_cassandra_shutdown():
    """
    Shutdown Cassandra
    """
    if exists(CASSANDRA_PID):
        run("kill $(cat {0}) || true".format(CASSANDRA_PID))
        run("test -e {0} && rm {0}".format(CASSANDRA_PID))
    else:
        warn("Can't stop Cassandra: pid file not found")
Example #47
    def valid_members(self):
        for info in self.members:
            # Light sanity check
            if info.name.startswith(('/', '..')):
                warn("> Ignoring '{0}'! Could affect other paths outside the untar directory.".format(info.name))
            else:
                puts(info.name)
                self.valid.append(info.name)
                yield info
Example #48
def stop_fcgi():
    'Stop the Django FastCGI daemon'

    if not exists(env.pidfile):
        warn(
            'PID file does not exist (%(pidfile)s), you may have to manually kill the fcgi process'
            % env)
    else:
        run('kill -term `cat %(pidfile)s`' % env)
        run('rm %(pidfile)s' % env)
Example #49
def configuration_fetch(file_name, config_destination, should_warn=True):
    remote_file_path = os.path.join(constants.REMOTE_CONF_DIR, file_name)
    if not files.exists(remote_file_path):
        if should_warn:
            warn("No configuration file found for %s at %s" %
                 (env.host, remote_file_path))
        return None
    else:
        get(remote_file_path, config_destination, use_sudo=True)
        return remote_file_path
Example #50
def create_db_dict(config_files):
    for f in config_files:
        if 'settings.py' in f:
            settings_file = f.split('.dist')[0]
            if exists(settings_file):
                break
            else:
                warn('No settings file found from previous installation')
                return False
    return django.db_config(settings_file)
Example #51
def assert_ahead_of_current_release(current_release='origin/master'):
    """ Checks the local branch is ahead of `current_release`.

        Assumes `current_release` to be the `master` branch of your remote
        `origin`. Can specify a local branch instead.
    """
    upstream_ahead, local_ahead = diff_branch(current_release)
    if upstream_ahead > 0:
        warn(ASSERT_AHEAD_OF_CURRENT_RELEASE_WARNING.format(current_release))
        continue_check()
Example #52
def check_status_for_control_commands():
    client = PrestoClient(env.host, env.port)
    print('Waiting to make sure we can connect to the Presto server on %s, '
          'please wait. This check will time out after %d minutes if the '
          'server does not respond.' % (env.host, (RETRY_TIMEOUT / 60)))
    if check_server_status(client):
        print('Server started successfully on: ' + env.host)
    else:
        warn('Server failed to start on: ' + env.host + '\nPlease check ' +
             REMOTE_PRESTO_LOG_DIR + '/server.log')
Example #53
def setup_linode(production_root_url):
    """get things set up on linode"""

    # make sure the secret key exists
    fabhelp.progress("adding subdomain '%s'to linode resources"%\
                         production_root_url.subdomain)
    linoderc = ConfigParser.ConfigParser()
    linoderc.read([
        os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            "conf",
            "linoderc",
        ),
        os.path.expanduser("~/.linoderc"),
    ])
    try:
        linode_secret_key = linoderc.get("keys", "secret")
    except ConfigParser.NoOptionError:
        msg = "need to add the secret linode key to ~/.linoderc file.\n"
        msg += "for more information, see common/conf/linoderc"
        abort(msg)
    
    # create linode_api instance
    from linode.api import Api
    linode_api = Api(linode_secret_key)

    # get the domainid 
    domainid = None
    for d in linode_api.domain_list():
        if d["DOMAIN"] == "datascopeanalytics.com":
            domainid = d["DOMAINID"]

    # get ip address for datascopeanalytics.com
    import socket
    addrinfos = socket.getaddrinfo("datascopeanalytics.com", 80)
    for addrinfo in addrinfos[1:]:
        assert addrinfos[0][4][0] == addrinfo[4][0]
    datascopeanalytics_ip_address = addrinfos[0][4][0]

    # create new domain resource for this subdomain as necessary
    create_resource = True
    for r in linode_api.domain_resource_list(domainid=domainid):
        if r["TYPE"].lower() == 'a' and \
                r["NAME"]==production_root_url.subdomain:
            create_resource = False
            warn("subdomain '%s' already exists..."%\
                     production_root_url.subdomain)
            break
    if create_resource:
        linode_api.domain_resource_create(
            domainid=domainid, 
            type="A", 
            name=production_root_url.subdomain, 
            target=datascopeanalytics_ip_address,
        )
Example #54
def check_status_for_control_commands():
    client = PrestoClient(env.host, env.user)
    print('Waiting to make sure we can connect to the Presto server on %s, '
          'please wait. This check will time out after %d minutes if the '
          'server does not respond.' % (env.host, (RETRY_TIMEOUT / 60)))
    if check_server_status(client):
        print('Server started successfully on: ' + env.host)
    else:
        warn('Server failed to start on: ' + env.host + '\nPlease check ' +
             lookup_server_log_file(env.host) + ' and ' +
             lookup_launcher_log_file(env.host))
Example #55
def dump_db(config_files):
    db_settings = create_db_dict(config_files)
    success = False
    dump_path = '/tmp/%s.sql' % project_name
    if db_settings:
        success = database.dump_mysql(db_settings, dump_path)
    if not success:
        warn('Could not get db dump...')
        if not confirm('Continue?'):
            abort('Aborting...')
    return dump_path if success else False
Example #56
def deploy_all(source_directory, should_warn=True):
    host_config_dir = os.path.join(source_directory, env.host)
    for file_name in ALL_CONFIG:
        local_config_file = os.path.join(host_config_dir, file_name)
        if not os.path.exists(local_config_file):
            if should_warn:
                warn("No configuration file found for %s at %s"
                     % (env.host, local_config_file))
            continue
        remote_config_file = os.path.join(constants.REMOTE_CONF_DIR, file_name)
        put(local_config_file, remote_config_file)
Example #57
def apache_configure():
    """
    Configure a remote apache server
    """
    _setup_env()
    servername = env.stage['servername']
    tpl = 'apache/%(role)s.conf' % env
    src = os.path.join(env.stage['path'], tpl)
    if env.role == 'dev':
        use_sudo = False
    else:
        use_sudo = True

    if files.exists(src, use_sudo=use_sudo):
        ctx = {}
        dest_path = env.sysdef['vhosts'] % {'servername': servername}

        if 'error_logs' in env.sysdef:
            ctx['error_logs'] = env.sysdef['error_logs'] % {
                'servername': servername
            }

        if 'access_logs' in env.sysdef:
            ctx['access_logs'] = env.sysdef['access_logs'] % {
                'servername': servername
            }

        if 'user' in env.stage:
            ctx['user'] = env.stage['user']
        else:
            ctx['user'] = env.user

        if 'group' in env.stage:
            ctx['group'] = env.stage['group']
        else:
            ctx['group'] = 'www-data'

        ctx['media_path'] = os.path.join(env.stage['path'], 'media/')
        ctx['static_path'] = os.path.join(env.stage['path'], 'static/')
        ctx['project_name'] = env.project_name
        ctx['project_path'] = env.project_path
        ctx['server_name'] = servername
        ctx['server_admin'] = env.stage['serveradmin']
        ctx['document_root'] = env.stage['path']

        get(os.path.join(env.stage['path'], tpl), '/tmp/test')
        files.upload_template(tpl,
                              dest_path,
                              context=ctx,
                              use_sudo=use_sudo,
                              backup=False)

    else:
        warn("Warning %s not found." % src)