Example #1
def refresh(stackname, context):
    "(safely) replaces the buildvars file on the ec2 instance(s)"

    def _refresh_buildvars():
        old_buildvars = _retrieve_build_vars()

        node = old_buildvars.get('node')
        if not node or not str(node).isdigit():
            # (very) old buildvars. try parsing 'nodename'
            nodename = old_buildvars.get('nodename')
            if nodename: # ll: "elife-dashboard--prod--1"
                node = nodename.split('--')[-1]
                if not node.isdigit():
                    LOG.warning("nodename ends in a non-digit node: %s", nodename)
                    node = None

            if not node:
                # no 'node' and no (valid) 'nodename' present
                # assume this stack was created before nodes were a thing
                # and that there is only 1 in the 'cluster'.
                node = 1

        new_buildvars = trop.build_vars(context, int(node))
        new_buildvars['revision'] = old_buildvars.get('revision') # TODO: is this still necessary?
        _update_remote_bvars(stackname, new_buildvars)

    stack_all_ec2_nodes(stackname, _refresh_buildvars, username=BOOTSTRAP_USER)
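
All of these examples funnel work through stack_all_ec2_nodes. A minimal sketch of the dispatch pattern they imply, assuming the worker is a zero-argument callable run once per node with results gathered per node id (node_ids stands in for the real node discovery; this is not builder's implementation):

def stack_all_ec2_nodes_sketch(stackname, workfn, node_ids=(1,), **ssh_opts):
    # ssh_opts (username etc.) would configure the ssh session in the real helper.
    # run the worker once per node, collecting results keyed by node id.
    results = {}
    for node_id in node_ids:
        results[node_id] = workfn()
    return results

# usage: stack_all_ec2_nodes_sketch('elife-dashboard--prod', lambda: 'ok')
# => {1: 'ok'}
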
Example #2
def refresh(stackname, context):
    "(safely) replaces the buildvars file on the ec2 instance(s)"

    def _refresh_buildvars():
        old_buildvars = _retrieve_build_vars()

        node = old_buildvars.get('node')
        if not node or not str(node).isdigit():
            # (very) old buildvars. try parsing 'nodename'
            nodename = old_buildvars.get('nodename')
            if nodename: # ll: "elife-dashboard--prod--1"
                node = nodename.split('--')[-1]
                if not node.isdigit():
                    LOG.warning("nodename ends in a non-digit node: %s", nodename)
                    node = None

            if not node:
                # no 'node' and no (valid) 'nodename' present
                # assume this stack was created before nodes were a thing
                # and that there is only 1 in the 'cluster'.
                node = 1

        new_buildvars = trop.build_vars(context, int(node))
        new_buildvars['revision'] = old_buildvars.get('revision')
        _update_remote_bvars(stackname, new_buildvars)

    stack_all_ec2_nodes(stackname, _refresh_buildvars, username=BOOTSTRAP_USER)
Example #3
def refresh(stackname, context):
    "(safely) replaces the buildvars file on the ec2 instance(s)"

    def _refresh_buildvars():
        old_buildvars = _retrieve_build_vars()

        node = old_buildvars.get('node')
        if not node or not str(node).isdigit():
            # (very) old buildvars. try parsing 'nodename'
            nodename = old_buildvars.get('nodename')
            if nodename: # ll: "elife-dashboard--prod--1"
                node = nodename.split('--')[-1]
                if not node.isdigit():
                    LOG.warning("nodename ends in a non-digit node: %s", nodename)
                    node = None

            if not node:
                # no 'node' and no (valid) 'nodename' present
                # assume this stack was created before nodes were a thing
                # and that there is only 1 in the 'cluster'.
                node = 1

        new_buildvars = trop.build_vars(context, int(node))
        new_buildvars['revision'] = old_buildvars.get('revision') # TODO: is this still necessary?
        _update_remote_bvars(stackname, new_buildvars)

    # lsh@2019-06: cfn.update_infrastructure fails to run highstate on new ec2 instance if keypair not present,
    # it prompts for a password for the deploy user. prompts when executing in parallel cause operation to fail
    keypair.download_from_s3(stackname, die_if_exists=False)

    stack_all_ec2_nodes(stackname, _refresh_buildvars, username=BOOTSTRAP_USER)
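
Example #3 adds a keypair download before connecting. A hedged sketch of the idempotent-download behaviour that die_if_exists=False suggests (the path layout and the fetch_from_s3 helper are assumptions, not confirmed by the snippet):

import os

def download_from_s3_sketch(stackname, die_if_exists=True, dest_dir='/tmp/keypairs'):
    dest = os.path.join(dest_dir, stackname + '.pem')
    if os.path.exists(dest):
        if die_if_exists:
            raise RuntimeError('keypair already exists: %s' % dest)
        return dest  # already present: nothing to do, no password prompt later
    # fetch_from_s3(stackname, dest)  # hypothetical S3 fetch goes here
    return dest
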
Example #4
def remaster(stackname, new_master_stackname):
    "tell minion who their new master is. deletes any existing master key on minion"
    # TODO: turn this into a decorator
    import cfn
    # start the machine if it's stopped
    # you might also want to acquire a lock so alfred doesn't stop things
    cfn._check_want_to_be_running(stackname, 1)

    master_ip = _cached_master_ip(new_master_stackname)
    LOG.info('re-mastering %s to %s', stackname, master_ip)

    context = context_handler.load_context(stackname)

    # remove if no longer an issue
    # if context.get('ec2') == True:
    #    # TODO: duplicates bad ec2 data wrangling in cfngen.build_context
    #    # ec2 == True for some reason, which is completely useless
    #    LOG.warn("bad context for stack: %s", stackname)
    #    context['ec2'] = {}
    #    context['project']['aws']['ec2'] = {}
    if not context.get('ec2'):
        LOG.info("no ec2 context, skipping %s", stackname)
        return

    if context['ec2'].get('master_ip') == master_ip:
        LOG.info("already remastered: %s", stackname)
        try:
            utils.confirm("Skip?")
            return
        except KeyboardInterrupt:
            LOG.info("not skipping")

    LOG.info("upgrading salt client")
    pdata = core.project_data_for_stackname(stackname)
    context['project']['salt'] = pdata['salt']

    LOG.info("setting new master address")
    cfngen.set_master_address(pdata, context, master_ip)  # mutates context

    # update context
    LOG.info("updating context")
    context_handler.write_context(stackname, context)

    # update buildvars
    LOG.info("updating buildvars")
    buildvars.refresh(stackname, context)

    # remove knowledge of old master
    def work():
        remote_sudo("rm -f /etc/salt/pki/minion/minion_master.pub"
                    )  # destroy the old master key we have

    LOG.info("removing old master key from minion")
    core.stack_all_ec2_nodes(stackname, work, username=config.BOOTSTRAP_USER)

    # update ec2 nodes
    LOG.info("updating nodes")
    bootstrap.update_ec2_stack(stackname, context, concurrency='serial')
    return True
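
The utils.confirm / KeyboardInterrupt dance above inverts the usual prompt: pressing Enter accepts the skip, while Ctrl-C forces the remaster to continue. A small self-contained sketch of that pattern (confirm here is a stand-in for builder's utils.confirm):

def confirm(prompt):
    # stand-in: blocks until Enter; Ctrl-C raises KeyboardInterrupt
    input('%s (Enter to accept, Ctrl-C to refuse) ' % prompt)

def should_skip():
    try:
        confirm('Skip?')
        return True   # operator pressed Enter: skip the remaster
    except KeyboardInterrupt:
        return False  # operator interrupted: proceed with the remaster
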
Example #5
def remaster(stackname, new_master_stackname):
    "tell minion who their new master is. deletes any existing master key on minion"
    # TODO: turn this into a decorator
    import cfn
    # start the machine if it's stopped
    # you might also want to acquire a lock so alfred doesn't stop things
    cfn._check_want_to_be_running(stackname, 1)

    master_ip = _cached_master_ip(new_master_stackname)
    LOG.info('re-mastering %s to %s', stackname, master_ip)

    context = context_handler.load_context(stackname)

    # remove if no longer an issue
    # if context.get('ec2') == True:
    #    # TODO: duplicates bad ec2 data wrangling in cfngen.build_context
    #    # ec2 == True for some reason, which is completely useless
    #    LOG.warn("bad context for stack: %s", stackname)
    #    context['ec2'] = {}
    #    context['project']['aws']['ec2'] = {}
    if not context.get('ec2'):
        LOG.info("no ec2 context, skipping %s", stackname)
        return

    if context['ec2'].get('master_ip') == master_ip:
        LOG.info("already remastered: %s", stackname)
        try:
            utils.confirm("Skip?")
            return
        except KeyboardInterrupt:
            LOG.info("not skipping")

    LOG.info("upgrading salt client")
    pdata = core.project_data_for_stackname(stackname)
    context['project']['salt'] = pdata['salt']

    LOG.info("setting new master address")
    cfngen.set_master_address(pdata, context, master_ip) # mutates context

    # update context
    LOG.info("updating context")
    context_handler.write_context(stackname, context)

    # update buildvars
    LOG.info("updating buildvars")
    buildvars.refresh(stackname, context)

    # remove knowledge of old master
    def work():
        sudo("rm -f /etc/salt/pki/minion/minion_master.pub")  # destroy the old master key we have
    LOG.info("removing old master key from minion")
    core.stack_all_ec2_nodes(stackname, work, username=config.BOOTSTRAP_USER)

    # update ec2 nodes
    LOG.info("updating nodes")
    bootstrap.update_ec2_stack(stackname, context, concurrency='serial')
    return True
Example #6
def cmd(stackname, command=None, username=DEPLOY_USER):
    if command is None:
        abort("Please specify a command e.g. ./bldr cmd:%s,ls" % stackname)
    LOG.info("Connecting to: %s", stackname)
    stack_all_ec2_nodes(stackname,
                        (parallel(run), {'command': command}),
                        username=username,
                        abort_on_prompts=True)
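
stack_all_ec2_nodes is called here with a (callable, kwargs) tuple rather than a bare worker. A hedged sketch of how that tuple form can be normalised into the zero-argument workers the other examples use:

def unpack_work(workfn):
    # accept either a bare callable or a (callable, kwargs) pair
    if isinstance(workfn, tuple):
        fn, kwargs = workfn
        return lambda: fn(**kwargs)
    return workfn

def greet(name):
    return 'hello, %s' % name

worker = unpack_work((greet, {'name': 'prod'}))
assert worker() == 'hello, prod'
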
Example #7
def force(stackname, field, value):
    "replace a specific key with a new value in the buildvars for all ec2 instances in stack"
    def _force_single_ec2_node():
        buildvars = read_from_current_host()

        new_vars = buildvars.copy()
        new_vars[field] = value
        _update_remote_bvars(stackname, new_vars)
        LOG.info("updated bvars %s", new_vars)

    stack_all_ec2_nodes(stackname, _force_single_ec2_node, username=BOOTSTRAP_USER)
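
force is a plain read-modify-write over the remote buildvars. read below describes the retrieved variables as "unencoded", which suggests an encoded file on the host; a hedged sketch of that cycle, assuming (without confirmation from these snippets) base64-encoded JSON:

import base64
import json

def force_field(encoded, field, value):
    # decode, patch one field, re-encode: the shape force() relies on
    data = json.loads(base64.b64decode(encoded))
    data[field] = value
    return base64.b64encode(json.dumps(data).encode()).decode()

original = base64.b64encode(json.dumps({'revision': 'abc'}).encode()).decode()
patched = force_field(original, 'revision', 'def')
assert json.loads(base64.b64decode(patched)) == {'revision': 'def'}
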
Example #8
def force(stackname, field, value):
    def _force_single_ec2_node():
        buildvars = read_from_current_host()

        new_vars = buildvars.copy()
        new_vars[field] = value
        _update_remote_bvars(stackname, new_vars)
        LOG.info("updated bvars %s", new_vars)

    stack_all_ec2_nodes(stackname,
                        _force_single_ec2_node,
                        username=BOOTSTRAP_USER)
Example #9
def cmd(stackname, command=None, username=DEPLOY_USER, clean_output=False, concurrency=None, node=None):
    if command is None:
        utils.errcho("Please specify a command e.g. ./bldr cmd:%s,ls" % stackname)
        exit(1)
    LOG.info("Connecting to: %s", stackname)

    instances = _check_want_to_be_running(stackname)
    if not instances:
        return

    # take out the load of crap that Fabric prints mangling the useful output
    # of a remote command
    custom_settings = {}
    if clean_output:
        custom_settings['fabric.state.output'] = {
            'status': False,
            'running': False
        }
        custom_settings['output_prefix'] = False

    try:
        with settings(**custom_settings):
            return stack_all_ec2_nodes(
                stackname,
                (remote, {'command': command}),
                username=username,
                abort_on_prompts=True,
                concurrency=concurrency_for(stackname, concurrency),
                node=int(node) if node else None
            )
    except CommandException as e:
        LOG.error(e)
        exit(2)
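
settings(**custom_settings) scopes the output-suppression overrides to the duration of the call. A hedged sketch of that save-apply-restore shape as a context manager (illustrative; Fabric's real settings does considerably more):

from contextlib import contextmanager

@contextmanager
def settings_sketch(state, **overrides):
    saved = {k: state.get(k) for k in overrides}
    state.update(overrides)
    try:
        yield state
    finally:
        state.update(saved)  # restore the previous values on exit

output = {'status': True, 'running': True}
with settings_sketch(output, status=False, running=False):
    assert output == {'status': False, 'running': False}
assert output == {'status': True, 'running': True}
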
Example #10
def fix(stackname):
    def _fix_single_ec2_node(stackname):
        LOG.info("checking build vars on node %s", current_node_id())
        try:
            buildvars = _retrieve_build_vars()
            LOG.info("valid bvars found, no fix necessary: %s", buildvars)
        except AssertionError:
            LOG.info("invalid build vars found, regenerating from context")
            context = load_context(stackname)
            # some contexts are missing stackname
            context['stackname'] = stackname
            node_id = current_node_id()
            new_vars = trop.build_vars(context, node_id)
            _update_remote_bvars(stackname, new_vars)

    stack_all_ec2_nodes(stackname, (_fix_single_ec2_node, {'stackname': stackname}), username=BOOTSTRAP_USER)
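
fix treats an AssertionError from _retrieve_build_vars as "the buildvars are broken, regenerate them". A minimal sketch of assertion-based validation in that spirit (the keys checked are illustrative, not builder's real schema):

def validate_bvars(bvars):
    assert isinstance(bvars, dict), 'buildvars must be a dict'
    assert 'stackname' in bvars, "missing 'stackname'"
    return bvars

try:
    validate_bvars({'nodename': 'elife-dashboard--prod--1'})
except AssertionError:
    print('invalid build vars found, regenerating from context')
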
Example #11
def switch_revision(stackname, revision=None, concurrency=None):
    if revision is None:
        revision = utils.uin('revision', None)

    def _switch_revision_single_ec2_node():
        buildvars = _retrieve_build_vars()

        if 'revision' in buildvars and revision == buildvars['revision']:
            print('FYI, the instance is already on that revision!')
            return

        new_data = buildvars
        new_data['revision'] = revision
        _update_remote_bvars(stackname, new_data)

    stack_all_ec2_nodes(stackname, _switch_revision_single_ec2_node, username=BOOTSTRAP_USER, concurrency=concurrency)
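
utils.uin('revision', None) looks like an interactive prompt with a fallback default. A hedged sketch of that behaviour (a stand-in only; builder's real uin may differ):

def uin(name, default=None):
    # prompt the operator; empty input falls back to the default
    raw = input('%s [%s]: ' % (name, default)).strip()
    return raw or default
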
Example #12
def cmd(stackname, command=None, username=DEPLOY_USER, clean_output=False, concurrency=None, node=None):
    if command is None:
        abort("Please specify a command e.g. ./bldr cmd:%s,ls" % stackname)
    LOG.info("Connecting to: %s", stackname)

    instances = _check_want_to_be_running(stackname)
    if not instances:
        return

    # take out the load of crap that Fabric prints mangling the useful output
    # of a remote command
    custom_settings = {}
    if clean_output:
        fabric.state.output['status'] = False
        fabric.state.output['running'] = False
        custom_settings['output_prefix'] = False

    try:
        with settings(**custom_settings):
            return stack_all_ec2_nodes(
                stackname,
                (run, {'command': command}),
                username=username,
                abort_on_prompts=True,
                concurrency=concurrency_for(stackname, concurrency),
                node=int(node) if node else None
            )
    except FabricException as e:
        LOG.error(e)
        exit(2)
Example #13
def set_versions(stackname, *repolist):
    """updates the cloned formulas on a masterless stack to a specific revision.
    call with formula name and a revision, like: builder-private@ab87af78asdf2321431f31"""

    context = context_handler.load_context(stackname)
    fkeys = ['formula-repo', 'formula-dependencies', 'private-repo', 'configuration-repo']
    fdata = subdict(context['project'], fkeys)
    repolist = parse_validate_repolist(fdata, *repolist)

    if not repolist:
        return 'nothing to do'

    def updater():
        for repo, formula, revision in repolist:
            bootstrap.run_script('update-masterless-formula.sh', repo, formula, revision)

    core.stack_all_ec2_nodes(stackname, updater, concurrency='serial')
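
parse_validate_repolist turns 'formula@revision' arguments into the (repo, formula, revision) triples the updater loop unpacks. A hedged sketch of just the parsing half (validation against fdata's known repos is omitted; the triple's repo URL would come from that lookup):

def parse_repolist_sketch(*repolist):
    parsed = []
    for spec in repolist:
        formula, _, revision = spec.partition('@')
        if not (formula and revision):
            raise ValueError("expected 'formula@revision', got %r" % spec)
        parsed.append((formula, revision))
    return parsed

assert parse_repolist_sketch('builder-private@ab87af78asdf2321431f31') == \
    [('builder-private', 'ab87af78asdf2321431f31')]
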
Example #14
def force(stackname, field, value):
    "replace a specific key with a new value in the buildvars for all ec2 instances in stack"

    def _force_single_ec2_node():
        # do not validate build vars.
        # this way it can be used to repair buildvars when they are missing some field.
        #buildvars = _validate()
        buildvars = read_from_current_host()

        new_vars = buildvars.copy()
        new_vars[field] = value
        _update_remote_bvars(stackname, new_vars)
        LOG.info("updated bvars %s", new_vars)

    stack_all_ec2_nodes(stackname,
                        _force_single_ec2_node,
                        username=BOOTSTRAP_USER)
Example #15
def remaster(stackname, new_master_stackname="master-server--2018-04-09-2"):
    "tell minion who their new master is. deletes any existing master key on minion"

    # start instance if it is stopped
    # acquire a lock from Alfred (if possible) so instance isn't shutdown while being updated
    cfn._check_want_to_be_running(stackname, autostart=True)

    master_ip = _cached_master_ip(new_master_stackname)
    LOG.info('re-mastering %r to %r', stackname, master_ip)

    context = context_handler.load_context(stackname)

    if not context.get('ec2'):
        LOG.info("no ec2 context, skipping %s", stackname)
        return

    if context['ec2'].get('master_ip') == master_ip:
        LOG.info("already remastered: %s", stackname)
        return

    pdata = core.project_data_for_stackname(stackname)

    LOG.info("setting new master address")
    cfngen.set_master_address(pdata, context, master_ip)  # mutates context

    LOG.info("updating context")
    context_handler.write_context(stackname, context)

    LOG.info("updating buildvars")
    buildvars.refresh(stackname, context)

    # remove knowledge of old master by destroying the minion's master pubkey
    def workerfn():
        remote_sudo("rm -f /etc/salt/pki/minion/minion_master.pub")

    LOG.info("removing old master key from minion")
    core.stack_all_ec2_nodes(stackname,
                             workerfn,
                             username=config.BOOTSTRAP_USER)

    LOG.info("updating nodes")

    # todo: how to pass in --dry-run to highstate.sh ?
    bootstrap.update_ec2_stack(stackname, context, concurrency='serial')
    return True
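
Several examples pass concurrency='serial'. A hedged sketch of what that switch suggests: run the worker on one node at a time rather than fanning out (the thread pool here is illustrative, not builder's mechanism):

from concurrent.futures import ThreadPoolExecutor

def run_workers(node_ids, workfn, concurrency='serial'):
    if concurrency == 'serial':
        return [workfn(n) for n in node_ids]  # one node at a time
    with ThreadPoolExecutor() as pool:
        return list(pool.map(workfn, node_ids))

assert run_workers([1, 2], lambda n: n * 10) == [10, 20]
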
Example #16
def read(stackname):
    "returns the unencoded build variables found on given instance"
    return stack_all_ec2_nodes(stackname,
                               lambda: pprint(read_from_current_host()),
                               username=BOOTSTRAP_USER)
Example #17
def valid(stackname):
    return stack_all_ec2_nodes(stackname,
                               lambda: pprint(_validate()),
                               username=BOOTSTRAP_USER)
Example #18
def valid(stackname):
    return stack_all_ec2_nodes(stackname, lambda: pprint(_retrieve_build_vars()), username=BOOTSTRAP_USER)
Example #19
def read(stackname):
    "returns the unencoded build variables found on given instance"
    return stack_all_ec2_nodes(stackname, lambda: pprint(read_from_current_host()), username=BOOTSTRAP_USER)
Example #20
def test_no_running_instances_found(self, stack_data):
    stack_data.return_value = []
    self.assertEqual(
        core.stack_all_ec2_nodes('dummy1--test', lambda: True),
        {}
    )
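
The stack_data argument implies a mock.patch decorator that is cropped out of the snippet above. A hedged reconstruction of how such a test is typically wired up (the patch target core.stack_data is an assumption):

from unittest import mock
import core  # builder's core module, assumed importable in the test env

@mock.patch('core.stack_data')
def test_no_running_instances_found(stack_data):
    stack_data.return_value = []  # no running instances discovered
    assert core.stack_all_ec2_nodes('dummy1--test', lambda: True) == {}
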
Example #21
def valid(stackname):
    return stack_all_ec2_nodes(stackname,
                               lambda: pprint(_retrieve_build_vars()),
                               username=BOOTSTRAP_USER)