Example #1
File: orch.py, Project: sdskit/sdscli
def stop(comp, debug=False, force=False):
    """Stop components."""

    # prompt user
    if not force:
        cont = prompt(get_prompt_tokens=lambda x: [(Token.Alert,
                                                    "Stopping component[s]: {}. Continue [y/n]: ".format(comp)), (Token, " ")],
                      validator=YesNoValidator(), style=prompt_style) == 'y'
        if not cont:
            return 0

    # get user's SDS conf settings
    conf = SettingsConf()

    logger.debug("Stopping %s" % comp)

    stop_comp(comp, conf)
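Every example here gates the action behind the same yes/no confirmation prompt. YesNoValidator and prompt_style are sdscli's own prompt helpers; as a rough sketch (an assumption, not the actual sdscli code), such a validator for the prompt_toolkit 1.x API used in these examples could look like:

from prompt_toolkit.validation import Validator, ValidationError


class YesNoValidator(Validator):
    """Accept only 'y' or 'n' (illustrative sketch, not sdscli's implementation)."""

    def validate(self, document):
        text = document.text.strip().lower()
        if text not in ('y', 'n'):
            raise ValidationError(message="Please answer 'y' or 'n'",
                                  cursor_position=len(document.text))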
Example #2
def start(comp, debug=False, force=False):
    """Start TPS components."""

    # prompt user
    if not force:
        cont = prompt(get_prompt_tokens=lambda x: [(Token.Alert, 
                      "Starting TPS on component[s]: {}. Continue [y/n]: ".format(comp)), (Token, " ")],
                      validator=YesNoValidator(), style=prompt_style) == 'y'
        if not cont: return 0

    # get user's SDS conf settings
    conf = SettingsConf()

    logger.debug("Starting %s" % comp)

    if debug: start_comp(comp, conf)
    else:
        with hide('everything'):
            start_comp(comp, conf)
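The debug branch above relies on Fabric 1.x's hide context manager: unless debug is set, Fabric's running/stdout/stderr output is suppressed while start_comp does its remote work. A minimal sketch of that pattern in isolation (Fabric 1.x is assumed, and the service command is just a placeholder):

from fabric.api import hide, run


def restart_worker(debug=False):
    cmd = "supervisorctl restart all"  # placeholder command, not from sdscli
    if debug:
        run(cmd)  # show Fabric's full output
    else:
        with hide('everything'):  # silence running/stdout/stderr lines
            run(cmd)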
Example #3
def kibana(job_type, debug=False, force=False):
    """Update components."""

    # prompt user
    if not force:
        cont = prompt(get_prompt_tokens=lambda x: [(
            Token.Alert, "Updating Kibana: {}. Continue [y/n]: ".format(
                job_type)), (Token, " ")],
                      validator=YesNoValidator(),
                      style=prompt_style) == 'y'
        if not cont: return 0

    # get user's SDS conf settings
    conf = SettingsConf()

    logger.debug("Processing %s" % job_type)

    if debug: process_kibana_job(job_type, conf)
    else:
        with hide('everything'):
            process_kibana_job(job_type, conf)
Example #4
def configure():
    """Configure SDS config file for HySDS."""

    # copy templates/files
    copy_files()

    # config file
    cfg_file = get_user_config_path()
    if os.path.exists(cfg_file):
        cont = prompt(get_prompt_tokens=lambda x: [
            (Token, cfg_file), (Token, " already exists. "),
            (Token.Alert, "Customizations will be lost or overwritten!"),
            (Token, " Continue [y/n]: ")
        ],
                      validator=YesNoValidator(),
                      style=prompt_style) == 'y'
        #validator=YesNoValidator(), default='n', style=prompt_style) == 'y'
        if not cont: return 0
        with open(cfg_file) as f:
            cfg = yaml.load(f)
    else:
        cfg = {}

    # mozart
    for k, d in CFG_DEFAULTS['mozart']:
        v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                                (Token.Param, "%s" % k),
                                                (Token, ": ")],
                   default=unicode(cfg.get(k, d)),
                   style=prompt_style)
        cfg[k] = v

    # mozart components
    comps = [('mozart-rabbit', 'rabbitMQ'), ('mozart-redis', 'redis'),
             ('mozart-es', 'elasticsearch')]
    for grp, comp in comps:
        reuse = prompt("Is mozart %s on a different IP [y/n]: " % comp,
                       validator=YesNoValidator(),
                       default='n') == 'n'
        for k, d in CFG_DEFAULTS[grp]:
            if reuse:
                if k.endswith('_PVT_IP'):
                    cfg[k] = cfg['MOZART_PVT_IP']
                    continue
                elif k.endswith('_PUB_IP'):
                    cfg[k] = cfg['MOZART_PUB_IP']
                    continue
                elif k.endswith('_FQDN'):
                    cfg[k] = cfg['MOZART_FQDN']
                    continue
            if k == 'MOZART_RABBIT_PASSWORD':
                while True:
                    p1 = prompt(get_prompt_tokens=lambda x: [
                        (Token, "Enter RabbitMQ password for user "),
                        (Token.Username, "%s" % cfg['MOZART_RABBIT_USER']),
                        (Token, ": ")
                    ],
                                default=unicode(cfg.get(k, d)),
                                style=prompt_style,
                                is_password=True)
                    p2 = prompt(get_prompt_tokens=lambda x: [
                        (Token, "Re-enter RabbitMQ password for user "),
                        (Token.Username, "%s" % cfg['MOZART_RABBIT_USER']),
                        (Token, ": ")
                    ],
                                default=unicode(cfg.get(k, d)),
                                style=prompt_style,
                                is_password=True)
                    if p1 == p2:
                        if p1 == "":
                            print("Password can't be empty.")
                            continue
                        v = p1
                        break
                    print("Passwords don't match.")
            elif k == 'MOZART_REDIS_PASSWORD':
                while True:
                    p1 = prompt(get_prompt_tokens=lambda x: [(
                        Token, "Enter Redis password: ")],
                                default=unicode(cfg.get(k, d)),
                                style=prompt_style,
                                is_password=True)
                    p2 = prompt(get_prompt_tokens=lambda x: [(
                        Token, "Re-enter Redis password: ")],
                                default=unicode(cfg.get(k, d)),
                                style=prompt_style,
                                is_password=True)
                    if p1 == p2:
                        v = p1
                        break
                    print("Passwords don't match.")
            else:
                v = prompt(
                    get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                                 (Token.Param, "%s" % k),
                                                 (Token, ": ")],
                    default=unicode(cfg.get(k, d)),
                    style=prompt_style)
            cfg[k] = v

    # ops
    for k, d in CFG_DEFAULTS['ops']:
        if k == 'OPS_PASSWORD_HASH':
            while True:
                p1 = prompt(get_prompt_tokens=lambda x: [
                    (Token, "Enter web interface password for ops user "),
                    (Token.Username, "%s" % cfg['OPS_USER']), (Token, ": ")
                ],
                            default="",
                            style=prompt_style,
                            is_password=True)
                p2 = prompt(get_prompt_tokens=lambda x: [
                    (Token, "Re-enter web interface password for ops user "),
                    (Token.Username, "%s" % cfg['OPS_USER']), (Token, ": ")
                ],
                            default="",
                            style=prompt_style,
                            is_password=True)
                if p1 == p2:
                    if p1 == "":
                        print("Password can't be empty.")
                        continue
                    v = hashlib.sha224(p1).hexdigest()
                    break
                print("Passwords don't match.")
        else:
            v = prompt(
                get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                             (Token.Param, "%s" % k),
                                             (Token, ": ")],
                default=unicode(cfg.get(k, d)),
                style=prompt_style)
        cfg[k] = v

    # metrics
    for k, d in CFG_DEFAULTS['metrics']:
        v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                                (Token.Param, "%s" % k),
                                                (Token, ": ")],
                   default=unicode(cfg.get(k, d)),
                   style=prompt_style)
        cfg[k] = v

    # metrics components
    comps = [('metrics-redis', 'redis'), ('metrics-es', 'elasticsearch')]
    for grp, comp in comps:
        reuse = prompt("Is metrics %s on a different IP [y/n]: " % comp,
                       validator=YesNoValidator(),
                       default='n') == 'n'
        for k, d in CFG_DEFAULTS[grp]:
            if reuse:
                if k.endswith('_PVT_IP'):
                    cfg[k] = cfg['METRICS_PVT_IP']
                    continue
                elif k.endswith('_PUB_IP'):
                    cfg[k] = cfg['METRICS_PUB_IP']
                    continue
                elif k.endswith('_FQDN'):
                    cfg[k] = cfg['METRICS_FQDN']
                    continue
            if k == 'METRICS_REDIS_PASSWORD':
                while True:
                    p1 = prompt(get_prompt_tokens=lambda x: [(
                        Token, "Enter Redis password: ")],
                                default=unicode(cfg.get(k, d)),
                                style=prompt_style,
                                is_password=True)
                    p2 = prompt(get_prompt_tokens=lambda x: [(
                        Token, "Re-enter Redis password: ")],
                                default=unicode(cfg.get(k, d)),
                                style=prompt_style,
                                is_password=True)
                    if p1 == p2:
                        v = p1
                        break
                    print("Passwords don't match.")
            else:
                v = prompt(
                    get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                                 (Token.Param, "%s" % k),
                                                 (Token, ": ")],
                    default=unicode(cfg.get(k, d)),
                    style=prompt_style)
            cfg[k] = v

    # grq
    for k, d in CFG_DEFAULTS['grq']:
        v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                                (Token.Param, "%s" % k),
                                                (Token, ": ")],
                   default=unicode(cfg.get(k, d)),
                   style=prompt_style)
        cfg[k] = v

    # grq components
    comps = [('grq-es', 'elasticsearch')]
    for grp, comp in comps:
        reuse = prompt("Is grq %s on a different IP [y/n]: " % comp,
                       validator=YesNoValidator(),
                       default='n') == 'n'
        for k, d in CFG_DEFAULTS[grp]:
            if reuse:
                if k.endswith('_PVT_IP'):
                    cfg[k] = cfg['GRQ_PVT_IP']
                    continue
                elif k.endswith('_PUB_IP'):
                    cfg[k] = cfg['GRQ_PUB_IP']
                    continue
                elif k.endswith('_FQDN'):
                    cfg[k] = cfg['GRQ_FQDN']
                    continue
            v = prompt(
                get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                             (Token.Param, "%s" % k),
                                             (Token, ": ")],
                default=unicode(cfg.get(k, d)),
                style=prompt_style)
            cfg[k] = v

    # factotum
    for k, d in CFG_DEFAULTS['factotum']:
        v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                                (Token.Param, "%s" % k),
                                                (Token, ": ")],
                   default=unicode(cfg.get(k, d)),
                   style=prompt_style)
        cfg[k] = v

    # ci
    for k, d in CFG_DEFAULTS['ci']:
        if k in ('JENKINS_API_KEY', 'GIT_OAUTH_TOKEN'):
            while True:
                p1 = prompt(
                    get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                                 (Token.Param, "%s" % k),
                                                 (Token, ": ")],
                    default=unicode(cfg.get(k, d)),
                    style=prompt_style,
                    is_password=True)
                p2 = prompt(get_prompt_tokens=lambda x: [(
                    Token, "Re-enter value for "), (Token.Param, "%s" % k),
                                                         (Token, ": ")],
                            default=unicode(cfg.get(k, d)),
                            style=prompt_style,
                            is_password=True)
                if p1 == p2:
                    v = p1
                    break
                print("Values don't match.")
        else:
            v = prompt(
                get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                             (Token.Param, "%s" % k),
                                             (Token, ": ")],
                default=unicode(cfg.get(k, d)),
                style=prompt_style)
        cfg[k] = v

    # verdi
    for k, d in CFG_DEFAULTS['verdi']:
        v = prompt(get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                                (Token.Param, "%s" % k),
                                                (Token, ": ")],
                   default=unicode(cfg.get(k, d)),
                   style=prompt_style)
        cfg[k] = v

    # webdav
    for k, d in CFG_DEFAULTS['webdav']:
        if k == 'DAV_PASSWORD':
            while True:
                p1 = prompt(get_prompt_tokens=lambda x:
                            [(Token, "Enter webdav password for user "),
                             (Token.Username, "%s" % cfg['DAV_USER']),
                             (Token, ": ")],
                            default=unicode(cfg.get(k, d)),
                            style=prompt_style,
                            is_password=True)
                p2 = prompt(get_prompt_tokens=lambda x:
                            [(Token, "Re-enter webdav password for ops user "),
                             (Token.Username, "%s" % cfg['DAV_USER']),
                             (Token, ": ")],
                            default=unicode(cfg.get(k, d)),
                            style=prompt_style,
                            is_password=True)
                if p1 == p2:
                    v = p1
                    break
                print("Passwords don't match.")
        else:
            v = prompt(
                get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                             (Token.Param, "%s" % k),
                                             (Token, ": ")],
                default=unicode(cfg.get(k, d)),
                style=prompt_style)
        cfg[k] = v

    # aws-dataset
    for k, d in CFG_DEFAULTS['aws-dataset']:
        if k == 'DATASET_AWS_SECRET_KEY':
            if cfg['DATASET_AWS_ACCESS_KEY'] == "":
                cfg['DATASET_AWS_SECRET_KEY'] = ""
                continue
            while True:
                p1 = prompt(get_prompt_tokens=lambda x: [
                    (Token, "Enter AWS secret key for "),
                    (Token.Username, "%s" % cfg['DATASET_AWS_ACCESS_KEY']),
                    (Token, ": ")
                ],
                            default=unicode(cfg.get(k, d)),
                            style=prompt_style,
                            is_password=True)
                p2 = prompt(get_prompt_tokens=lambda x: [
                    (Token, "Re-enter AWS secret key for "),
                    (Token.Username, "%s" % cfg['DATASET_AWS_ACCESS_KEY']),
                    (Token, ": ")
                ],
                            default=unicode(cfg.get(k, d)),
                            style=prompt_style,
                            is_password=True)
                if p1 == p2:
                    v = p1
                    break
                print("Keys don't match.")
        elif k == 'DATASET_AWS_ACCESS_KEY':
            v = prompt(get_prompt_tokens=lambda x:
                       [(Token, "Enter value for "), (Token.Param, "%s" % k),
                        (Token, ". If using instance roles, just press enter"),
                        (Token, ": ")],
                       default=unicode(cfg.get(k, d)),
                       style=prompt_style)
        else:
            v = prompt(
                get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                             (Token.Param, "%s" % k),
                                             (Token, ": ")],
                default=unicode(cfg.get(k, d)),
                style=prompt_style)
        cfg[k] = v

    # aws-asg
    for k, d in CFG_DEFAULTS['aws-asg']:
        if k == 'AWS_SECRET_KEY':
            if cfg['AWS_ACCESS_KEY'] == "":
                cfg['AWS_SECRET_KEY'] = ""
                continue
            while True:
                p1 = prompt(get_prompt_tokens=lambda x:
                            [(Token, "Enter AWS secret key for "),
                             (Token.Username, "%s" % cfg['AWS_ACCESS_KEY']),
                             (Token, ": ")],
                            default=unicode(cfg.get(k, d)),
                            style=prompt_style,
                            is_password=True)
                p2 = prompt(get_prompt_tokens=lambda x:
                            [(Token, "Re-enter AWS secret key for "),
                             (Token.Username, "%s" % cfg['AWS_ACCESS_KEY']),
                             (Token, ": ")],
                            default=unicode(cfg.get(k, d)),
                            style=prompt_style,
                            is_password=True)
                if p1 == p2:
                    v = p1
                    break
                print("Keys don't match.")
        elif k == 'AWS_ACCESS_KEY':
            v = prompt(get_prompt_tokens=lambda x:
                       [(Token, "Enter value for "), (Token.Param, "%s" % k),
                        (Token, ". If using instance roles, just press enter"),
                        (Token, ": ")],
                       default=unicode(cfg.get(k, d)),
                       style=prompt_style)
        else:
            v = prompt(
                get_prompt_tokens=lambda x: [(Token, "Enter value for "),
                                             (Token.Param, "%s" % k),
                                             (Token, ": ")],
                default=unicode(cfg.get(k, d)),
                style=prompt_style)
        cfg[k] = v

    # ensure directory exists
    validate_dir(os.path.dirname(cfg_file), mode=0700)
    yml = CFG_TMPL.format(**cfg)
    with open(cfg_file, 'w') as f:
        f.write(yml)
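configure() repeats the same enter-twice-and-compare loop for every secret it collects (RabbitMQ, Redis, ops, webdav, AWS keys). A hypothetical helper, not part of sdscli, that factors that loop out using the same prompt, Token, and prompt_style objects already in this module's namespace:

def prompt_secret(label, default=u""):
    """Prompt twice for a secret and return it once both entries match (sketch only)."""
    while True:
        p1 = prompt(get_prompt_tokens=lambda x: [(Token, "Enter %s: " % label)],
                    default=default, style=prompt_style, is_password=True)
        p2 = prompt(get_prompt_tokens=lambda x: [(Token, "Re-enter %s: " % label)],
                    default=default, style=prompt_style, is_password=True)
        if p1 == p2:
            return p1
        print("Values don't match.")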
Example #5
def create(args, conf):
    """Create Autoscaling group."""

    # get clients
    c = boto3.client('autoscaling')
    ec2 = boto3.client('ec2')

    # get current autoscaling groups
    cur_asgs = {i['AutoScalingGroupName']: i for i in get_asgs(c)}
    logger.debug("cur_asgs: {}".format(pformat(cur_asgs)))

    # get current launch configs
    cur_lcs = {i['LaunchConfigurationName']: i for i in get_lcs(c)}
    logger.debug("cur_lcs: {}".format(pformat(cur_lcs)))

    # get current key pairs
    cur_keypairs = {i['KeyName']: i for i in get_keypairs(ec2)}
    logger.debug("cur_keypairs: {}".format(pformat(cur_keypairs)))

    # get current AMIs
    verdi_re = re.compile(r'(?:verdi|autoscale)', re.IGNORECASE)
    cur_images = OrderedDict([(i['ImageId'], i) for i in filter(
        lambda x: verdi_re.search(x['Name']),
        sorted(get_images(c=ec2, Owners=['self']),
               key=itemgetter('CreationDate')))])
    logger.debug("cur_images: {}".format(json.dumps(cur_images, indent=2)))
    logger.debug("cur_images.keys(): {}".format(cur_images.keys()))

    # get current security groups
    cur_sgs = {i['GroupId']: i for i in get_sgs(ec2)}
    logger.debug("cur_sgs: {}".format(pformat(cur_sgs)))

    # prompt for verdi AMI
    ami = prompt_image(cur_images)
    logger.debug("AMI ID: {}".format(ami))

    # prompt for key pair
    keypair = prompt_keypair(cur_keypairs)
    logger.debug("key pair: {}".format(keypair))

    # prompt for security groups
    sgs, vpc_id = prompt_secgroup(cur_sgs)
    logger.debug("security groups: {}".format(sgs))
    logger.debug("VPC ID: {}".format(vpc_id))

    # get current AZs
    cur_azs = {i['ZoneName']: i for i in get_azs(ec2)}
    logger.debug("cur_azs: {}".format(pformat(cur_azs)))

    # get subnet IDs and corresponding AZs for VPC
    subnets = []
    azs = set()
    for sn in get_subnets_by_vpc(vpc_id):
        sn_id = sn.subnet_id
        sn_az = sn.availability_zone
        if cur_azs[sn_az]['State'] == 'available':
            subnets.append(sn_id)
            azs.add(sn_az)
    azs = list(azs)
    logger.debug("subnets: {}".format(pformat(subnets)))
    logger.debug("azs: {}".format(pformat(azs)))

    # check asgs that need to be configured
    for project in [i.strip() for i in conf.get('PROJECTS').split()]:
        asg = "{}-{}-{}".format(conf.get('AUTOSCALE_GROUP'), project,
                                conf.get('VENUE'))
        if asg in cur_asgs:
            print("ASG {} already exists. Skipping.".format(asg))
            continue

        print_component_header(
            "Configuring autoscaling group:\n{}".format(asg))

        # get user data
        user_data = "BUNDLE_URL=s3://{}/{}-{}.tbz2".format(
            conf.get('CODE_BUCKET'), project, conf.get('VENUE'))

        # prompt instance type
        instance_type = prompt(get_prompt_tokens=lambda x: [
            (Token, "Refer to https://www.ec2instances.info/ "),
            (Token, "and enter instance type to use for launch "),
            (Token, "configuration: ")
        ],
                               style=prompt_style,
                               validator=Ec2InstanceTypeValidator()).strip()
        logger.debug("instance type: {}".format(instance_type))

        # use spot?
        market = "ondemand"
        spot_bid = None
        use_spot = prompt(get_prompt_tokens=lambda x: [(
            Token, "Do you want to use spot instances [y/n]: ")],
                          validator=YesNoValidator(),
                          style=prompt_style).strip() == 'y'
        if use_spot:
            market = "spot"
            spot_bid = prompt(get_prompt_tokens=lambda x: [(
                Token, "Enter spot price bid: ")],
                              style=prompt_style,
                              validator=PriceValidator()).strip()
            logger.debug("spot price bid: {}".format(spot_bid))

        # get block device mappings and remove the Encrypted flag so spot instances can launch
        bd_maps = cur_images[ami]['BlockDeviceMappings']
        for bd_map in bd_maps:
            if 'Ebs' in bd_map and 'Encrypted' in bd_map['Ebs']:
                del bd_map['Ebs']['Encrypted']

        # get launch config
        lc_args = {
            'ImageId': ami,
            'KeyName': keypair,
            'SecurityGroups': sgs,
            'UserData': user_data,
            'InstanceType': instance_type,
            'BlockDeviceMappings': bd_maps,
        }
        if spot_bid is None:
            lc = "{}-{}-{}-launch-config".format(asg, instance_type, market)
        else:
            lc = "{}-{}-{}-{}-launch-config".format(asg, instance_type, market,
                                                    spot_bid)
            lc_args['SpotPrice'] = spot_bid
        lc_args['LaunchConfigurationName'] = lc
        if lc in cur_lcs:
            print(
                "Launch configuration {} already exists. Skipping.".format(lc))
        else:
            lc_info = create_lc(c, **lc_args)
            logger.debug("Launch configuration {}: {}".format(
                lc, pformat(lc_info)))
            print("Created launch configuration {}.".format(lc))

        # get autoscaling group config
        asg_args = {
            'AutoScalingGroupName': asg,
            'LaunchConfigurationName': lc,
            'MinSize': 0,
            'MaxSize': 0,
            'DefaultCooldown': 60,
            'DesiredCapacity': 0,
            'HealthCheckType': 'EC2',
            'HealthCheckGracePeriod': 300,
            'NewInstancesProtectedFromScaleIn': False,
            'AvailabilityZones': azs,
            'VPCZoneIdentifier': ",".join(subnets),
            'Tags': [
                {
                    'Key': 'Name',
                    'Value': '{}-worker'.format(asg),
                    'PropagateAtLaunch': True,
                },
                {
                    'Key': 'Venue',
                    'Value': conf.get('VENUE'),
                    'PropagateAtLaunch': True,
                },
                {
                    'Key': 'Project',
                    'Value': project,
                    'PropagateAtLaunch': True,
                },
            ],
        }
        logger.debug("asg_args: {}".format(pformat(asg_args)))
        asg_info = create_asg(c, **asg_args)
        logger.debug("Autoscaling group {}: {}".format(asg, pformat(asg_info)))
        print("Created autoscaling group {}".format(asg))

        # add target tracking scaling policies
        for size in ('large', 'small'):
            queue = "{}-job_worker-{}".format(project, size)
            policy_name = "{}-{}-target-tracking".format(asg, size)
            metric_name = "JobsWaitingPerInstance-{}-{}".format(queue, asg)
            ttsp_args = {
                'AutoScalingGroupName': asg,
                'PolicyName': policy_name,
                'PolicyType': 'TargetTrackingScaling',
                'TargetTrackingConfiguration': {
                    'CustomizedMetricSpecification': {
                        'MetricName': metric_name,
                        'Namespace': 'HySDS',
                        'Dimensions': [{
                            'Name': 'AutoScalingGroupName',
                            'Value': asg,
                        }, {
                            'Name': 'Queue',
                            'Value': queue,
                        }],
                        'Statistic': 'Maximum'
                    },
                    'TargetValue': 1.0,
                    'DisableScaleIn': True
                },
            }
            logger.debug("ttsp_args: {}".format(pformat(ttsp_args)))
            ttsp_info = c.put_scaling_policy(**ttsp_args)
            logger.debug("Target tracking scaling policy {}: {}".format(
                policy_name, pformat(ttsp_info)))
            print("Added target tracking scaling policy {} to {}".format(
                policy_name, asg))
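create_lc and create_asg are sdscli wrappers around the standard boto3 Auto Scaling calls; a rough sketch of what they presumably amount to (an assumption, not the actual sdscli code):

def create_lc(c, **kwargs):
    # c is a boto3 'autoscaling' client; kwargs carries the lc_args built above
    c.create_launch_configuration(**kwargs)
    resp = c.describe_launch_configurations(
        LaunchConfigurationNames=[kwargs['LaunchConfigurationName']])
    return resp['LaunchConfigurations'][0]


def create_asg(c, **kwargs):
    # create the autoscaling group, then return its description
    c.create_auto_scaling_group(**kwargs)
    resp = c.describe_auto_scaling_groups(
        AutoScalingGroupNames=[kwargs['AutoScalingGroupName']])
    return resp['AutoScalingGroups'][0]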
Example #6
def create(args, conf):
    """Create Autoscaling group."""

    # get autoscaling conf
    asg_cfg = conf._cfg.get('ASG', {})

    # get clients
    c = boto3.client('autoscaling')
    ec2 = boto3.client('ec2')

    # get current autoscaling groups
    cur_asgs = {i['AutoScalingGroupName']: i for i in get_asgs(c)}
    logger.debug("cur_asgs: {}".format(pformat(cur_asgs)))

    # get current key pairs
    cur_keypairs = {i['KeyName']: i for i in get_keypairs(ec2)}
    logger.debug("cur_keypairs: {}".format(pformat(cur_keypairs)))

    # get roles
    cur_roles = {i['RoleName']: i for i in get_roles()}
    logger.debug("cur_roles: {}".format(pformat(cur_roles)))

    # get current AMIs
    verdi_re = re.compile(r'(?:verdi|autoscale)', re.IGNORECASE)
    cur_images = OrderedDict([(i['ImageId'], i) for i in [
        x for x in sorted(get_images(c=ec2,
                                     Filters=[{
                                         'Name': 'is-public',
                                         'Values': ['false']
                                     }]),
                          key=itemgetter('CreationDate'))
        if verdi_re.search(x['Name'])
    ]])
    logger.debug("cur_images: {}".format(json.dumps(cur_images, indent=2)))
    logger.debug("cur_images.keys(): {}".format(list(cur_images.keys())))

    # get current security groups
    cur_sgs = {i['GroupId']: i for i in get_sgs(ec2)}
    logger.debug("cur_sgs: {}".format(pformat(cur_sgs)))

    # prompt for verdi AMI
    if 'AMI' in asg_cfg:
        ami = asg_cfg['AMI']
    else:
        ami = prompt_image(cur_images)
    logger.debug("AMI ID: {}".format(ami))

    # prompt for key pair
    if 'KEYPAIR' in asg_cfg:
        keypair = asg_cfg['KEYPAIR']
    else:
        keypair = prompt_keypair(cur_keypairs)
    logger.debug("key pair: {}".format(keypair))

    # prompt for roles
    use_role = False
    if 'USE_ROLE' in asg_cfg:
        use_role = asg_cfg['USE_ROLE']
    else:
        use_role = prompt(get_prompt_tokens=lambda x: [(
            Token, "Do you want to use instance roles [y/n]: ")],
                          validator=YesNoValidator(),
                          style=prompt_style).strip() == 'y'
    logger.debug("use_role: {} {}".format(use_role, type(use_role)))
    if use_role:
        if 'ROLE' in asg_cfg:
            role = asg_cfg['ROLE']
        else:
            role = prompt_roles(cur_roles)
        logger.debug("role: {}".format(role))

    # prompt for security groups
    if 'SECURITY_GROUPS' in asg_cfg and 'VPC' in asg_cfg:
        sgs = asg_cfg.get('SECURITY_GROUPS', [])
        vpc_id = asg_cfg.get('VPC', None)
    else:
        sgs, vpc_id = prompt_secgroup(cur_sgs)
    logger.debug("security groups: {}".format(sgs))
    logger.debug("VPC ID: {}".format(vpc_id))

    # get current AZs
    cur_azs = {i['ZoneName']: i for i in get_azs(ec2)}
    logger.debug("cur_azs: {}".format(pformat(cur_azs)))

    # get subnet IDs and corresponding AZs for VPC
    subnets = []
    azs = set()
    for sn in get_subnets_by_vpc(vpc_id):
        sn_id = sn.subnet_id
        sn_az = sn.availability_zone
        if cur_azs[sn_az]['State'] == 'available':
            subnets.append(sn_id)
            azs.add(sn_az)
    azs = list(azs)
    logger.debug("subnets: {}".format(pformat(subnets)))
    logger.debug("azs: {}".format(pformat(azs)))

    # check asgs that need to be configured
    queues = conf._cfg.get('QUEUES', [])
    if len(queues) == 0:
        queue_names = prompt(
            'Please enter queue names, separate by space, for example: [job_worker-small job_worker-large]: ',
            default='job_worker-small job_worker-large')
        q_list = queue_names.split()
        for i in range(len(q_list)):
            inst = prompt(
                'Please enter instance names, separate by space, for ' +
                q_list[i] +
                ', for example: [t2.medium t3.medium t3a.medium]: ',
                default='t2.medium t3.medium t3a.medium')
            i_list = inst.split()
            d = {'QUEUE_NAME': q_list[i], 'INSTANCE_TYPES': i_list}
            queues.append(d)
            logger.debug(str(queues))
    for i, q in enumerate(queues):
        queue = q['QUEUE_NAME']
        ins_type = q['INSTANCE_TYPES']
        inst_type_arr = []
        for j in range(len(ins_type)):
            inst_type_arr.append({'InstanceType': ins_type[j]})
        # used as the Overrides parameter below
        overrides = inst_type_arr

        asg = "{}-{}".format(conf.get('VENUE'), queue)
        if asg in cur_asgs:
            print(("ASG {} already exists. Skipping.".format(asg)))
            continue

        print_component_header(
            "Configuring autoscaling group:\n{}".format(asg))

        # get user data
        user_data = "BUNDLE_URL=s3://{}/{}-{}.tbz2".format(
            conf.get('CODE_BUCKET'), queue, conf.get('VENUE'))

        # get block device mappings and remove the Encrypted flag so spot instances can launch
        bd_maps = cur_images[ami]['BlockDeviceMappings']
        for bd_map in bd_maps:
            if 'Ebs' in bd_map and 'Encrypted' in bd_map['Ebs']:
                del bd_map['Ebs']['Encrypted']

        # get launch template
        lt_args = {
            'LaunchTemplateData': {
                'ImageId': ami,
                'KeyName': keypair,
                'SecurityGroupIds': sgs,
                'UserData': base64.b64encode(user_data.encode()).decode(),
                'BlockDeviceMappings': bd_maps,
            }
        }
        if use_role:
            lt_args['LaunchTemplateData']['IamInstanceProfile'] = {
                'Name': role
            }

        lt = "{}-launch-template".format(asg)
        lt_args['LaunchTemplateName'] = lt
        lt_info = create_lt(ec2, **lt_args)
        logger.debug("Launch template {}: {}".format(lt, pformat(lt_info)))
        print(("Created launch template {}.".format(lt)))

        # get autoscaling group config
        asg_args = {
            'AutoScalingGroupName': asg,
            'MixedInstancesPolicy': {
                'LaunchTemplate': {
                    'LaunchTemplateSpecification': {
                        'LaunchTemplateName': lt,
                        'Version': '$Latest'
                    },
                    'Overrides': overrides
                },
                'InstancesDistribution': {
                    'OnDemandAllocationStrategy': 'prioritized',
                    'OnDemandBaseCapacity': 0,
                    'OnDemandPercentageAboveBaseCapacity': 0,
                    'SpotAllocationStrategy': 'lowest-price',
                    'SpotInstancePools': len(ins_type),
                    'SpotMaxPrice': ''
                }
            },
            'MinSize': 0,
            'MaxSize': 0,
            'DefaultCooldown': 60,
            'DesiredCapacity': 0,
            'HealthCheckType': 'EC2',
            'HealthCheckGracePeriod': 300,
            'NewInstancesProtectedFromScaleIn': False,
            'AvailabilityZones': azs,
            'VPCZoneIdentifier': ",".join(subnets),
            'Tags': [
                {
                    'Key': 'Name',
                    'Value': '{}-worker'.format(asg),
                    'PropagateAtLaunch': True,
                },
                {
                    'Key': 'Venue',
                    'Value': conf.get('VENUE'),
                    'PropagateAtLaunch': True,
                },
                {
                    'Key': 'Queue',
                    'Value': queue,
                    'PropagateAtLaunch': True,
                },
                {
                    'Key': 'Bravo',
                    'Value': 'pcm',
                    'PropagateAtLaunch': True,
                },
            ],
        }
        logger.debug("asg_args: {}".format(pformat(asg_args)))
        asg_info = create_asg(c, **asg_args)
        logger.debug("Autoscaling group {}: {}".format(asg, pformat(asg_info)))
        print("Created autoscaling group {}".format(asg))

        # add target tracking scaling policy
        policy_name = "{}-target-tracking".format(asg)
        metric_name = "JobsWaitingPerInstance-{}".format(asg)
        ttsp_args = {
            'AutoScalingGroupName': asg,
            'PolicyName': policy_name,
            'PolicyType': 'TargetTrackingScaling',
            'TargetTrackingConfiguration': {
                'CustomizedMetricSpecification': {
                    'MetricName': metric_name,
                    'Namespace': 'HySDS',
                    'Dimensions': [{
                        'Name': 'AutoScalingGroupName',
                        'Value': asg,
                    }, {
                        'Name': 'Queue',
                        'Value': queue,
                    }],
                    'Statistic': 'Maximum'
                },
                'TargetValue': 1.0,
                'DisableScaleIn': True
            },
        }
        logger.debug("ttsp_args: {}".format(pformat(ttsp_args)))
        ttsp_info = c.put_scaling_policy(**ttsp_args)
        logger.debug("Target tracking scaling policy {}: {}".format(
            policy_name, pformat(ttsp_info)))
        print(("Added target tracking scaling policy {} to {}".format(
            policy_name, asg)))
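Example #6 swaps launch configurations for launch templates plus a mixed-instances policy. create_lt is again an sdscli wrapper; a sketch of the boto3 EC2 call it presumably wraps (assumption, not the actual sdscli code):

def create_lt(ec2, **kwargs):
    # ec2 is a boto3 EC2 client; kwargs carries LaunchTemplateName and
    # LaunchTemplateData as built in create() above
    resp = ec2.create_launch_template(**kwargs)
    return resp['LaunchTemplate']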