Example #1
def dns_update(score, client, sl_storage, containername, configuration):

    dns_m = SoftLayer.DNSManager(client)
    zones = dns_m.list_zones()
    score['resources']['dns'] = normalize(score['resources']['dns'], score)
    lib.debug(score['resources']['dns'])
    for domain, zonedef in score['resources']['dns'].iteritems():
        zone = lib.findInList(zones, 'name', domain)
        if not zone:
            raise Exception("no zone found for {}".format(domain))
        for entry in zonedef:
            group = entry.split(".")[1]
            lib.debug("reg vms in group {} to dns zone {}".format(group,
                                                                  domain))
            for vm in score['resources']['serverinstances'][group]['vms']:

                if not vm['domain'].endswith(domain):
                    sys.stderr.write("{}.{} not in zone {}\n".format(
                            vm['hostname'], vm['domain'], domain))
                    break

                # strip out root domain to register with DNS as a host
                #   record w/in the root domain

                record = "{}.{}".format(vm['hostname'], vm['domain'])
                record = record[:-(len(domain)+1)]
                lib.debug(dns_m.create_record(zone_id=zone['id'],
                                              record=record,
                                              record_type='A',
                                              data=vm['primaryIpAddress'],
                                              ttl=900))
                lib.save_state(sl_storage, containername,
                               "dns/{}/{}".format(domain, record),
                               vm['primaryIpAddress'])
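
The function above layers the project's state bookkeeping on top of the SoftLayer DNSManager. A minimal standalone sketch of the two library calls it relies on (credentials come from the environment; the zone name, host record, and address below are placeholders, not values from any score):

import SoftLayer

client = SoftLayer.create_client_from_env()
dns = SoftLayer.DNSManager(client)

# look up the zone id for the domain the hosts should be registered in
zone = next(z for z in dns.list_zones() if z['name'] == 'example.com')

# create an A record for web-0.example.com pointing at the instance address
dns.create_record(zone_id=zone['id'], record='web-0', record_type='A',
                  data='10.0.0.10', ttl=900)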
Example #2
def manual_provision_vms(vs_config,
                         groupname,
                         groupdef,
                         clustername,
                         score,
                         client,
                         sl_storage,
                         configuration,
                         containername):
    configs = []
    count = int(groupdef['count'])
    for i in range(count):
        vscopy = vs_config.copy()
        vscopy['hostname'] = vscopy['hostname']+'-'+str(i)
        configs.append(vscopy)
    lib.debug(json.dumps(configs, indent=4, sort_keys=True))

    vs_manager = SoftLayer.VSManager(client)

    vms = lib.sl_retry(vs_manager.create_instances, configs)

    for vm in vms:
        lib.save_state(sl_storage, containername,
                       "serverinstances/{}/vms/{}/id".format(
                            groupname, vm['hostname']),
                       vm['id'])

    for vm in vms:
        lib.sl_retry(vs_manager.wait_for_ready, vm['id'], 600)

    groupdef['vms'] = []
    for vm in vms:
        groupdef['vms'].append(vs_manager.get_instance(vm['id']))
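
For reference, a plausible vs_config/groupdef pair for the function above, using the keyword names that VSManager.create_instances understands (all values are illustrative, not taken from the original project):

vs_config = {
    'hostname': 'web',          # suffixed with -0, -1, ... per instance above
    'domain': 'example.com',
    'cpus': 2,
    'memory': 4096,             # in MB
    'datacenter': 'dal09',
    'os_code': 'UBUNTU_LATEST',
    'hourly': True,
}
groupdef = {'count': 2}         # number of instances to create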
Example #3
def do_create(args, client, sl_storage, configuration):

    if args['-v']:
        DebugLevel.set_level('verbose')
    else:
        DebugLevel.set_level('progress')

    containername = args['<clustername>']
    if args['<clustername>'] in clusters(sl_storage):
        error('cluster {} already exists'.format(args['<clustername>']))

    with open(args['<score.yaml>'], 'r') as scorefile:
        scoretext = scorefile.read()
    score = yaml.load(scoretext)

    score['clustername'] = args['<clustername>']
    dirname = os.path.dirname(args['<score.yaml>'])
    if dirname == "":
        dirname = "."
    score['path'] = dirname + "/"

    # setup environment for scripts in score to run properly. Change to
    #  the score directory and add . to the path
    os.chdir(score['path'])
    os.environ['PATH'] = ':'.join([os.environ['PATH'], './'])

    if 'parameters' in score:
        parmvalues = score['parameters']
    else:
        parmvalues = {}

    parameters = args['<key=value>']
    for param in parameters:
        splits = param.split('=', 1)
        if len(splits) != 2:
            raise Exception("{} is not a key=value pair".format(param))
        parmvalues[splits[0]] = splits[1]
    score['parameters'] = parmvalues
    scoretext = yaml.dump(score, indent=4)

    msg = validate_provision_parms_passed(scoretext, parmvalues)
    debug(msg)
    if msg:
        error(msg)

    state_container_create(sl_storage, containername)
    try:
        # save score for later operations
        save_state(sl_storage, containername, 'score', scoretext)
        provision(args['<clustername>'], containername, score, configuration,
                  client, sl_storage)
    except Exception as e:
        debug(traceback.format_exc())
        resources = get_resources(sl_storage, containername)
        del resources['score']
        if deletable(resources):
            state_container_clean(sl_storage, containername)
        error(str(e))
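
The args mapping is docopt-style. A minimal invocation sketch using only the keys the function actually reads (values are placeholders; client, sl_storage, and configuration are created by the surrounding CLI setup, which is not shown here):

args = {
    '-v': True,                                  # verbose debug output
    '<clustername>': 'demo-cluster',
    '<score.yaml>': 'score.yaml',
    '<key=value>': ['flavor=small', 'region=dal09'],
}
do_create(args, client, sl_storage, configuration)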
Example #4
def provision_loadbalancers(score, client, sl_storage, containername,
                            configuration):

    if 'loadbalancers' not in score['resources']:
        return

    lbmgr = SoftLayer.LoadBalancerManager(client)
    all_pkgs = lbmgr.get_lb_pkgs()
    # lib.debug([ (i['capacity']) for i in all_pkgs ])

    for lbname, lbconfig in score['resources']['loadbalancers'].iteritems():
        lbconfig = normalize(lbconfig, score)
        score['resources']['loadbalancers'][lbname] = lbconfig
        # first find lb packages with given connection support
        lbs_available = lib.findInList(all_pkgs,
                                       'capacity',
                                       str(lbconfig['connections']))
        if lbs_available is None:
            msg = 'no loadbalancer option found with capacity {}'
            raise Exception(msg.format(lbconfig['connections']))

        # if only one option available use it...
        #  otherwise do some more filtering
        if isinstance(lbs_available, list):
            # find the requested ssl support
            if 'ssl-offload' in lbconfig and lbconfig['ssl-offload']:
                lbs_available = lib.findInList(
                                        lbs_available, 'keyName', 'SSL',
                                        (lambda field, val: val in field))
            else:
                lbs_available = lib.findInList(
                                        lbs_available, 'keyName', 'SSL',
                                        (lambda field, v: v not in field))

            # lib.debug(lbs_available)

        # build a list to walk through
        if not isinstance(lbs_available, list):
            lbs_available = [lbs_available]

        # find prices for the current datacenter
        priceitems = []
        for lbitem in lbs_available:
            lib.debug(lbitem)
            priceitems.append(findPriceIdsForDatacenter(lbitem['prices'],
                                                        score['datacenter']))

        # sort the priceitems and pick the inexpensive one
        priceitems = sorted(priceitems, key=lambda p: float(p['recurringFee']))

        lib.debug(json.dumps(priceitems, indent=4))
        # do the create now
        lib.debug(priceitems[0])
        lib.debug(priceitems[0]['id'])
        order = lbmgr.add_local_lb(priceitems[0]['id'],
                                   score['datacenter']['name'])
        lib.debug(order)
        # wait for some time for order to be fulfilled
        billingItem = waitForOrderCompletion(order['orderId'], client)
        lib.debug(billingItem)
        # now list all load balancers
        all_lbs = client['Account'].getAdcLoadBalancers(mask='billingItem')
        provisioned_lb = lib.findInList(
            all_lbs, 'billingItem', billingItem,
            lambda field, val: field['id'] == val)
        lib.debug(provisioned_lb)
        lib.save_state(sl_storage, containername,
                       "loadbalancers/{}/id".format(lbname),
                       provisioned_lb['id'])
        lbconfig['id'] = provisioned_lb['id']
        objtype = 'Network_Application_Delivery_Controller_LoadBalancer_'\
                  'Routing_Type'
        routing_types = client[objtype].getAllObjects()
        objtype = 'Network_Application_Delivery_Controller_LoadBalancer_'\
                  'Routing_Method'
        routing_methods = client[objtype].getAllObjects()
        for groupname, groupconfig in lbconfig['service-groups'].iteritems():
            lib.debug(groupconfig)
            routingtype = lib.findInList(routing_types, 'name',
                                         groupconfig['type'].upper())
            lib.debug(routingtype)
            routingmethod = lib.findInList(routing_methods, 'keyname',
                                           groupconfig['method'].upper())
            lib.debug(routingmethod)
            lib.debug(lbmgr.add_service_group(provisioned_lb['id'],
                                              groupconfig['allocation%'],
                                              groupconfig['port'],
                                              routingtype['id'],
                                              routingmethod['id']))
            # refresh lb info
            objtype = 'Network_Application_Delivery_Controller_LoadBalancer'\
                      '_VirtualIpAddress'
            lb = client[objtype].getObject(id=provisioned_lb['id'],
                                           mask="virtualServers.serviceGroups")
            groupconfig['id'] = lib.findInList(lb['virtualServers'], 'port',
                                               groupconfig['port'])['id']
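
The loadbalancer definitions this function iterates come from the score. A sketch of one entry as the parsed dict, limited to the keys the code reads (names and values are illustrative); in a real score it would sit under resources/loadbalancers:

loadbalancers = {
    'weblb': {
        'connections': 250,            # matched against the package 'capacity'
        'ssl-offload': False,          # True selects an SSL-capable package
        'service-groups': {
            'http': {
                'type': 'http',             # routing type name (upper-cased)
                'method': 'round_robin',    # routing method keyname (upper-cased)
                'port': 80,
                'allocation%': 100,
                'health_check': 'http',     # read later when wiring autoscale
            },
        },
    },
}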
Example #5
def autoscale_provision_vms(vs_config, groupname, groupdef, clustername, score,
                            client, sl_storage, configuration, containername):
    autoscaledef = groupdef['autoscale']
    if 'regionalGroupID' not in score['datacenter']:
        raise Exception('datacenter does not support autoscale groups')

    asconfig = {
        'cooldown': 30,
        'suspendedFlag': True,
        'name': "{}_{}_{}".format(score['name'], clustername, groupname),
        'regionalGroupId': score['datacenter']['regionalGroupID'],
        'minimumMemberCount': groupdef['count'],
        # autoscaledef['minimumMemberCount'],
        'minimumVirtualGuestMemberCount': groupdef['count'],
        # autoscaledef['minimumMemberCount'],
        'maximumMemberCount': autoscaledef['maximumMemberCount'],
        'maximumVirtualGuestMemberCount': autoscaledef['maximumMemberCount'],
        'terminationPolicyId':
            lib.findInList(client['Scale_Termination_Policy'].getAllObjects(),
                           'keyName', 'CLOSEST_TO_NEXT_CHARGE')['id'],
        'virtualGuestMemberTemplate':  {
            'startCpus': vs_config['cpus'],
            'maxMemory': vs_config['memory'],
            'datacenter': {"name": score['datacenter']['name']},
            'hostname': vs_config['hostname'],
            'domain': vs_config['domain'],
            'operatingSystemReferenceCode': vs_config['os_code'],
            'hourlyBillingFlag': vs_config['hourly'],
            'localDiskFlag': False
        }
    }

    if 'local_disk' in vs_config:
        asconfig['virtualGuestMemberTemplate']['localDiskFlag'] = True

    if 'disks' in vs_config:
        asconfig['virtualGuestMemberTemplate']['blockDevices'] = [
            {"device": "0", "diskImage": {"capacity": vs_config['disks'][0]}}
        ]

        for dev_id, disk in enumerate(vs_config['disks'][1:], start=2):
            asconfig['virtualGuestMemberTemplate']['blockDevices'].append(
                {
                    "device": str(dev_id),
                    "diskImage": {"capacity": disk}
                })

    if 'nic_speed' in vs_config:
        asconfig['virtualGuestMemberTemplate']['networkComponents'] = [{
            'maxSpeed': vs_config['nic_speed']
        }]

    if 'ssh_keys' in vs_config:
        asconfig['virtualGuestMemberTemplate']['sshKeys'] = []
        for key in vs_config['ssh_keys']:
            asconfig['virtualGuestMemberTemplate']['sshKeys'].append(
                {"id": key})

    if 'private_vlan' in vs_config:
        asconfig['networkVlans'] = [
            {"networkVlanId": vs_config['private_vlan']}
        ]
    if 'public_vlan' in vs_config:
        publicvlan = {"networkVlanId": vs_config['public_vlan']}
        if 'networkVlans' in asconfig:
            asconfig['networkVlans'].append(publicvlan)
        else:
            asconfig['networkVlans'] = [publicvlan]

    # alias kept outside the conditionals so both blocks below can use it
    vt = 'virtualGuestMemberTemplate'
    if 'post_uri' in vs_config:
        asconfig[vt]['postInstallScriptUri'] = vs_config['post_uri']
    if 'userdata' in vs_config:
        asconfig[vt]['userdata'] = [{'value': vs_config['userdata']}]

    lib.debug(json.dumps(asconfig, indent=4, sort_keys=True))
    asgroup = client['Scale_Group'].createObject(asconfig)
    lib.debug(asgroup)
    lib.save_state(sl_storage, containername,
                   "serverinstances/{}/autoscale/id".format(groupname),
                   asgroup['id'])

    triggertypes = client['Scale_Policy_Trigger_Type'].getAllObjects()
    lib.debug(json.dumps(triggertypes))

    if 'policies' in autoscaledef:
        for policyname, policydef in autoscaledef['policies'].iteritems():
            lib.debug(json.dumps(policydef))
            # a policy may omit 'duration'; default to None rather than
            # carrying over a value from a previous iteration
            duration = policydef.get('duration', None)
            trigger = trigger_read(policydef['trigger'], triggertypes,
                                   duration)
            action = action_read(policydef['action'])
            lib.debug(trigger)
            lib.debug(action)

            newpolicy = {
                "name": policyname,
                "scaleGroupId": asgroup['id'],
                "complexType": 'SoftLayer_Scale_Policy',
                'scaleActions': [action]
            }
            lib.debug(newpolicy)
            policy = client['Scale_Policy'].createObject(newpolicy)
            triggerclass = trigger["class"]
            # remove field
            del trigger["class"]
            trigger["scalePolicyId"] = policy["id"]
            lib.debug(trigger)
            trigger = client[triggerclass].createObject(trigger)
            lib.debug(trigger)

    if 'loadbalancer' in autoscaledef:
        lbconfig = autoscaledef['loadbalancer']
        x = lbconfig['name'].split('.')
        lbname = x[0]
        lbgroup = x[1]
        sg = score['resources']['loadbalancers'][lbname]['service-groups']
        lbg = sg[lbgroup]
        newdef = {
            'scaleGroupId': asgroup['id'],
            'virtualServerId': lbg['id'],
            'healthCheck': {'type':
                            {'keyname': lbg['health_check'].upper()}},
            'port': lbconfig['balance-to']
        }

        lib.debug(client['Scale_LoadBalancer'].createObject(newdef))

    # now activate the group
    client['Scale_Group'].resume(id=asgroup['id'])

    # sleep till provisioning of vms done
    sleeps = 600/5
    mask = "virtualGuestMembers.virtualGuest.primaryIpAddress"
    while sleeps != 0:
        lib.debug('sleeping')
        time.sleep(5)
        sleeps = sleeps - 1
        asgroup = client['Scale_Group'].getObject(id=asgroup['id'], mask=mask)
        if asgroup['status']['keyName'] == 'ACTIVE':
            groupdef['vms'] = [v['virtualGuest']
                               for v in asgroup['virtualGuestMembers']]
            break
    client['Scale_Group'].editObject(
        {
          'minimumMemberCount': autoscaledef['minimumMemberCount'],
          'minimumVirtualGuestMemberCount': autoscaledef['minimumMemberCount'],
        }, id=asgroup['id'])
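
Finally, a sketch of the autoscale section this function reads, shown as the parsed dict that would sit at groupdef['autoscale'] (thresholds and names are illustrative; the 'trigger' and 'action' values are strings interpreted by the project's trigger_read/action_read helpers, whose format is not shown here):

autoscale = {
    'maximumMemberCount': 5,
    'minimumMemberCount': 2,
    'policies': {
        'scale-up-on-cpu': {
            'duration': 600,             # optional per policy
            'trigger': '...',            # parsed by trigger_read
            'action': '...',             # parsed by action_read
        },
    },
    'loadbalancer': {                    # optional
        'name': 'weblb.http',            # <loadbalancer>.<service-group>
        'balance-to': 8080,              # port the lb forwards to
    },
}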