Example No. 1
    def testGetFromPools(self):
        p1 = clusto.get_or_create('p1', Pool)
        p2 = clusto.get_or_create('p2', Pool)

        s1 = BasicServer('s1')
        s2 = BasicServer('s2')
        s3 = BasicServer('s3')
        s4 = BasicServer('s4')
        s5 = BasicServer('s5')
        s6 = BasicServer('s6')

        p1.insert(s1)
        p1.insert(s2)
        p1.insert(s3)
        p2.insert(s3)
        p2.insert(s4)
        p2.insert(s5)

        self.assertEqual(
            sorted([s1, s2, s3]),
            sorted(
                clusto.get_from_pools(pools=[p1], clusto_types=[BasicServer])))
        self.assertEqual(
            sorted([s3, s4, s5]),
            sorted(
                clusto.get_from_pools(pools=[p2], clusto_types=[BasicServer])))
        self.assertEqual(
            sorted([s3]),
            sorted(
                clusto.get_from_pools(pools=[p1, p2],
                                      clusto_types=[BasicServer])))

        self.assertRaises(LookupError, clusto.get_from_pools, 'p1',
                          'non-existent-pool')
Example No. 2
    def run(self, args):
        self.info('Creating the datacenter manager')

        key = args.key_id.pop()
        dcman = clusto.get_or_create(args.dcman,
            smdatacentermanager.SMDatacenterManager,
            key_id=key)

        p = None
        if args.add_to_pool:
            p = clusto.get_or_create(args.add_to_pool, drivers.pool.Pool)

        self.info('Creating known Joyent locations and importing '
            'existing machines')
        for loc, url in smartdc.KNOWN_LOCATIONS.items():
            self.debug('Creating location %s' % (loc,))
            sdc = clusto.get_or_create(loc, smdatacenter.SMDatacenter,
                location=loc)
            if p and sdc not in p:
                p.insert(sdc)
            self.debug('Testing connectivity to %s' % (loc,))
            try:
                machines = dcman._connection(loc).machines()
                self.debug('Number of existing machines: %d' %
                    (len(machines),))
            except Exception as e:
                self.warn(e)
                self.error('Your credentials seem to be incorrect for %s' %
                    (loc,))
                continue
            if not args.no_import:
                for machine in machines:
                    self.debug('Getting/creating clusto object for %s' %
                        (machine))
                    name = machine.name or machine.id.replace('-', '_')
                    n = clusto.get_or_create(name,
                        smserver.SMVirtualServer, machine_id=machine.id)
                    if n not in sdc:
                        sdc.insert(n)
                    if n not in dcman.referencers():
                        dcman.allocate(n, resource=machine)
                        n.update_metadata()
                    self.debug('%s is imported' % (machine,))
            self.info('Created and imported resources for %s' % (loc,))
        self.info('Done.')
Example No. 3
def make_all_ec2_objects(aws_access_key_id=None, aws_secret_access_key=None):
    ec2man = clusto.get_entities(clusto_types=[clusto.drivers.EC2VMManager])

    if not ec2man:
        if not (aws_access_key_id and aws_secret_access_key):
            raise Exception("you must specify both an aws_access_key_id and an "
                            "aws_secret_access_key if you don't already have "
                            "an EC2VMManager")

        ec2man = clusto.drivers.EC2VMManager('ec2vmman',
                                             aws_access_key_id=aws_access_key_id,
                                             aws_secret_access_key=aws_secret_access_key)

    else:
        ec2man = ec2man.pop()

    ec2ipman = clusto.get_entities(clusto_types=[clusto.drivers.EC2IPManager])

    if not ec2ipman:
        if not (aws_access_key_id and aws_secret_access_key):
            raise Exception("you must specify both an aws_access_key_id and an "
                            "aws_secret_access_key if you don't already have "
                            "an EC2IPManager")

        ec2ipman = clusto.drivers.EC2IPManager('ec2ipman',
                                               aws_access_key_id=aws_access_key_id,
                                               aws_secret_access_key=aws_secret_access_key)
    else:
        ec2ipman = ec2ipman.pop()

    conn = ec2man._ec2_connection()

    for region in conn.get_all_regions():
        curconn = ec2man._ec2_connection(region.name)
        region_entity = clusto.get_or_create(region.name,
                                             clusto.drivers.EC2Region,
                                             region=region.name)
        for zone in curconn.get_all_zones():
            zone_entity = clusto.get_or_create(zone.name,
                                               clusto.drivers.EC2Zone,
                                               placement=zone.name)

            if zone_entity not in region_entity:
                region_entity.insert(zone_entity)
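
A minimal usage sketch for the function above; the credential values are placeholders and must be real AWS keys unless an EC2VMManager and EC2IPManager already exist in the clusto database:

    # Placeholder credentials; only needed when the managers don't
    # already exist.
    make_all_ec2_objects(aws_access_key_id='AKIAEXAMPLE',
                         aws_secret_access_key='SECRETEXAMPLE')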
Example No. 4
    def testGetFromPools(self):
        p1 = clusto.get_or_create('p1', Pool)
        p2 = clusto.get_or_create('p2', Pool)

        s1 = BasicServer('s1')
        s2 = BasicServer('s2')
        s3 = BasicServer('s3')
        s4 = BasicServer('s4')
        s5 = BasicServer('s5')

        p1.insert(s1)
        p1.insert(s2)
        p1.insert(s3)
        p2.insert(s3)
        p2.insert(s4)
        p2.insert(s5)

        self.assertEqual(sorted([s1, s2, s3]),
                         sorted(clusto.get_from_pools(pools=[p1],
                                                      clusto_types=[BasicServer])))
        self.assertEqual(sorted([s3, s4, s5]),
                         sorted(clusto.get_from_pools(pools=[p2],
                                                      clusto_types=[BasicServer])))
        self.assertEqual(sorted([s3]),
                         sorted(clusto.get_from_pools(pools=[p1, p2],
                                                      clusto_types=[BasicServer])))

        self.assertRaises(LookupError,
                          clusto.get_from_pools, 'p1', 'non-existent-pool')

        # mixing types in get_from_pools should raise a TypeError now
        self.assertRaises(TypeError,
                          clusto.get_from_pools, ['p1', 's1'], 'dont-mix-types')

        # lax joining in get_from_entities should return an empty list in this case because s1 has no children
        self.assertEqual([], sorted(clusto.get_from_entities(['p1', 's1'])))
Example No. 5
    def __init__(self, name, datacenter):
        self.datacenter = datacenter
        self.rack = clusto.get_or_create(name, APCRack)
        self.switch = clusto.get_or_create(name + '-sw1', Cisco4948)
        self.console = clusto.get_or_create(name + '-ts1', OpenGearCM4148)
        self.power = clusto.get_or_create(name + '-pwr1', PowerTowerXM)
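
A hypothetical usage sketch for the constructor above; the owning class name (RackFactory below) is an assumption, since the example does not show it, and BasicDatacenter stands in for whatever datacenter driver is in use:

    # 'RackFactory' is a made-up name for the class that owns the
    # __init__ above; the real class is not shown in the example.
    datacenter = clusto.get_or_create('dc1', BasicDatacenter)
    factory = RackFactory('rack101', datacenter)
    factory.datacenter.insert(factory.rack)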
Example No. 6
    def run(self, args):
        number = args.number

        if args.from_pool:
            self.debug('Grabbing pool from parameter')
            pool = args.from_pool
        else:
            self.debug('Grabbing pool from config file or default')
            pool = self.get_conf('allocate.pool', 'unallocated')
        self.debug('Unallocated pool is "%s"' % pool)

        pools = []
        try:
            if args.create_pools:
                pools = [
                    clusto.get_or_create(_, drivers.pool.Pool)
                    for _ in args.pool
                ]
            else:
                pools = [
                    clusto.get_by_name(_, assert_driver=drivers.pool.Pool)
                    for _ in args.to_pool
                ]
        except Exception as e:
            self.debug(e)
            self.error(
                'There was an error when fetching/creating the pools to allocate the servers to'
            )
            return 4
        self.debug('Target pool list: %s' % pools)

        try:
            pool = clusto.get_by_name(pool, assert_driver=drivers.pool.Pool)
        except Exception as e:
            self.debug(e)
            self.error('The pool "%s" does not exist' % pool)
            return 1

        try:
            parent = clusto.get(args.parent)
            if not parent:
                raise LookupError("Parent object is %s" % parent)
        except Exception as e:
            self.debug(e)
            self.error('The parent object "%s" does not exist' % args.parent)
            return 2
        parent = parent[0]
        if not isinstance(parent, drivers.racks.BasicRack) and not isinstance(
                parent, drivers.datacenters.BasicDatacenter):
            self.error('The parent "%s" is not a rack or a datacenter' %
                       args.parent)
            return 2

        self.info('Searching for servers in "%s", this may take a while' %
                  parent.name)

        unallocated = parent.contents(
            clusto_types=[drivers.servers.BasicServer],
            search_children=True)
        unallocated = [_ for _ in unallocated if _ in pool and _.get_ips()]
        self.debug('The unallocated list size is %d' % len(unallocated))
        if len(unallocated) < number:
            self.error(
                'There are not enough servers in "%s" to fulfill your request'
                % args.parent)
            return 3

        filters = []
        if args.memory:
            filters.append(self.__make_filter('memory', args.memory * 1000))
        if args.disk:
            filters.append(self.__make_filter('disk', args.disk))
        if args.cores:
            filters.append(self.__make_filter('cpucount', args.cores))
        if args.spindles:
            filters.append(self.__make_filter('spindles', args.spindles))

        self.debug('Applying filters: %s' % filters)
        if not filters:
            servers = self.__sort_servers(unallocated)[:number]
        else:
            # Apply the filters cumulatively; re-filtering the original
            # unallocated list on each pass would keep only the last
            # filter's result.
            servers = unallocated
            for func in filters:
                servers = filter(func, servers)
            servers = servers[:number]
        self.debug('Server list: %s' % servers)

        for s in servers:
            pool.remove(s)
            for p in pools:
                p.insert(s)

        self.info(
            'The following servers matching your filters were allocated '
            'from the pool "%s"' % pool.name)
        self.info('The servers were also added to the pools %s' %
                  ','.join([_.name for _ in pools]))
        for s in servers:
            if s.get_ips():
                print s.get_ips()[0]
            else:
                print s.name
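
The private __make_filter helper is not shown in this example; a plausible sketch, assuming it builds a predicate that compares an integer system attribute against a minimum, could be:

    def __make_filter(self, subkey, minimum):
        # Hypothetical reconstruction; the real helper is not shown in
        # this example. Returns a predicate usable with filter() above.
        def predicate(server):
            values = server.attr_values(key='system', subkey=subkey)
            return bool(values) and int(values[0]) >= int(minimum)
        return predicate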
Example No. 7
    def run(self, args):
        self.debug('Grab or create the VM Manager')
        try:
            ec2connman = clusto.get_by_name(
                args.conn_manager,
                assert_driver=ec2_drivers.resourcemanagers.EC2ConnectionManager
            )
            vpcman = clusto.get_by_name(
                args.vpc_manager,
                assert_driver=ec2_drivers.resourcemanagers.VPCConnectionManager
            )
        except LookupError:
            if not (args.aws_key and args.aws_secret_key):
                raise Exception(
                    'you must specify both an aws_access_key_id '
                    'and an aws_secret_access_key if you don\'t already have '
                    'an EC2ConnectionManager'
                )
            ec2connman = clusto.get_or_create(
                args.conn_manager,
                ec2_drivers.resourcemanagers.EC2ConnectionManager,
                aws_access_key_id=args.aws_key,
                aws_secret_access_key=args.aws_secret_key
            )
            self.info(
                'Created the "%s" EC2 Connection Manager' % (
                    args.conn_manager
                )
            )
            vpcman = ec2_drivers.resourcemanagers.VPCConnectionManager(
                args.vpc_manager,
                aws_access_key_id=args.aws_key,
                aws_secret_access_key=args.aws_secret_key
            )
            self.info(
                'Created the "%s" VPC Connection Manager' % (
                    args.vpc_manager
                )
            )

        container_pool = None
        if args.add_to_pool:
            container_pool = clusto.get_or_create(
                args.add_to_pool, drivers.pool.Pool)
        self.info('Creating all available regions')
        for region in ec2connman._connection().get_all_regions():
            curconn = ec2connman._connection(region.name)
            vpcconn = vpcman._connection(region.name)
            region_entity = clusto.get_or_create(
                region.name,
                ec2_drivers.locations.datacenters.EC2Region,
                region=region.name
            )
            region_entity.set_attr(
                key='aws', subkey='ec2_region',
                value=region.name
            )
            self.debug('Created "%s" region' % (region.name, ))
#           Create all VPCs (if any)
            self.info(
                'Creating all VPCs (if any) for region %s' % (
                    region.name,
                )
            )
            for v in vpcconn.get_all_vpcs():
                v_entity = clusto.get_or_create(
                    v.id,
                    ec2_drivers.locations.datacenters.VPC,
                    vpc=v.id,
                )
                if v_entity not in vpcman.referencers():
                    vpcman.allocate(v_entity)
                    vpcman.additional_attrs(
                        v_entity,
                        resource={'vpc': v}
                    )
                self.debug('Created "%s" VPC' % (v.id, ))
                if v_entity not in region_entity:
                    region_entity.insert(v_entity)
#               Create all subnets (if any)
                self.info(
                    'Creating all subnets (if any) for VPC %s' % (
                        v.id,
                    )
                )
                for sn in vpcconn.get_all_subnets(filters={'vpc_id': v.id}):
                    sn_entity = clusto.get_or_create(
                        sn.id,
                        ec2_drivers.locations.zones.VPCSubnet,
                        subnet=sn.id,
                    )
                    if sn_entity not in vpcman.referencers():
                        vpcman.allocate(sn_entity)
                        vpcman.additional_attrs(
                            sn_entity,
                            resource={'subnet': sn}
                        )
                    if sn_entity not in v_entity:
                        v_entity.insert(sn_entity)
                        self.debug('Inserted subnet %s in VPC %s' % (
                            sn.id, v.id, )
                        )
#           Create all zones
            self.info(
                'Creating all availability zones for region %s' % (
                    region.name,
                )
            )
            for zone in curconn.get_all_zones():
                zone_entity = clusto.get_or_create(
                    zone.name,
                    ec2_drivers.locations.zones.EC2Zone,
                    placement=zone.name
                )
                zone_entity.set_attr(
                    key='aws', subkey='ec2_placement',
                    value=zone.name
                )
                self.debug('Created "%s" zone' % (zone.name, ))
                if zone_entity not in region_entity:
                    region_entity.insert(zone_entity)
                self.debug(
                    'Inserted "%s" zone in "%s" region' % (
                        zone.name, region.name,
                    )
                )
#               if there are subnets in this region, insert them
                for sn in vpcconn.get_all_subnets(
                    filters={
                        'availability-zone': zone.name,
                    }
                ):
                    sn_entity = clusto.get_by_name(sn.id)
                    if sn_entity not in zone_entity:
                        zone_entity.insert(sn_entity)
                        self.debug('Inserted %s subnet in %s AZ' % (
                            sn.id, zone.name, )
                        )
            if container_pool and region_entity not in container_pool:
                self.debug(
                    'Adding region %s to pool %s' % (
                        region.name, args.add_to_pool,
                    )
                )
                container_pool.insert(region_entity)

        self.info('Creating all security groups')
        sgs = vpcman._connection().get_all_security_groups()
        belong = {}
        for sg in sgs:
            belong[sg.id] = [_.id for _ in sg.instances()]
        sgs = [(_.id, _.name, _.region.name, _.vpc_id) for _ in sgs]
        for sg_id, sg_name, region_name, vpc_id in sgs:
            self.debug(
                'Importing %s (%s), region: %s, vpc? %s' % (
                    sg_name, sg_id, region_name, bool(vpc_id),
                )
            )
            sg_ent = clusto.get_or_create(
                sg_id,
                ec2_drivers.categories.securitygroup.EC2SecurityGroup,
                group_id=sg_id,
                group_name=sg_name
            )
            if vpc_id:
                parent = clusto.get_by_name(vpc_id)
            else:
                parent = clusto.get_by_name(region_name)
            if sg_ent not in parent:
                self.debug('Inserting security group %s into %s' % (sg_id, vpc_id or region_name,))
                parent.insert(sg_ent)

        if not args.no_import:
            self.info('Creating all instances')
            for reservations in vpcman._connection().get_all_instances():
                for instance in reservations.instances:
                    idriver = ec2_drivers.devices.servers.EC2VirtualServer
                    connman = ec2connman
                    name = instance.tags.get('Name', instance.id).lower().replace(' ', '_')
                    if instance.vpc_id and instance.subnet_id:
                        idriver = ec2_drivers.devices.servers.VPCVirtualServer
                        connman = vpcman
                    self.debug('Creating %s instance (%s)' % (name, idriver, ))
                    instance_entity = clusto.get_or_create(
                        name,
                        idriver,
                    )
                    placement = clusto.get_by_name(instance.subnet_id or instance.placement)
                    self.debug('Inserting instance %s into %s' % (name, placement, ))
                    if instance_entity not in placement:
                        placement.insert(instance_entity)
                    instance_entity.set_attr(
                        key='aws', subkey='ec2_instance_type',
                        value=instance.instance_type
                    )
                    if instance.key_name is not None:
                        instance_entity.set_attr(
                            key='aws', subkey='ec2_key_name',
                            value=instance.key_name
                        )
                    instance_entity.set_attr(
                        key='aws',
                        subkey='ec2_instance_id',
                        value=instance.id,
                    )

                    for sg, instances in belong.items():
                        if instance.id in instances:
                            sg_ent = clusto.get_by_name(sg)
                            if instance_entity not in sg_ent:
                                self.debug(
                                    'Adding instance %s to security group %s' % (
                                        instance.id, sg,
                                    )
                                )
                                sg_ent.insert(instance_entity)

                    self.debug('Allocating instance %s from %s' % (name, connman, ))
                    if instance_entity not in connman.referencers():
                        connman.allocate(instance_entity)
                        connman.additional_attrs(
                            instance_entity,
                            resource={'instance': instance}
                        )
                        instance_entity.update_metadata()
                    self.debug('%s is imported' % (instance,))
        self.info('Finished, AWS objects should now be in the database')
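
A hedged sketch of the argument parser this run() method appears to expect, inferred only from the args attributes it reads; the real tool's flag names and defaults may differ:

    import argparse

    # Hypothetical parser inferred from the attributes read above; the
    # real tool's flag names and defaults may differ.
    parser = argparse.ArgumentParser()
    parser.add_argument('--conn-manager', default='ec2connman')
    parser.add_argument('--vpc-manager', default='vpcconnman')
    parser.add_argument('--aws-key')
    parser.add_argument('--aws-secret-key')
    parser.add_argument('--add-to-pool')
    parser.add_argument('--no-import', action='store_true')
    args = parser.parse_args()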
Example No. 8
def create(driver):
    """
This differs from the standard way of creating entities in that resource
managers can take a number of extra parameters that do not necessarily
match those of other entities. These parameters are defined by each
resource manager driver and are essentially arbitrary, so it makes sense
to handle these crucial differences separately.

Examples:

.. code:: bash

    $ ${post} -d 'name=nameman1' ${server_url}/resourcemanager/simplenamemanager
    {
        "attrs": [
            ...
        ],
        "contents": [],
        "count": 0,
        "driver": "simplenamemanager",
        "name": "nameman1",
        "parents": [],
        "type": "resourcemanager"
    }
    HTTP: 201
    Content-type: application/json

Will create a ``SimpleNameManager`` resource manager named ``nameman1`` with
all default values set.

.. code:: bash

    $ ${post} -d 'name=ipman1' -d 'gateway=192.168.1.1' -d 'netmask=255.255.255.0' -d 'baseip=192.168.1.10' ${server_url}/resourcemanager/ipmanager
    {
        "attrs": [
            {
                "datatype": "string",
                "key": "baseip",
                "number": null,
                "subkey": "property",
                "value": "192.168.1.10"
            },
            {
                "datatype": "string",
                "key": "gateway",
                "number": null,
                "subkey": "property",
                "value": "192.168.1.1"
            },
            {
                "datatype": "string",
                "key": "netmask",
                "number": null,
                "subkey": "property",
                "value": "255.255.255.0"
            }
        ],
        "contents": [],
        "count": 0,
        "driver": "ipmanager",
        "name": "ipman1",
        "parents": [],
        "type": "resourcemanager"
    }
    HTTP: 201
    Content-type: application/json

Will create an ``IPManager`` resource manager named ``ipman1`` with some
additional arguments such as ``netmask``, ``gateway``, and ``baseip``.

"""
    if driver not in clusto.driverlist:
        return util.dumps('Requested driver "%s" does not exist' % (driver,), 412)
    cls = clusto.driverlist[driver]
    name = request.params.get('name')
    request.params.pop('name')

#   Pass any additional parameters as is to the constructor
    kwargs = {}
    for param, value in request.params.items():
        kwargs[param] = value

    found = None
    try:
        found = util.unclusto(clusto.get_by_name(name))
    except LookupError:
        pass

    obj = clusto.get_or_create(name, cls, **kwargs)

    headers = {}
    if found:
        headers['Warnings'] = 'Resource manager "%s" already exists' % (found,)

    code = 201
    if found:
        code = 202
    return util.dumps(util.show(obj), code, headers=headers)
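
For reference, one concrete expansion of the templated ${post} calls in the docstring above, assuming the API server listens on the 127.0.0.1:9664 default visible in the _configure examples below:

    $ curl -i -X POST -d 'name=nameman1' \
        http://127.0.0.1:9664/resourcemanager/simplenamemanager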
Example No. 9
def create(driver):
    """
Creates a new object of the given driver.

 *  Requires the HTTP parameter ``name``

Example:

.. code:: bash

    $ ${post} -d 'name=createpool1' ${server_url}/entity/pool
    [
        "/pool/createpool1"
    ]
    HTTP: 201
    Content-type: application/json

Will create a new ``createpool1`` object with the ``pool`` driver. If the
``createpool1`` object already exists, the status code returned will be 202,
and any warnings will show up in the ``Warnings`` header:

.. code:: bash

    $ ${post_i} -d 'name=createpool1' ${server_url}/entity/pool
    HTTP/1.0 202 Accepted
    ...
    Warnings: Entity(s) /pool/createpool1 already exist(s)...
    [
        "/pool/createpool1"
    ]

If you try to create a server of an unknown driver, you should receive
a 412 status code back:

.. code:: bash

    $ ${post} -d 'name=createobject' ${server_url}/entity/nondriver
    "Requested driver \"nondriver\" does not exist"
    HTTP: 412
    Content-type: application/json

The following example:

.. code:: bash

    $ ${post_i} -d 'name=createpool1' -d 'name=createpool2' ${server_url}/entity/pool
    HTTP/1.0 202 Accepted
    ...
    Warnings: Entity(s) /pool/createpool1 already exist(s)...
    [
        "/pool/createpool1",
        "/pool/createpool2"
    ]

Will attempt to create new objects ``createpool1`` and ``createpool2`` with
a ``pool`` driver. As all objects are validated prior to creation, if any of
them already exists the return code will be 202 (Accepted) and you will get
an extra header ``Warnings`` with the message.

"""

    if driver not in clusto.driverlist:
        return util.dumps('Requested driver "%s" does not exist' % (driver, ),
                          412)
    cls = clusto.driverlist[driver]
    names = request.params.getall('name')
    request.params.pop('name')

    found = []
    for name in names:
        try:
            found.append(util.unclusto(clusto.get_by_name(name)))
        except LookupError:
            pass

    result = []
    for name in names:
        result.append(util.unclusto(clusto.get_or_create(name, cls)))

    headers = {}
    if found:
        headers['Warnings'] = 'Entity(s) %s already exist(s)' % (
            ','.join(found), )

    code = 201
    if found:
        code = 202
    return util.dumps(result, code, headers=headers)
Example No. 10
def barker_callback(body):
    if 'ec2' not in body:
        return
    if 'instance-id' not in body['ec2']:
        return
    ec2 = body['ec2']
    log.debug(ec2['instance-id'])

    try:
        clusto.begin_transaction()
        server = clusto.get_or_create(ec2['instance-id'], SGServer)

        if not server.attr_values(key='ec2', subkey='instance-id'):
            server.set_attr(key='ec2', subkey='instance-id', value=ec2['instance-id'])

        zone = clusto.get(ec2['placement'])
        if not zone:
            zone = EC2Zone(ec2['placement'])
        else:
            zone = zone[0]
        if server not in zone:
            zone.insert(server)

        for key, subkey in EC2_SUBKEYS.items():
            server.set_attr(key='ec2', subkey=subkey, value=ec2[key])

        previous_ec2sg = server.attr_values(key='ec2', subkey='security-group')
        for group in set(previous_ec2sg).difference(set(ec2['security-groups'])):
            server.del_attrs(key='ec2', subkey='security-group', value=group)

        for i, group in enumerate(sorted(ec2['security-groups'])):
            server.set_attr(key='ec2', subkey='security-group', number=i, value=group)
            if group.find('_') != -1:
                environment, role = group.lower().split('_', 1)
                p = clusto.get_or_create(environment, Pool)
                if not p.attrs(key='pooltype', value='environment'):
                    p.set_attr(key='pooltype', value='environment')
                if server not in p:
                    p.insert(server)

        #server.bind_ip_to_osport(ec2['local-ipv4'], 'nic-eth', 0)
        #server.bind_ip_to_osport(ec2['public-ipv4'], 'nic-eth', 0)
        if len(server.attrs(key='ip', subkey='ipstring')) != 2:
            server.del_attrs(key='ip', subkey='ipstring')
            server.add_attr(key='ip', subkey='ipstring', value=ec2['local-ipv4'], number=0)
            server.add_attr(key='ip', subkey='ipstring', value=ec2['public-ipv4'], number=0)

        system = body['os']
        server.set_attr(key='system', subkey='memory',
                        value=int(system['memory']['MemTotal']) / 1024)
        server.set_attr(key='system', subkey='hostname',
                        value=system['hostname'])
        server.set_attr(key='system', subkey='os',
                        value=system['operatingsystemrelease'])
        if 'cpu' in system and len(system['cpu']) > 0:
            server.set_attr(key='system', subkey='cputype',
                            value=system['cpu'][0]['model name'])
            server.set_attr(key='system', subkey='cpucount',
                            value=len(system['cpu']))
            server.set_attr(key='system', subkey='cpucache',
                            value=system['cpu'][0]['cache size'])

        if 'kernelrelease' in system:
            server.set_attr(key='system', subkey='kernelrelease',
                            value=system['kernelrelease'])

        previous_disk = server.attr_key_tuples(key='disk')
        incoming_disk = []
        blockmap = [(v.replace('/dev/', ''), k) for k, v in ec2['block-device-mapping'].items() if k != 'root']
        blockmap = dict(blockmap)
        total_disk = 0
        for i, disk in enumerate(system['disks']):
            for subkey in disk.keys():
                server.set_attr(key='disk', subkey=subkey, number=i, value=str(disk[subkey]))
                incoming_disk.append(('disk', i, subkey))
            if disk['osname'] in blockmap:
                server.set_attr(key='disk', subkey='ec2-type', number=i, value=blockmap[disk['osname']])
                incoming_disk.append(('disk', i, 'ec2-type'))
            total_disk += disk['size']
        total_disk = total_disk / 1073741824  # bytes to gigabytes
        server.set_attr(key='system', subkey='disk', value=total_disk)

        for attr in set(previous_disk).difference(set(incoming_disk)):
            server.del_attrs(key=attr[0], subkey=attr[2], number=attr[1])

        for subkey, value in body.get('sgmetadata', {}).items():
            server.set_attr(key='sgmetadata', subkey=subkey, value=value)
            if subkey == 'clusterid' and value:
                cluster = clusto.get_or_create(value, Pool)
                if not cluster.attrs(key='pooltype', value='clusterid'):
                    cluster.set_attr(key='pooltype', value='clusterid')
                if server not in cluster:
                    cluster.insert(server)
            if subkey == 'role' and value:
                if len(server.attr_values(key='puppet', subkey='class', merge_container_attrs=True)) == 0:
                    server.set_attr(key='puppet', subkey='class',
                                    value='site::role::%s' % value)

                p = clusto.get_or_create(value, Pool)
                if not p.attrs(key='pooltype', value='role'):
                    p.set_attr(key='pooltype', value='role')
                if server not in p:
                    p.insert(server)

        if len(server.attr_values(key='puppet', subkey='class', merge_container_attrs=True)) == 0:
            log.warning('Found host %s with no role set, using site::role::base' % ec2['instance-id'])
            server.set_attr(key='puppet', subkey='class',
                            value='site::role::base')

        #server.set_attr(key='barker', subkey='last_updated', value=int(time()))

        try:
            owners = body['owners']
            for owner, reason in owners.iteritems():
                server.set_attr(key='owner', subkey=owner, value=reason)
        except KeyError:
            pass

        clusto.commit()
    except:
        log.warning('Exception from %s: %s' % (ec2['instance-id'], format_exc()))
        clusto.rollback_transaction()
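
A minimal sketch of the message body this callback expects, reconstructed only from the keys the code reads; every value below is a placeholder, and EC2_SUBKEYS adds further required keys not shown here:

    body = {
        'ec2': {
            'instance-id': 'i-0123456789abcdef0',
            'placement': 'us-east-1a',
            'local-ipv4': '10.0.0.5',
            'public-ipv4': '203.0.113.5',
            'security-groups': ['production_web'],
            'block-device-mapping': {'root': '/dev/sda1',
                                     'ephemeral0': '/dev/sdb'},
            # ...plus one entry per key in EC2_SUBKEYS
        },
        'os': {
            'hostname': 'web01',
            'operatingsystemrelease': '12.04',
            'memory': {'MemTotal': '3840000'},  # kB
            'cpu': [{'model name': 'Intel(R) Xeon(R)',
                     'cache size': '20480 KB'}],
            'disks': [{'osname': 'sdb', 'size': 107374182400}],
            'kernelrelease': '3.2.0-23-virtual',
        },
        'sgmetadata': {'clusterid': 'web', 'role': 'web'},  # optional
        'owners': {'alice': 'primary on-call'},             # optional
    }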
Example No. 11
    def run(self, args):
        self.debug('Grab or create the VM Manager')
        try:
            ec2connman = clusto.get_by_name(args.conn_manager,
                assert_driver=ec2_drivers.EC2ConnectionManager)
        except LookupError:
            if not (args.aws_key and args.aws_secret_key):
                raise Exception("you must specify both an aws_access_key_id "
                    "and an aws_secret_access_key if you don't already have "
                    "an EC2ConnectionManager")
            ec2connman = ec2_drivers.EC2ConnectionManager(args.conn_manager,
                aws_access_key_id=args.aws_key,
                aws_secret_access_key=args.aws_secret_key)
            self.info('Created the "%s" EC2 Connection Manager' %
                (args.conn_manager))

        container_pool = None
        if args.add_to_pool:
            container_pool = clusto.get_or_create(
                args.add_to_pool, drivers.pool.Pool)
        self.info('Creating all available regions')
        for region in ec2connman._connection().get_all_regions():
            curconn = ec2connman._connection(region.name)
            region_entity = clusto.get_or_create(region.name,
                ec2_drivers.EC2Region,
                region=region.name)
            region_entity.set_attr(key='aws', subkey='ec2_region',
                value=region.name)
            self.debug('Created "%s" region' % (region.name, ))
#           Create all zones
            self.info('Creating all availability zones for region %s' %
                (region.name,))
            for zone in curconn.get_all_zones():
                zone_entity = clusto.get_or_create(zone.name,
                    ec2_drivers.EC2Zone,
                    placement=zone.name)
                zone_entity.set_attr(key='aws', subkey='ec2_placement',
                    value=zone.name)
                self.debug('Created "%s" zone' % (zone.name, ))
                if zone_entity not in region_entity:
                    region_entity.insert(zone_entity)
                self.debug('Inserted "%s" zone in "%s" region' %
                    (zone.name, region.name, ))
            if container_pool and region_entity not in container_pool:
                self.debug('Adding region %s to pool %s' %
                    (region.name, args.add_to_pool,))
                container_pool.insert(region_entity)

        if not args.no_import:
            self.info('Creating all instances')
            for reservations in ec2connman._connection().get_all_instances():
                for instance in reservations.instances:
                    instance_entity = clusto.get_or_create(instance.id,
                            ec2_drivers.EC2VirtualServer)
                    placement = clusto.get_by_name(instance.placement)
                    if placement not in instance_entity.parents():
                        placement.insert(instance_entity)
                    instance_entity.set_attr(key='aws', subkey='ec2_instance_type',
                            value=instance.instance_type)
                    if instance.key_name is not None:
                        instance_entity.set_attr(key='aws', subkey='ec2_key_name',
                                value=instance.key_name)
                    instance_entity.set_attr(key='aws',
                        subkey='ec2_instance_id',
                        value=instance.id
                    )
                    if instance_entity not in ec2connman.referencers():
                        ec2connman.allocate(instance_entity)
                        ec2connman.additional_attrs(instance_entity,
                            resource={'instance': instance})
                        instance_entity.update_metadata()
                    self.debug('%s is imported' % (instance,))
        self.info('Finished, AWS objects should now be in the database')
Example No. 12
def _configure(config={}, configfile=None, init_data={}):
    """
Configure the root app
"""

    if configfile:
        cfg = configfile
    else:
        cfg = os.environ.get(
            'CLUSTOCONFIG',
            '/etc/clusto/clusto.conf'
        )
    cfg = script_helper.load_config(cfg)
    clusto.connect(cfg)
#   This is an idempotent operation
    clusto.init_clusto()
#   If init_data is provided, populate it in the clusto database
    if init_data:
        for name, data in init_data.items():
            ent = clusto.get_or_create(
                name,
                data['driver'],
                **data.get('attrs', {})
            )
            for pool in data.get('member_of', []):
                clusto.get_by_name(pool).insert(ent)

            for attr in data.get('attr_list', []):
                ent.set_attr(**attr)

    kwargs = {}
    kwargs['host'] = config.get(
        'host',
        script_helper.get_conf(
            cfg, 'apiserver.host', default='127.0.0.1'
        ),
    )
    kwargs['port'] = config.get(
        'port',
        script_helper.get_conf(
            cfg, 'apiserver.port', default=9664, datatype=int
        ),
    )
    kwargs['server'] = config.get(
        'server',
        script_helper.get_conf(
            cfg, 'apiserver.server', default='wsgiref'
        ),
    )
    kwargs['server_kwargs'] = config.get(
        'server_kwargs',
        script_helper.get_conf(
            cfg, 'apiserver.server_kwargs', default={}, datatype=dict
        ),
    )
    kwargs['debug'] = config.get(
        'debug',
        script_helper.get_conf(
            cfg, 'apiserver.debug', default=False, datatype=bool
        )
    )
    kwargs['quiet'] = config.get(
        'quiet',
        script_helper.get_conf(
            cfg, 'apiserver.quiet', default=False, datatype=bool
        )
    )
    kwargs['reloader'] = config.get(
        'reloader',
        script_helper.get_conf(
            cfg, 'apiserver.reloader', default=False, datatype=bool
        )
    )
    mount_apps = config.get(
        'apps',
        script_helper.get_conf(
            cfg, 'apiserver.apps', default={}, datatype=dict
        )
    )
    response_headers = config.get(
        'response_headers',
        script_helper.get_conf(
            cfg, 'apiserver.response_headers', default={}, datatype=dict
        )
    )

    root_app.route('/__doc__', 'GET', functools.partial(build_docs))
    for mount_point, cls in mount_apps.items():
        module = importlib.import_module(cls)
        root_app.mount(mount_point, module.app)

        # Documentation endpoints
        module.app.route('/__doc__', 'GET', functools.partial(build_docs, cls))
        module.app.route('/__doc__/', 'GET', functools.partial(build_docs, cls))

        # OPTIONS dummy routers
        module.app.route('/', 'OPTIONS', functools.partial(options))
        module.app.route('/<url:re:.+>', 'OPTIONS', functools.partial(options))

    @root_app.hook('before_request')
    def enable_response_headers():
        for header, value in response_headers.items():
            bottle.response.headers[header] = value

    return kwargs
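
A minimal init_data sketch, inferred from the keys the loop above reads; the driver value is assumed to be a clusto driver class accepted by clusto.get_or_create:

    init_data = {
        'example-pool': {
            'driver': drivers.pool.Pool,  # assumption: a driver class
            'attrs': {},                  # extra kwargs for get_or_create
            'member_of': [],              # names of existing containers to join
            'attr_list': [                # each dict feeds ent.set_attr(**attr)
                {'key': 'pooltype', 'value': 'example'},
            ],
        },
    }
    kwargs = _configure(init_data=init_data)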
Example No. 13
def _configure(config={}, configfile=None, init_data={}):
    """
Configure the root app
"""

    if configfile:
        cfg = configfile
    else:
        cfg = os.environ.get(
            'CLUSTOCONFIG',
            '/etc/clusto/clusto.conf'
        )
    cfg = script_helper.load_config(cfg)
    clusto.connect(cfg)
#   This is an idempotent operation
    clusto.init_clusto()
#   If init_data is provided, populate it in the clusto database
    if init_data:
        for name, data in init_data.items():
            ent = clusto.get_or_create(
                name,
                data['driver'],
                **data.get('attrs', {})
            )
            for pool in data.get('member_of', []):
                clusto.get_by_name(pool).insert(ent)

    kwargs = {}
    kwargs['host'] = config.get(
        'host',
        script_helper.get_conf(
            cfg, 'apiserver.host', default='127.0.0.1'
        ),
    )
    kwargs['port'] = config.get(
        'port',
        script_helper.get_conf(
            cfg, 'apiserver.port', default=9664, datatype=int
        ),
    )
    kwargs['server'] = config.get(
        'server',
        script_helper.get_conf(
            cfg, 'apiserver.server', default='wsgiref'
        ),
    )
    kwargs['debug'] = config.get(
        'debug',
        script_helper.get_conf(
            cfg, 'apiserver.debug', default=False, datatype=bool
        )
    )
    kwargs['quiet'] = config.get(
        'quiet',
        script_helper.get_conf(
            cfg, 'apiserver.quiet', default=False, datatype=bool
        )
    )
    kwargs['reloader'] = config.get(
        'reloader',
        script_helper.get_conf(
            cfg, 'apiserver.reloader', default=False, datatype=bool
        )
    )
    mount_apps = config.get(
        'apps',
        script_helper.get_conf(
            cfg, 'apiserver.apps', default={}, datatype=dict
        )
    )

    root_app.route('/__doc__', 'GET', functools.partial(build_docs, '/', __name__))
    for mount_point, cls in mount_apps.items():
        module = importlib.import_module(cls)
        path = '/__doc__%s' % (mount_point,)
        root_app.route(path, 'GET', functools.partial(build_docs, path, cls))
        root_app.mount(mount_point, module.app)

    return kwargs
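
A hedged sketch of how the returned kwargs might be consumed; that they feed a bottle-style run() call against the root app is an assumption, not shown in the example:

    # Assumption: the kwargs returned by _configure() are passed to
    # bottle.run(); host, port, server, debug, quiet and reloader are
    # all keyword arguments bottle.run() accepts.
    kwargs = _configure(configfile='/etc/clusto/clusto.conf')
    bottle.run(root_app, **kwargs)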