def test_subnet_edge(self):
     firstmac = utils.generate_mac()
     secondaddress = network.allocate_ip("netuser", "project0", firstmac)
     hostname = "toomany-hosts"
     for project in range(1,5):
         project_id = "project%s" % (project)
         mac = utils.generate_mac()
         mac2 = utils.generate_mac()
         mac3 = utils.generate_mac()
         address = network.allocate_ip(
                 "netuser", project_id, mac)
         address2 = network.allocate_ip(
                 "netuser", project_id, mac2)
         address3 = network.allocate_ip(
                 "netuser", project_id, mac3)
         self.assertEqual(False, is_in_project(address, "project0"))
         self.assertEqual(False, is_in_project(address2, "project0"))
         self.assertEqual(False, is_in_project(address3, "project0"))
         rv = network.deallocate_ip(address)
         rv = network.deallocate_ip(address2)
         rv = network.deallocate_ip(address3)
         net = network.get_project_network(project_id, "default")
         self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
         self.dnsmasq.release_ip(mac2, address2, hostname, net.bridge_name)
         self.dnsmasq.release_ip(mac3, address3, hostname, net.bridge_name)
     net = network.get_project_network("project0", "default")
     rv = network.deallocate_ip(secondaddress)
     self.dnsmasq.release_ip(firstmac, secondaddress, hostname, net.bridge_name)
    def test_range_allocation(self):
        mac = utils.generate_mac()
        secondmac = utils.generate_mac()
        hostname = "test-host"
        address = network.allocate_ip(
                    "netuser", "project0", mac)
        secondaddress = network.allocate_ip(
                "netuser", "project1", secondmac)
        net = network.get_project_network("project0", "default")
        secondnet = network.get_project_network("project1", "default")

        self.assertEqual(True, is_in_project(address, "project0"))
        self.assertEqual(True, is_in_project(secondaddress, "project1"))
        self.assertEqual(False, is_in_project(address, "project1"))

        # Addresses are allocated before they're issued
        self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
        self.dnsmasq.issue_ip(secondmac, secondaddress,
                                hostname, secondnet.bridge_name)

        rv = network.deallocate_ip(address)
        self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
        self.assertEqual(False, is_in_project(address, "project0"))

        # First address release shouldn't affect the second
        self.assertEqual(True, is_in_project(secondaddress, "project1"))

        rv = network.deallocate_ip(secondaddress)
        self.dnsmasq.release_ip(secondmac, secondaddress,
                                hostname, secondnet.bridge_name)
        self.assertEqual(False, is_in_project(secondaddress, "project1"))
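
Every example on this page fabricates MAC addresses with utils.generate_mac(). The helper itself is not shown in any of the excerpts; a minimal sketch of what such a function could look like is given below (the 02:16:3e prefix and the random tail are assumptions for illustration, not necessarily the project's actual implementation).

import random

def generate_mac():
    # Hypothetical sketch: a fixed locally-administered prefix followed by
    # three random octets, rendered as a colon-separated MAC string.
    octets = [0x02, 0x16, 0x3e,
              random.randint(0x00, 0x7f),
              random.randint(0x00, 0xff),
              random.randint(0x00, 0xff)]
    return ':'.join('%02x' % octet for octet in octets)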
Example #3
 def test_range_allocation(self):
     address = network.allocate_ip(
             "netuser", "project0", utils.generate_mac())
     secondaddress = network.allocate_ip(
             "netuser", "project1", utils.generate_mac())
     self.assertEqual(True,
                      address in self._get_project_addresses("project0"))
     self.assertEqual(True,
                      secondaddress in self._get_project_addresses("project1"))
     self.assertEqual(False, address in self._get_project_addresses("project1"))
     rv = network.deallocate_ip(address)
     self.assertEqual(False, address in self._get_project_addresses("project0"))
     rv = network.deallocate_ip(secondaddress)
     self.assertEqual(False,
                      secondaddress in self._get_project_addresses("project1"))
Example #4
 def test_allocate_deallocate_ip(self):
     address = network.allocate_ip(
             "netuser", "project0", utils.generate_mac())
     logging.debug("Was allocated %s" % (address))
     self.assertEqual(True, address in self._get_project_addresses("project0"))
     rv = network.deallocate_ip(address)
     self.assertEqual(False, address in self._get_project_addresses("project0"))
Example #5
 def run_instances(self, context, **kwargs):
     # make sure user can access the image
     # vpn image is private so it doesn't show up on lists
     if kwargs['image_id'] != FLAGS.vpn_image_id:
         image = self._get_image(context, kwargs['image_id'])
     logging.debug("Going to run instances...")
     reservation_id = utils.generate_uid('r')
     launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
     key_data = None
     if kwargs.has_key('key_name'):
         key_pair = context.user.get_key_pair(kwargs['key_name'])
         if not key_pair:
             raise exception.ApiError('Key Pair %s not found' %
                                      kwargs['key_name'])
         key_data = key_pair.public_key
     # TODO: Get the real security group of launch in here
     security_group = "default"
     bridge_name = network.BridgedNetwork.get_network_for_project(
         context.user.id, context.project.id, security_group)['bridge_name']
     for num in range(int(kwargs['max_count'])):
         inst = self.instdir.new()
         # TODO(ja): add ari, aki
         inst['image_id'] = kwargs['image_id']
         if 'kernel_id' in kwargs:
             inst['kernel_id'] = kwargs['kernel_id']
         if 'ramdisk_id' in kwargs:
             inst['ramdisk_id'] = kwargs['ramdisk_id']
         inst['user_data'] = kwargs.get('user_data', '')
         inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
         inst['reservation_id'] = reservation_id
         inst['launch_time'] = launch_time
         inst['key_data'] = key_data or ''
         inst['key_name'] = kwargs.get('key_name', '')
         inst['user_id'] = context.user.id
         inst['project_id'] = context.project.id
         inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = num
         inst['bridge_name'] = bridge_name
         if inst['image_id'] == FLAGS.vpn_image_id:
             address = network.allocate_vpn_ip(inst['user_id'],
                                               inst['project_id'],
                                               mac=inst['mac_address'])
         else:
             address = network.allocate_ip(inst['user_id'],
                                           inst['project_id'],
                                           mac=inst['mac_address'])
         inst['private_dns_name'] = str(address)
         # TODO: allocate expresses on the router node
         inst.save()
         rpc.cast(
             FLAGS.compute_topic, {
                 "method": "run_instance",
                 "args": {
                     "instance_id": inst.instance_id
                 }
             })
         logging.debug("Casting to node for %s's instance with IP of %s" %
                       (context.user.name, inst['private_dns_name']))
     # TODO: Make the NetworkComputeNode figure out the network name from ip.
     return defer.succeed(self._format_instances(context, reservation_id))
    def test_allocate_deallocate_ip(self):
        address = network.allocate_ip(
                "netuser", "project0", utils.generate_mac())
        logging.debug("Was allocated %s" % (address))
        net = network.get_project_network("project0", "default")
        self.assertEqual(True, is_in_project(address, "project0"))
        mac = utils.generate_mac()
        hostname = "test-host"
        self.dnsmasq.issue_ip(mac, address, hostname, net.bridge_name)
        rv = network.deallocate_ip(address)

        # Doesn't go away until it's dhcp released
        self.assertEqual(True, is_in_project(address, "project0"))

        self.dnsmasq.release_ip(mac, address, hostname, net.bridge_name)
        self.assertEqual(False, is_in_project(address, "project0"))
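
Several of the network tests above check membership with an is_in_project(address, project_id) helper that is defined elsewhere in the test module. A plausible sketch, assuming the project network object exposes a list of its allocated addresses (the list_addresses() call is an assumption for illustration):

def is_in_project(address, project_id):
    # Hypothetical helper: True if the address is currently allocated
    # inside the given project's default network.
    project_net = network.get_project_network(project_id, "default")
    return address in project_net.list_addresses()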
Example #7
 def test_too_many_projects(self):
     for i in range(0, 30):
         name = 'toomany-project%s' % i
         self.manager.create_project(name, 'netuser', name)
         address = network.allocate_ip("netuser", name,
                                       utils.generate_mac())
         rv = network.deallocate_ip(address)
         self.manager.delete_project(name)
Example #8
 def test_range_allocation(self):
     address = network.allocate_ip("netuser", "project0",
                                   utils.generate_mac())
     secondaddress = network.allocate_ip("netuser", "project1",
                                         utils.generate_mac())
     self.assertEqual(True, address
                      in self._get_project_addresses("project0"))
     self.assertEqual(
         True, secondaddress in self._get_project_addresses("project1"))
     self.assertEqual(False, address
                      in self._get_project_addresses("project1"))
     rv = network.deallocate_ip(address)
     self.assertEqual(False, address
                      in self._get_project_addresses("project0"))
     rv = network.deallocate_ip(secondaddress)
     self.assertEqual(
         False, secondaddress in self._get_project_addresses("project1"))
Example #9
 def test_too_many_projects(self):
     for i in range(0, 30):
         name = 'toomany-project%s' % i
         self.manager.create_project(name, 'netuser', name)
         address = network.allocate_ip(
                 "netuser", name, utils.generate_mac())
         rv = network.deallocate_ip(address)
         self.manager.delete_project(name)
Example #10
 def _create_instance(self, project_num, mac=None):
     if not mac:
         mac = utils.generate_mac()
     project = self.projects[project_num]
     self.context._project = project
     self.context.project_id = project.id
     return db.instance_create(self.context,
                               {'project_id': project.id,
                                'mac_address': mac})
Example #11
 def test_allocate_deallocate_ip(self):
     address = network.allocate_ip("netuser", "project0",
                                   utils.generate_mac())
     logging.debug("Was allocated %s" % (address))
     self.assertEqual(True, address
                      in self._get_project_addresses("project0"))
     rv = network.deallocate_ip(address)
     self.assertEqual(False, address
                      in self._get_project_addresses("project0"))
Example #12
 def _create_instance(self, project_num, mac=None):
     if not mac:
         mac = utils.generate_mac()
     project = self.projects[project_num]
     self.context._project = project
     self.context.project_id = project.id
     return db.instance_create(self.context, {
         'project_id': project.id,
         'mac_address': mac
     })
Example #13
File: cloud.py Project: sorenh/cc
 def run_instances(self, context, **kwargs):
     # make sure user can access the image
     # vpn image is private so it doesn't show up on lists
     if kwargs['image_id'] != FLAGS.vpn_image_id:
         image = self._get_image(context, kwargs['image_id'])
     logging.debug("Going to run instances...")
     reservation_id = utils.generate_uid('r')
     launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
     key_data = None
     if kwargs.has_key('key_name'):
         key_pair = context.user.get_key_pair(kwargs['key_name'])
         if not key_pair:
             raise exception.ApiError('Key Pair %s not found' %
                                      kwargs['key_name'])
         key_data = key_pair.public_key
     # TODO: Get the real security group of launch in here
     security_group = "default"
     bridge_name = network.BridgedNetwork.get_network_for_project(context.user.id, context.project.id, security_group)['bridge_name']
     for num in range(int(kwargs['max_count'])):
         inst = self.instdir.new()
         # TODO(ja): add ari, aki
         inst['image_id'] = kwargs['image_id']
         if 'kernel_id' in kwargs:
             inst['kernel_id'] = kwargs['kernel_id']
         if 'ramdisk_id' in kwargs:
             inst['ramdisk_id'] = kwargs['ramdisk_id']
         inst['user_data'] = kwargs.get('user_data', '')
         inst['instance_type'] = kwargs.get('instance_type', 'm1.small')
         inst['reservation_id'] = reservation_id
         inst['launch_time'] = launch_time
         inst['key_data'] = key_data or ''
         inst['key_name'] = kwargs.get('key_name', '')
         inst['user_id'] = context.user.id
         inst['project_id'] = context.project.id
         inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = num
         inst['bridge_name'] = bridge_name
         if inst['image_id'] == FLAGS.vpn_image_id:
             address = network.allocate_vpn_ip(
                     inst['user_id'], inst['project_id'], mac=inst['mac_address'])
         else:
             address = network.allocate_ip(
                     inst['user_id'], inst['project_id'], mac=inst['mac_address'])
         inst['private_dns_name'] = str(address)
         # TODO: allocate expresses on the router node
         inst.save()
         rpc.cast(FLAGS.compute_topic,
              {"method": "run_instance",
               "args": {"instance_id" : inst.instance_id}})
         logging.debug("Casting to node for %s's instance with IP of %s" %
                   (context.user.name, inst['private_dns_name']))
     # TODO: Make the NetworkComputeNode figure out the network name from ip.
     return defer.succeed(self._format_instances(
                             context, reservation_id))
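
Both run_instances excerpts build the reservation id with utils.generate_uid('r'), which produces values shaped like the 'r-fakeres' ids seen in the test fixtures. A minimal sketch of such a helper (the eight-character random suffix is an assumption, not the project's actual implementation):

import random

def generate_uid(topic, size=8):
    # Hypothetical sketch: '<topic>-' plus a short random alphanumeric
    # suffix, e.g. 'r-1a2b3c4d' for a reservation id.
    characters = '0123456789abcdefghijklmnopqrstuvwxyz'
    suffix = ''.join(random.choice(characters) for _ in range(size))
    return '%s-%s' % (topic, suffix)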
Example #14
 def _create_instance(self, cores=2):
     """Create a test instance"""
     inst = {}
     inst["image_id"] = 1
     inst["reservation_id"] = "r-fakeres"
     inst["user_id"] = self.user.id
     inst["project_id"] = self.project.id
     inst["instance_type_id"] = "3"  # m1.large
     inst["vcpus"] = cores
     inst["mac_address"] = utils.generate_mac()
     return db.instance_create(self.context, inst)["id"]
Example #15
 def _create_instance(self, cores=2):
     """Create a test instance"""
     inst = {}
     inst['image_id'] = 'ami-test'
     inst['reservation_id'] = 'r-fakeres'
     inst['user_id'] = self.user.id
     inst['project_id'] = self.project.id
     inst['instance_type'] = 'm1.large'
     inst['vcpus'] = cores
     inst['mac_address'] = utils.generate_mac()
     return db.instance_create(self.context, inst)['id']
Example #16
 def _create_instance(self, cores=2):
     """Create a test instance"""
     inst = {}
     inst['image_id'] = 1
     inst['reservation_id'] = 'r-fakeres'
     inst['user_id'] = self.user.id
     inst['project_id'] = self.project.id
     inst['instance_type'] = 'm1.large'
     inst['vcpus'] = cores
     inst['mac_address'] = utils.generate_mac()
     return db.instance_create(self.context, inst)['id']
Example #17
 def test_subnet_edge(self):
     secondaddress = network.allocate_ip("netuser", "project0",
                                         utils.generate_mac())
     for project in range(1, 5):
         project_id = "project%s" % (project)
         address = network.allocate_ip("netuser", project_id,
                                       utils.generate_mac())
         address2 = network.allocate_ip("netuser", project_id,
                                        utils.generate_mac())
         address3 = network.allocate_ip("netuser", project_id,
                                        utils.generate_mac())
         self.assertEqual(
             False, address in self._get_project_addresses("project0"))
         self.assertEqual(
             False, address2 in self._get_project_addresses("project0"))
         self.assertEqual(
             False, address3 in self._get_project_addresses("project0"))
         rv = network.deallocate_ip(address)
         rv = network.deallocate_ip(address2)
         rv = network.deallocate_ip(address3)
     rv = network.deallocate_ip(secondaddress)
Example #18
 def test_subnet_edge(self):
     secondaddress = network.allocate_ip("netuser", "project0",
                             utils.generate_mac())
     for project in range(1,5):
         project_id = "project%s" % (project)
         address = network.allocate_ip(
                 "netuser", project_id, utils.generate_mac())
         address2 = network.allocate_ip(
                 "netuser", project_id, utils.generate_mac())
         address3 = network.allocate_ip(
                 "netuser", project_id, utils.generate_mac())
         self.assertEqual(False,
                          address in self._get_project_addresses("project0"))
         self.assertEqual(False,
                          address2 in self._get_project_addresses("project0"))
         self.assertEqual(False,
                          address3 in self._get_project_addresses("project0"))
         rv = network.deallocate_ip(address)
         rv = network.deallocate_ip(address2)
         rv = network.deallocate_ip(address3)
     rv = network.deallocate_ip(secondaddress)
Example #19
 def _create_instance(self):
     """Create a test instance"""
     inst = {}
     inst['image_id'] = 'ami-test'
     inst['reservation_id'] = 'r-fakeres'
     inst['launch_time'] = '10'
     inst['user_id'] = self.user.id
     inst['project_id'] = self.project.id
     inst['instance_type'] = 'm1.tiny'
     inst['mac_address'] = utils.generate_mac()
     inst['ami_launch_index'] = 0
     return db.instance_create(self.context, inst)['id']
 def create_instance(self):
     inst = model.Instance('i-test')
     inst['reservation_id'] = 'r-test'
     inst['launch_time'] = '10'
     inst['user_id'] = 'fake'
     inst['project_id'] = 'fake'
     inst['instance_type'] = 'm1.tiny'
     inst['node_name'] = FLAGS.node_name
     inst['mac_address'] = utils.generate_mac()
     inst['ami_launch_index'] = 0
     inst.save()
     return inst
Example #21
 def _create_instance(self, params={}):
     """Create a test instance"""
     inst = {}
     inst['image_id'] = 1
     inst['reservation_id'] = 'r-fakeres'
     inst['launch_time'] = '10'
     inst['user_id'] = self.user.id
     inst['project_id'] = self.project.id
     inst['instance_type'] = 'm1.tiny'
     inst['mac_address'] = utils.generate_mac()
     inst['ami_launch_index'] = 0
     inst.update(params)
     return db.instance_create(self.context, inst)['id']
Example #22
 def test_subnet_edge(self):
     """Makes sure that private ips don't overlap"""
     first = self._create_address(0)
     lease_ip(first)
     instance_ids = []
     for i in range(1, 5):
         instance_ref = self._create_instance(i, mac=utils.generate_mac())
         instance_ids.append(instance_ref['id'])
         address = self._create_address(i, instance_ref['id'])
         instance_ref = self._create_instance(i, mac=utils.generate_mac())
         instance_ids.append(instance_ref['id'])
         address2 = self._create_address(i, instance_ref['id'])
         instance_ref = self._create_instance(i, mac=utils.generate_mac())
         instance_ids.append(instance_ref['id'])
         address3 = self._create_address(i, instance_ref['id'])
         lease_ip(address)
         lease_ip(address2)
         lease_ip(address3)
         self.context._project = self.projects[i]
         self.context.project_id = self.projects[i].id
         self.assertFalse(is_allocated_in_project(address,
                                                  self.projects[0].id))
         self.assertFalse(is_allocated_in_project(address2,
                                                  self.projects[0].id))
         self.assertFalse(is_allocated_in_project(address3,
                                                  self.projects[0].id))
         self.network.deallocate_fixed_ip(self.context, address)
         self.network.deallocate_fixed_ip(self.context, address2)
         self.network.deallocate_fixed_ip(self.context, address3)
         release_ip(address)
         release_ip(address2)
         release_ip(address3)
     for instance_id in instance_ids:
         db.instance_destroy(context.get_admin_context(), instance_id)
     self.context._project = self.projects[0]
     self.context.project_id = self.projects[0].id
     self.network.deallocate_fixed_ip(self.context, first)
     self._deallocate_address(0, first)
     release_ip(first)
Example #23
 def test_subnet_edge(self):
     """Makes sure that private ips don't overlap"""
     first = self._create_address(0)
     lease_ip(first)
     instance_ids = []
     for i in range(1, FLAGS.num_networks):
         instance_ref = self._create_instance(i, mac=utils.generate_mac())
         instance_ids.append(instance_ref['id'])
         address = self._create_address(i, instance_ref['id'])
         instance_ref = self._create_instance(i, mac=utils.generate_mac())
         instance_ids.append(instance_ref['id'])
         address2 = self._create_address(i, instance_ref['id'])
         instance_ref = self._create_instance(i, mac=utils.generate_mac())
         instance_ids.append(instance_ref['id'])
         address3 = self._create_address(i, instance_ref['id'])
         lease_ip(address)
         lease_ip(address2)
         lease_ip(address3)
         self.context._project = self.projects[i]
         self.context.project_id = self.projects[i].id
         self.assertFalse(
             self._is_allocated_in_project(address, self.projects[0].id))
         self.assertFalse(
             self._is_allocated_in_project(address2, self.projects[0].id))
         self.assertFalse(
             self._is_allocated_in_project(address3, self.projects[0].id))
         self.network.deallocate_fixed_ip(self.context, address)
         self.network.deallocate_fixed_ip(self.context, address2)
         self.network.deallocate_fixed_ip(self.context, address3)
         release_ip(address)
         release_ip(address2)
         release_ip(address3)
     for instance_id in instance_ids:
         db.instance_destroy(context.get_admin_context(), instance_id)
     self.context._project = self.projects[0]
     self.context.project_id = self.projects[0].id
     self.network.deallocate_fixed_ip(self.context, first)
     self._deallocate_address(0, first)
     release_ip(first)
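
Examples #22 and #23 also depend on module-level lease_ip() and release_ip() helpers that simulate a DHCP client taking and dropping a lease; their real implementation is not shown here. A stub that merely tracks leased addresses, enough to illustrate the call pattern used above, might look like this (purely hypothetical):

_leased_ips = set()

def lease_ip(address):
    # Hypothetical stub: record that the address currently holds a lease.
    _leased_ips.add(address)

def release_ip(address):
    # Hypothetical stub: drop the lease; set.remove() raising KeyError when
    # a test releases an address it never leased helps catch ordering bugs.
    _leased_ips.remove(address)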
Example #24
 def _create_instance(self, **kwargs):
     """Create a test instance"""
     inst = {}
     inst["image_id"] = "ami-test"
     inst["reservation_id"] = "r-fakeres"
     inst["user_id"] = self.user.id
     inst["project_id"] = self.project.id
     inst["instance_type"] = "m1.tiny"
     inst["mac_address"] = utils.generate_mac()
     inst["ami_launch_index"] = 0
     inst["vcpus"] = 1
     inst["availability_zone"] = kwargs.get("availability_zone", None)
     return db.instance_create(self.context, inst)["id"]
Example #25
 def _create_instance(self, **kwargs):
     """Create a test instance"""
     inst = {}
     inst['image_id'] = 'ami-test'
     inst['reservation_id'] = 'r-fakeres'
     inst['user_id'] = self.user.id
     inst['project_id'] = self.project.id
     inst['instance_type'] = 'm1.tiny'
     inst['mac_address'] = utils.generate_mac()
     inst['ami_launch_index'] = 0
     inst['vcpus'] = 1
     inst['availability_zone'] = kwargs.get('availability_zone', None)
     return db.instance_create(self.context, inst)['id']
Example #26
 def _create_instance(self, params={}):
     """Create a test instance"""
     inst = {}
     inst['image_id'] = 1
     inst['reservation_id'] = 'r-fakeres'
     inst['launch_time'] = '10'
     inst['user_id'] = self.user.id
     inst['project_id'] = self.project.id
     type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
     inst['instance_type_id'] = type_id
     inst['mac_address'] = utils.generate_mac()
     inst['ami_launch_index'] = 0
     inst.update(params)
     return db.instance_create(self.context, inst)['id']
Example #27
 def create_instance(self):
     instdir = model.InstanceDirectory()
     inst = instdir.new()
     # TODO(ja): add ami, ari, aki, user_data
     inst['reservation_id'] = 'r-fakeres'
     inst['launch_time'] = '10'
     inst['user_id'] = 'fake'
     inst['project_id'] = 'fake'
     inst['instance_type'] = 'm1.tiny'
     inst['node_name'] = FLAGS.node_name
     inst['mac_address'] = utils.generate_mac()
     inst['ami_launch_index'] = 0
     inst.save()
     return inst['instance_id']
Example #28
 def _create_instance(self):
     """Create a test instance"""
     inst = {}
     #inst['host'] = self.host
     #inst['name'] = 'instance-1234'
     inst['image_id'] = 1
     inst['reservation_id'] = 'r-fakeres'
     inst['launch_time'] = '10'
     inst['user_id'] = self.user.id
     inst['project_id'] = self.project.id
     inst['instance_type_id'] = 1
     inst['mac_address'] = utils.generate_mac()
     inst['ami_launch_index'] = 0
     return db.instance_create(self.context, inst)['id']
Example #29
 def create_instance(self):
     instdir = model.InstanceDirectory()
     inst = instdir.new()
     # TODO(ja): add ami, ari, aki, user_data
     inst['reservation_id'] = 'r-fakeres'
     inst['launch_time'] = '10'
     inst['user_id'] = 'fake'
     inst['project_id'] = 'fake'
     inst['instance_type'] = 'm1.tiny'
     inst['node_name'] = FLAGS.node_name
     inst['mac_address'] = utils.generate_mac()
     inst['ami_launch_index'] = 0
     inst.save()
     return inst['instance_id']
Example #30
    def test_run_attach_detach_volume(self):
        """Make sure volume can be attached and detached from instance."""
        inst = {}
        inst['image_id'] = 1
        inst['reservation_id'] = 'r-fakeres'
        inst['launch_time'] = '10'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type'] = 'm1.tiny'
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        instance_id = db.instance_create(self.context, inst)['id']
        mountpoint = "/dev/sdf"
        volume_id = self._create_volume()
        self.volume.create_volume(self.context, volume_id)
        if FLAGS.fake_tests:
            db.volume_attached(self.context, volume_id, instance_id,
                               mountpoint)
        else:
            self.compute.attach_volume(self.context,
                                       instance_id,
                                       volume_id,
                                       mountpoint)
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")
        self.assertEqual(vol['mountpoint'], mountpoint)
        instance_ref = db.volume_get_instance(self.context, volume_id)
        self.assertEqual(instance_ref['id'], instance_id)

        self.assertRaises(exception.Error,
                          self.volume.delete_volume,
                          self.context,
                          volume_id)
        if FLAGS.fake_tests:
            db.volume_detached(self.context, volume_id)
        else:
            self.compute.detach_volume(self.context,
                                       instance_id,
                                       volume_id)
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual(vol['status'], "available")

        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.Error,
                          db.volume_get,
                          self.context,
                          volume_id)
        db.instance_destroy(self.context, instance_id)
Example #31
 def run_vpn_instance(self, user, **kwargs):
     kwargs['image_id'] = FLAGS.vpn_image_id
     kwargs['owner_id'] = user.id
     launchstate = self._create_reservation(user, kwargs)
     launchstate['mac_address'] = utils.generate_mac()
     (address, launchstate['network_name']) = self.network.get_cloudpipe_address(str(launchstate['owner_id']), mac=str(launchstate['mac_address']))
     launchstate['private_dns_name'] = str(address)
     pending = {}
     launchstate = self._really_run_instance(user, kwargs, 0)
     pending[kwargs['instance_id']] = dict(launchstate)
     pending[kwargs['instance_id']]['state'] = node.Instance.NOSTATE
     # TODO(vish): pending instances will be lost on crash
     if(not self.instances.has_key('pending')):
         self.instances['pending'] = {}
     self.instances['pending'].update(pending)
Example #32
 def run_vpn_instance(self, user, **kwargs):
     kwargs['image_id'] = FLAGS.vpn_image_id
     kwargs['owner_id'] = user.id
     launchstate = self._create_reservation(user, kwargs)
     launchstate['mac_address'] = utils.generate_mac()
     (address,
      launchstate['network_name']) = self.network.get_cloudpipe_address(
          str(launchstate['owner_id']), mac=str(launchstate['mac_address']))
     launchstate['private_dns_name'] = str(address)
     pending = {}
     launchstate = self._really_run_instance(user, kwargs, 0)
     pending[kwargs['instance_id']] = dict(launchstate)
     pending[kwargs['instance_id']]['state'] = node.Instance.NOSTATE
     # TODO(vish): pending instances will be lost on crash
     if (not self.instances.has_key('pending')):
         self.instances['pending'] = {}
     self.instances['pending'].update(pending)
    def test_too_many_addresses(self):
        """
        Network size is 32, there are 5 addresses reserved for VPN.
        So we should get 23 usable addresses
        """
        net = network.get_project_network("project0", "default")
        hostname = "toomany-hosts"
        macs = {}
        addresses = {}
        for i in range(0, 22):
            macs[i] = utils.generate_mac()
            addresses[i] = network.allocate_ip("netuser", "project0", macs[i])
            self.dnsmasq.issue_ip(macs[i], addresses[i], hostname, net.bridge_name)

        self.assertRaises(NoMoreAddresses, network.allocate_ip, "netuser", "project0", utils.generate_mac())

        for i in range(0, 22):
            rv = network.deallocate_ip(addresses[i])
            self.dnsmasq.release_ip(macs[i], addresses[i], hostname, net.bridge_name)
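
The dnsmasq-style tests above drive self.dnsmasq.issue_ip() and self.dnsmasq.release_ip() directly, so the fixture only needs an object exposing those two methods. A minimal fake that simply accepts the calls (the class name and the no-op bodies are assumptions; the real fixture presumably does more):

class FakeDNSMasq(object):
    """Hypothetical stand-in for the dnsmasq lease hooks used in the tests."""

    def issue_ip(self, mac, ip, hostname, interface):
        # In a real deployment a DHCP lease would trigger this; the fake
        # just accepts the call so tests can invoke it explicitly.
        pass

    def release_ip(self, mac, ip, hostname, interface):
        # Counterpart of issue_ip; called after network.deallocate_ip().
        pass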
Example #34
 def run_instances(self, context, **kwargs):
     # passing all of the kwargs on to node.py
     logging.debug("Going to run instances...")
     # logging.debug(kwargs)
     # TODO: verify user has access to image
     launchstate = self._create_reservation(context.user, kwargs)
     pending = {}
     for num in range(int(launchstate['max_count'])):
         launchstate['mac_address'] = utils.generate_mac()
         (address, launchstate['network_name']) = self.network.allocate_address(str(launchstate['owner_id']), mac=str(launchstate['mac_address']))
         launchstate['private_dns_name'] = str(address)
         launchstate = self._really_run_instance(context.user, kwargs, num)
         pending[kwargs['instance_id']] = dict(launchstate)
         pending[kwargs['instance_id']]['state'] = node.Instance.NOSTATE
     # TODO(vish): pending instances will be lost on crash
     if(not self.instances.has_key('pending')):
         self.instances['pending'] = {}
     self.instances['pending'].update(pending)
     return defer.succeed(self.format_instances(context.user, launchstate['reservation_id']))
Example #35
 def _create_instance(self, **kwargs):
     """Create a test instance"""
     inst = {}
     inst['image_id'] = 1
     inst['reservation_id'] = 'r-fakeres'
     inst['user_id'] = self.user.id
     inst['project_id'] = self.project.id
     inst['instance_type'] = 'm1.tiny'
     inst['mac_address'] = utils.generate_mac()
     inst['vcpus'] = kwargs.get('vcpus', 1)
     inst['ami_launch_index'] = 0
     inst['availability_zone'] = kwargs.get('availability_zone', None)
     inst['host'] = kwargs.get('host', 'dummy')
     inst['memory_mb'] = kwargs.get('memory_mb', 20)
     inst['local_gb'] = kwargs.get('local_gb', 30)
     inst['launched_on'] = kwargs.get('launched_on', 'dummy')
     inst['state_description'] = kwargs.get('state_description', 'running')
     inst['state'] = kwargs.get('state', power_state.RUNNING)
     return db.instance_create(self.context, inst)['id']
Example #36
    def test_run_attach_detach_volume(self):
        """Make sure volume can be attached and detached from instance."""
        inst = {}
        inst['image_id'] = 1
        inst['reservation_id'] = 'r-fakeres'
        inst['launch_time'] = '10'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type'] = 'm1.tiny'
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        instance_id = db.instance_create(self.context, inst)['id']
        mountpoint = "/dev/sdf"
        volume_id = self._create_volume()
        self.volume.create_volume(self.context, volume_id)
        if FLAGS.fake_tests:
            db.volume_attached(self.context, volume_id, instance_id,
                               mountpoint)
        else:
            self.compute.attach_volume(self.context, instance_id, volume_id,
                                       mountpoint)
        vol = db.volume_get(context.get_admin_context(), volume_id)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")
        self.assertEqual(vol['mountpoint'], mountpoint)
        instance_ref = db.volume_get_instance(self.context, volume_id)
        self.assertEqual(instance_ref['id'], instance_id)

        self.assertRaises(exception.Error, self.volume.delete_volume,
                          self.context, volume_id)
        if FLAGS.fake_tests:
            db.volume_detached(self.context, volume_id)
        else:
            self.compute.detach_volume(self.context, instance_id, volume_id)
        vol = db.volume_get(self.context, volume_id)
        self.assertEqual(vol['status'], "available")

        self.volume.delete_volume(self.context, volume_id)
        self.assertRaises(exception.Error, db.volume_get, self.context,
                          volume_id)
        db.instance_destroy(self.context, instance_id)
Example #37
 def build_server_instance(self, env, context):
     reservation = utils.generate_uid("r")
     ltime = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
     inst = self.instdir.new()
     inst["name"] = env["server"]["name"]
     inst["image_id"] = env["server"]["imageId"]
     inst["instance_type"] = env["server"]["flavorId"]
     inst["user_id"] = context["user"].id
     inst["project_id"] = context["project"].id
     inst["reservation_id"] = reservation
     inst["launch_time"] = ltime
     inst["mac_address"] = utils.generate_mac()
     address = network.allocate_ip(inst["user_id"], inst["project_id"], mac=inst["mac_address"])
     inst["private_dns_name"] = str(address)
     inst["bridge_name"] = network.BridgedNetwork.get_network_for_project(
         inst["user_id"], inst["project_id"], "default"  # security group
     )["bridge_name"]
     # key_data, key_name, ami_launch_index
     # TODO(todd): key data or root password
     inst.save()
     return inst
Example #38
 def run_instances(self, context, **kwargs):
     # passing all of the kwargs on to node.py
     logging.debug("Going to run instances...")
     # logging.debug(kwargs)
     # TODO: verify user has access to image
     launchstate = self._create_reservation(context.user, kwargs)
     pending = {}
     for num in range(int(launchstate['max_count'])):
         launchstate['mac_address'] = utils.generate_mac()
         (address,
          launchstate['network_name']) = self.network.allocate_address(
              str(launchstate['owner_id']),
              mac=str(launchstate['mac_address']))
         launchstate['private_dns_name'] = str(address)
         launchstate = self._really_run_instance(context.user, kwargs, num)
         pending[kwargs['instance_id']] = dict(launchstate)
         pending[kwargs['instance_id']]['state'] = node.Instance.NOSTATE
     # TODO(vish): pending instances will be lost on crash
     if (not self.instances.has_key('pending')):
         self.instances['pending'] = {}
     self.instances['pending'].update(pending)
     return defer.succeed(
         self.format_instances(context.user, launchstate['reservation_id']))
Example #39
    def create(
        self,
        context,
        instance_type,
        image_id,
        kernel_id=None,
        ramdisk_id=None,
        min_count=1,
        max_count=1,
        display_name="",
        display_description="",
        key_name=None,
        key_data=None,
        security_group="default",
        availability_zone=None,
        user_data=None,
        onset_files=None,
    ):
        """Create the number of instances requested if quota and
        other arguments check out ok.
        """
        type_data = instance_types.INSTANCE_TYPES[instance_type]
        num_instances = quota.allowed_instances(context, max_count, type_data)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s," " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(
                _("Instance quota exceeded. You can only " "run %s more instances of this type.") % num_instances,
                "InstanceLimitExceeded",
            )

        image = self.image_service.show(context, image_id)
        if kernel_id is None:
            kernel_id = image.get("kernel_id", None)
        if ramdisk_id is None:
            ramdisk_id = image.get("ramdisk_id", None)
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ["default"]
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context, context.project_id, security_group_name)
            security_groups.append(group["id"])

        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair["public_key"]

        base_options = {
            "reservation_id": utils.generate_uid("r"),
            "image_id": image_id,
            "kernel_id": kernel_id or "",
            "ramdisk_id": ramdisk_id or "",
            "state_description": "scheduling",
            "user_id": context.user_id,
            "project_id": context.project_id,
            "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "instance_type": instance_type,
            "memory_mb": type_data["memory_mb"],
            "vcpus": type_data["vcpus"],
            "local_gb": type_data["local_gb"],
            "display_name": display_name,
            "display_description": display_description,
            "user_data": user_data or "",
            "key_name": key_name,
            "key_data": key_data,
            "locked": False,
            "availability_zone": availability_zone,
        }
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(), launch_index=num, **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance["id"]

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated, instance_id, security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if not hasattr(instance, "display_name") or instance.display_name == None:
                updates["display_name"] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" " instance %(instance_id)s") % locals())
            rpc.cast(
                context,
                FLAGS.scheduler_topic,
                {
                    "method": "run_instance",
                    "args": {
                        "topic": FLAGS.compute_topic,
                        "instance_id": instance_id,
                        "availability_zone": availability_zone,
                        "onset_files": onset_files,
                    },
                },
            )

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
Example #40
File: api.py Project: ed-/reddwarf
    def create(
        self,
        context,
        instance_type,
        image_id,
        kernel_id=None,
        ramdisk_id=None,
        min_count=1,
        max_count=1,
        display_name="",
        display_description="",
        key_name=None,
        key_data=None,
        security_group="default",
        availability_zone=None,
        user_data=None,
        metadata={},
        injected_files=None,
        admin_password=None,
    ):
        """Create the number and type of instances requested.

        Verifies that quota and other arguments are valid.

        """
        if not instance_type:
            instance_type = instance_types.get_default_instance_type()

        num_instances = quota.allowed_instances(context, max_count, instance_type)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s," " tried to run %(min_count)s instances") % locals())
            if num_instances <= 0:
                message = _("Instance quota exceeded. You cannot run any " "more instances of this type.")
            else:
                message = (
                    _("Instance quota exceeded. You can only run %s " "more instances of this type.") % num_instances
                )
            raise quota.QuotaError(message, "InstanceLimitExceeded")

        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, injected_files)

        image = self.image_service.show(context, image_id)

        os_type = None
        if "properties" in image and "os_type" in image["properties"]:
            os_type = image["properties"]["os_type"]

        if kernel_id is None:
            kernel_id = image["properties"].get("kernel_id", None)
        if ramdisk_id is None:
            ramdisk_id = image["properties"].get("ramdisk_id", None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ["default"]
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context, context.project_id, security_group_name)
            security_groups.append(group["id"])

        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair["public_key"]

        base_options = {
            "reservation_id": utils.generate_uid("r"),
            "image_id": image_id,
            "kernel_id": kernel_id or "",
            "ramdisk_id": ramdisk_id or "",
            "state": 0,
            "state_description": "scheduling",
            "user_id": context.user_id,
            "project_id": context.project_id,
            "launch_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "instance_type_id": instance_type["id"],
            "memory_mb": instance_type["memory_mb"],
            "vcpus": instance_type["vcpus"],
            "local_gb": instance_type["local_gb"],
            "display_name": display_name,
            "display_description": display_description,
            "user_data": user_data or "",
            "key_name": key_name,
            "key_data": key_data,
            "locked": False,
            "metadata": metadata,
            "availability_zone": availability_zone,
            "os_type": os_type,
        }
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(), launch_index=num, **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance["id"]

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated, instance_id, security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if not hasattr(instance, "display_name") or instance.display_name is None:
                updates["display_name"] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" " instance %(instance_id)s") % locals())

            # NOTE(sandy): For now we're just going to pass in the
            # instance_type record to the scheduler. In a later phase
            # we'll be ripping this whole for-loop out and deferring the
            # creation of the Instance record. At that point all this will
            # change.
            rpc.cast(
                context,
                FLAGS.scheduler_topic,
                {
                    "method": "run_instance",
                    "args": {
                        "topic": FLAGS.compute_topic,
                        "instance_id": instance_id,
                        "instance_type": instance_type,
                        "availability_zone": availability_zone,
                        "injected_files": injected_files,
                        "admin_password": admin_password,
                    },
                },
            )

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
Example #41
File: api.py Project: yosh/nova
    def create(self, context, instance_type,
               image_id, kernel_id=None, ramdisk_id=None,
               min_count=1, max_count=1,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata=[],
               onset_files=None):
        """Create the number of instances requested if quota and
        other arguments check out ok."""

        type_data = instance_types.get_instance_type(instance_type)
        num_instances = quota.allowed_instances(context, max_count, type_data)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s,"
                    " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                     "run %s more instances of this type.") %
                                   num_instances, "InstanceLimitExceeded")

        num_metadata = len(metadata)
        quota_metadata = quota.allowed_metadata_items(context, num_metadata)
        if quota_metadata < num_metadata:
            pid = context.project_id
            msg = (_("Quota exceeeded for %(pid)s,"
                     " tried to set %(num_metadata)s metadata properties")
                   % locals())
            LOG.warn(msg)
            raise quota.QuotaError(msg, "MetadataLimitExceeded")

        # Because metadata is stored in the DB, we hard-code the size limits
        # In future, we may support more variable length strings, so we act
        #  as if this is quota-controlled for forwards compatibility
        for metadata_item in metadata:
            k = metadata_item['key']
            v = metadata_item['value']
            if len(k) > 255 or len(v) > 255:
                pid = context.project_id
                msg = (_("Quota exceeeded for %(pid)s,"
                         " metadata property key or value too long")
                       % locals())
                LOG.warn(msg)
                raise quota.QuotaError(msg, "MetadataLimitExceeded")

        image = self.image_service.show(context, image_id)
        if kernel_id is None:
            kernel_id = image.get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image.get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" %
                       (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ['default']
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context,
                                                  context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type': instance_type,
            'memory_mb': type_data['memory_mb'],
            'vcpus': type_data['vcpus'],
            'local_gb': type_data['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone}
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated,
                                                    instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name') or
                    instance.display_name == None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
            rpc.cast(context,
                     FLAGS.scheduler_topic,
                     {"method": "run_instance",
                      "args": {"topic": FLAGS.compute_topic,
                               "instance_id": instance_id,
                               "availability_zone": availability_zone,
                               "onset_files": onset_files}})

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
Example #42
    def create(self, context, instance_type,
               image_id, kernel_id=None, ramdisk_id=None,
               min_count=1, max_count=1,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata={},
               injected_files=None):
        """Create the number and type of instances requested.

        Verifies that quota and other arguments are valid.

        """
        if not instance_type:
            instance_type = instance_types.get_default_instance_type()

        num_instances = quota.allowed_instances(context, max_count,
                                                instance_type)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(_("Quota exceeeded for %(pid)s,"
                    " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                     "run %s more instances of this type.") %
                                   num_instances, "InstanceLimitExceeded")

        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, injected_files)

        image = self.image_service.show(context, image_id)

        os_type = None
        if 'properties' in image and 'os_type' in image['properties']:
            os_type = image['properties']['os_type']

        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image['properties'].get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" %
                       (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ['default']
        if not type(security_group) is list:
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context,
                                                  context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state': 0,
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'local_gb': instance_type['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone,
            'os_type': os_type}
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated,
                                                    instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name') or
                    instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
            rpc.cast(context,
                     FLAGS.scheduler_topic,
                     {"method": "run_instance",
                      "args": {"topic": FLAGS.compute_topic,
                               "instance_id": instance_id,
                               "availability_zone": availability_zone,
                               "injected_files": injected_files}})

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
Example n. 43
    def create(self,
               context,
               instance_type,
               image_id,
               kernel_id=None,
               ramdisk_id=None,
               min_count=1,
               max_count=1,
               display_name='',
               display_description='',
               key_name=None,
               key_data=None,
               security_group='default',
               availability_zone=None,
               user_data=None,
               metadata={},
               injected_files=None):
        """Create the number and type of instances requested.

        Verifies that quota and other arguments are valid.

        """
        if not instance_type:
            instance_type = instance_types.get_default_instance_type()

        num_instances = quota.allowed_instances(context, max_count,
                                                instance_type)
        if num_instances < min_count:
            pid = context.project_id
            LOG.warn(
                _("Quota exceeeded for %(pid)s,"
                  " tried to run %(min_count)s instances") % locals())
            raise quota.QuotaError(
                _("Instance quota exceeded. You can only "
                  "run %s more instances of this type.") % num_instances,
                "InstanceLimitExceeded")

        self._check_metadata_properties_quota(context, metadata)
        self._check_injected_file_quota(context, injected_files)

        image = self.image_service.show(context, image_id)

        os_type = None
        if 'properties' in image and 'os_type' in image['properties']:
            os_type = image['properties']['os_type']

        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id', None)
        if ramdisk_id is None:
            ramdisk_id = image['properties'].get('ramdisk_id', None)
        # FIXME(sirp): is there a way we can remove null_kernel?
        # No kernel and ramdisk for raw images
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None
            LOG.debug(_("Creating a raw instance"))
        # Make sure we have access to kernel and ramdisk (if not raw)
        logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id))
        if kernel_id:
            self.image_service.show(context, kernel_id)
        if ramdisk_id:
            self.image_service.show(context, ramdisk_id)

        if security_group is None:
            security_group = ['default']
        if not isinstance(security_group, list):
            security_group = [security_group]

        security_groups = []
        self.ensure_default_security_group(context)
        for security_group_name in security_group:
            group = db.security_group_get_by_name(context, context.project_id,
                                                  security_group_name)
            security_groups.append(group['id'])

        if key_data is None and key_name:
            key_pair = db.key_pair_get(context, context.user_id, key_name)
            key_data = key_pair['public_key']

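        # Options shared by every instance in this request; per-instance
        # values (mac_address, launch_index) are merged in below.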
        base_options = {
            'reservation_id': utils.generate_uid('r'),
            'image_id': image_id,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'state': 0,
            'state_description': 'scheduling',
            'user_id': context.user_id,
            'project_id': context.project_id,
            'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'local_gb': instance_type['local_gb'],
            'display_name': display_name,
            'display_description': display_description,
            'user_data': user_data or '',
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata,
            'availability_zone': availability_zone,
            'os_type': os_type
        }
        elevated = context.elevated()
        instances = []
        LOG.debug(_("Going to run %s instances..."), num_instances)
        for num in range(num_instances):
            instance = dict(mac_address=utils.generate_mac(),
                            launch_index=num,
                            **base_options)
            instance = self.db.instance_create(context, instance)
            instance_id = instance['id']

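            # Associate the new instance with each requested security group,
            # using an elevated (admin) context for the db call.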
            elevated = context.elevated()
            if not security_groups:
                security_groups = []
            for security_group_id in security_groups:
                self.db.instance_add_security_group(elevated, instance_id,
                                                    security_group_id)

            # Set sane defaults if not specified
            updates = dict(hostname=self.hostname_factory(instance_id))
            if (not hasattr(instance, 'display_name')
                    or instance.display_name is None):
                updates['display_name'] = "Server %s" % instance_id

            instance = self.update(context, instance_id, **updates)
            instances.append(instance)

            pid = context.project_id
            uid = context.user_id
            LOG.debug(
                _("Casting to scheduler for %(pid)s/%(uid)s's"
                  " instance %(instance_id)s") % locals())
            rpc.cast(
                context, FLAGS.scheduler_topic, {
                    "method": "run_instance",
                    "args": {
                        "topic": FLAGS.compute_topic,
                        "instance_id": instance_id,
                        "availability_zone": availability_zone,
                        "injected_files": injected_files
                    }
                })

        for group_id in security_groups:
            self.trigger_security_group_members_refresh(elevated, group_id)

        return [dict(x.iteritems()) for x in instances]
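
A minimal usage sketch, not taken from the examples above: it assumes an already-constructed compute API object (api), an authenticated request context (ctxt), and a registered image id; those names are placeholders for illustration only, and injected_files is assumed to be a list of (path, contents) pairs.

    # Hypothetical caller of the create() API shown above; api, ctxt and the
    # image id are placeholder assumptions, not part of the examples.
    itype = instance_types.get_default_instance_type()
    instances = api.create(ctxt,
                           itype,
                           image_id='ami-00000001',
                           min_count=1,
                           max_count=2,
                           display_name='web',
                           security_group=['default'],
                           injected_files=[('/etc/motd', 'Hello from nova')])
    # create() returns a list of plain dicts, one per launched instance.
    for inst in instances:
        print(inst['id'])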