    def test_most_matching_images_big(self, mock_get_meta_all):
        candidates = ['node1_net', 'node2', 'node3', 'node4']

        finalists = scheduler.Scheduler()._find_most_matching_images(
                    ['req_image1'], candidates)
        self.assertSetEqual(set(['node1_net']), set(finalists))

        finalists = scheduler.Scheduler()._find_most_matching_images(
                    ['req_image1', 'req_image2'], candidates)
        self.assertSetEqual(set(['node1_net']), set(finalists))

        finalists = scheduler.Scheduler()._find_most_matching_images(
                    ['req_image2'], candidates)
        self.assertSetEqual(set(['node1_net', 'node2', 'node3']), set(finalists))
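
# A plausible sketch of the matching logic the test above exercises: keep the
# candidates that have the highest count of requested images already cached.
# The images_by_node mapping is hypothetical; in the real tests the data comes
# from the mocked image metadata (mock_get_meta_all).
def find_most_matching_images(requested, candidates, images_by_node):
    best, finalists = -1, []
    for node in candidates:
        matched = sum(1 for img in requested
                      if img in images_by_node.get(node, []))
        if matched > best:
            best, finalists = matched, [node]
        elif matched == best:
            finalists.append(node)
    return finalists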
Example #2
    def test_any_node_but_not_network_node(self, mock_get_image_meta):
        self.fake_db.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'memory_available': 22000,
            'memory_max': 24000,
            'disk_free': 2000 * 1024 * 1024 * 1024
        })

        fake_inst = FakeInstance()
        fake_inst.db_setup(
            cpus=1,
            memory=1024,
            block_devices={'devices': [{'size': 8, 'base': 'some-os'}]},
        )
        nets = [{'network_uuid': 'uuid-net2'}]

        nodes = scheduler.Scheduler().place_instance(fake_inst, nets)
        self.assertSetEqual(
            set(self.fake_db.nodes) - {'node1_net'}, set(nodes))
Example #3
    def test_no_metrics(self):
        fake_inst = FakeInstance(cpus=1)

        exc = self.assertRaises(exceptions.LowResourceException,
                                scheduler.Scheduler().place_instance,
                                fake_inst, [])
        self.assertEqual('No nodes with metrics', str(exc))
Example #4
    def test_not_enough_ram_on_node(self):
        self.fake_db.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'memory_available': 10000,
            'memory_max': 10000,
            'memory_total_instance_actual': 15001,
            'disk_free': 2000 * 1024 * 1024 * 1024,
        })

        fake_inst = FakeInstance(
            cpus=1,
            memory=1,
            block_devices={'devices': [{'size': 21, 'base': 'some-os'}]})

        exc = self.assertRaises(exceptions.LowResourceException,
                                scheduler.Scheduler().place_instance,
                                fake_inst, [])
        self.assertEqual('No nodes with enough idle RAM', str(exc))
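
# The numbers above hint at a RAM overcommit check: 15001 MB of instance
# memory just exceeds 1.5 * memory_max (10000 MB), so every node is rejected.
# A minimal sketch assuming that 1.5 ratio (inferred from the test values,
# not confirmed by this code):
def has_idle_ram(metrics, requested_mb, overcommit=1.5):
    ceiling = metrics['memory_max'] * overcommit
    return metrics['memory_total_instance_actual'] + requested_mb <= ceiling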
Example #5
    def test_ok(self, mock_get_image_meta):
        self.fake_db.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'memory_available': 22000,
            'memory_max': 24000,
            'disk_free': 2000 * 1024 * 1024 * 1024
        })

        fake_inst = FakeInstance(
            cpus=1,
            memory=1024,
            block_devices={'devices': [{'size': 8, 'base': 'some-os'}]})

        nodes = scheduler.Scheduler().place_instance(fake_inst, [])
        self.assertSetEqual(
            set(self.fake_db.nodes) - {'node1_net'}, set(nodes))
Example #6
    def test_refresh(self):
        self.mock_etcd.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'memory_available': 22000,
            'memory_max': 24000,
            'disk_free_instances': 2000 * GiB,
            'cpu_total_instance_vcpus': 4,
            'cpu_available': 12
        })

        fake_inst = self.mock_etcd.create_instance('fake-inst', 'fakeuuid')

        s = scheduler.Scheduler()
        s.place_instance(fake_inst, None)
        self.assertEqual(22000, s.metrics['node1_net']['memory_available'])

        self.mock_etcd.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'memory_available': 11000,
            'memory_max': 24000,
            'disk_free_instances': 2000 * GiB,
            'cpu_total_instance_vcpus': 4,
            'cpu_available': 12
        })
        s.metrics_updated = time.time() - 400
        s.place_instance(fake_inst, None)
        self.assertEqual(11000, s.metrics['node1_net']['memory_available'])
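
# The refresh test above backdates metrics_updated by 400 seconds to force a
# re-read, implying the scheduler caches node metrics and refreshes them once
# they go stale. A minimal sketch of that caching pattern (the 300-second
# window is an assumption; the test only shows that 400 seconds is stale):
import time

class MetricsCache:
    REFRESH_SECONDS = 300  # assumed staleness window

    def __init__(self, fetch):
        self.fetch = fetch  # callable returning {node_name: metrics_dict}
        self.metrics = {}
        self.metrics_updated = 0

    def get(self):
        if time.time() - self.metrics_updated > self.REFRESH_SECONDS:
            self.metrics = self.fetch()
            self.metrics_updated = time.time()
        return self.metrics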
Example #7
    def test_single_node_that_has_network(self, mock_get_image_meta):
        self.fake_db.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'memory_available': 22000,
            'memory_max': 24000,
            'disk_free': 2000 * 1024 * 1024 * 1024
        })

        fake_inst = FakeInstance(
            cpus=1,
            memory=1024,
            block_devices={'devices': [{'size': 8, 'base': 'some-os'}]})
        nets = [{'network_uuid': 'uuid-net1'}]

        nodes = scheduler.Scheduler().place_instance(fake_inst, nets)
        self.assertSetEqual(set(['node3']), set(nodes))
Example #8
    def test_only_two(self):
        fake_inst = self.mock_etcd.create_instance('fake-inst', 'fakeuuid')
        nodes = scheduler.Scheduler().place_instance(
            fake_inst, [], candidates=['node1_net', 'node2'])
        self.assertSetEqual({'node2'}, set(nodes))
Example #9
def instance_preflight(instance_uuid, network):
    db.update_instance_state(instance_uuid, 'preflight')

    s = scheduler.Scheduler()
    instance = virt.from_db(instance_uuid)

    # Try to place on this node first
    try:
        s.place_instance(instance, network, candidates=[config.NODE_NAME])
        return None

    except exceptions.LowResourceException as e:
        db.add_event('instance', instance_uuid, 'schedule', 'retry', None,
                     'insufficient resources: ' + str(e))

    # Unsuccessful placement, check if reached placement attempt limit
    if instance.db_entry.get('placement_attempts') > 3:
        raise exceptions.AbortInstanceStartException('Too many start attempts')

    # Try placing on another node
    try:
        if instance.db_entry.get('requested_placement'):
            candidates = [instance.db_entry.get('requested_placement')]
        else:
            candidates = []
            for node in s.metrics.keys():
                if node != config.NODE_NAME:
                    candidates.append(node)

        candidates = s.place_instance(instance, network, candidates=candidates)
        return candidates[0]

    except exceptions.LowResourceException as e:
        db.add_event('instance', instance_uuid, 'schedule', 'failed', None,
                     'insufficient resources: ' + str(e))
        # This raise implies delete above
        raise exceptions.AbortInstanceStartException(
            'Unable to find suitable node')
Example #10
    def test_ok(self):
        self.mock_etcd.set_node_metrics_same()

        fake_inst = self.mock_etcd.create_instance('fake-inst', 'fakeuuid')

        nodes = scheduler.Scheduler().place_instance(fake_inst, [])
        self.assertSetEqual(
            set(self.mock_etcd.node_names) - {'node1_net'}, set(nodes))
Example #11
    def test_requested_too_many_cpu(self):
        self.fake_db.set_node_metrics_same({
            'cpu_max_per_instance': 5,
        })

        fake_inst = FakeInstance(cpus=6)

        exc = self.assertRaises(exceptions.LowResourceException,
                                scheduler.Scheduler().place_instance,
                                fake_inst, [])
        self.assertEqual('Requested vCPUs exceeds vCPU limit', str(exc))
Example #12
    def test_requested_too_many_cpu(self):
        self.mock_etcd.set_node_metrics_same({
            'cpu_max_per_instance': 5,
            'cpu_total_instance_vcpus': 4,
            'cpu_available': 12
        })

        fake_inst = self.mock_etcd.create_instance('fake-inst',
                                                   'fakeuuid',
                                                   cpus=6)
        exc = self.assertRaises(exceptions.LowResourceException,
                                scheduler.Scheduler().place_instance,
                                fake_inst, [])
        self.assertEqual('Requested vCPUs exceeds vCPU limit', str(exc))
Example #13
    def test_any_node_but_not_network_node(self):
        self.mock_etcd.create_instance('instance-1',
                                       'uuid-inst-1',
                                       place_on_node='node3')
        self.mock_etcd.set_node_metrics_same()

        fake_inst = self.mock_etcd.create_instance('fake-inst', 'fakeuuid')
        nets = [{'network_uuid': 'uuid-net2'}]

        nodes = scheduler.Scheduler().place_instance(fake_inst, nets)
        self.assertSetEqual(
            set(self.mock_etcd.node_names) - {'node1_net'}, set(nodes))
Example #14
    def test_not_enough_ram_on_node(self):
        self.mock_etcd.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'memory_available': 10000,
            'memory_max': 10000,
            'memory_total_instance_actual': 15001,
            'disk_free_instances': 2000 * GiB,
            'cpu_total_instance_vcpus': 4,
            'cpu_available': 12
        })

        fake_inst = self.mock_etcd.create_instance('fake-inst', 'fakeuuid')
        exc = self.assertRaises(exceptions.LowResourceException,
                                scheduler.Scheduler().place_instance,
                                fake_inst, [])
        self.assertEqual('No nodes with enough idle RAM', str(exc))
Example #15
    def test_not_enough_cpu(self):
        self.fake_db.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'cpu_total_instance_vcpus': 4 * 16,
            'memory_available': 5 * 1024 + 1024 - 1,
            'memory_max': 24000,
            'disk_free': 2000 * 1024 * 1024 * 1024
        })

        fake_inst = FakeInstance()
        fake_inst.db_setup(cpus=1)

        exc = self.assertRaises(exceptions.LowResourceException,
                                scheduler.Scheduler().place_instance,
                                fake_inst,
                                [])
        self.assertEqual('No nodes with enough idle CPU', str(exc))
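
# The CPU figures above imply an overcommit ceiling of
# cpu_max * cpu_max_per_instance total vCPUs per node: with 4 * 16 vCPUs
# already allocated, even a 1-vCPU instance is refused. A sketch under that
# assumption (the ceiling formula is an inference from the test values):
def has_idle_cpu(metrics, requested_vcpus):
    ceiling = metrics['cpu_max'] * metrics['cpu_max_per_instance']
    return metrics['cpu_total_instance_vcpus'] + requested_vcpus <= ceiling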
Example #16
def instance_preflight(instance_uuid, network):
    instance = virt.Instance.from_db(instance_uuid)
    if not instance:
        raise exceptions.InstanceNotInDBException(instance_uuid)

    instance.update_instance_state('preflight')

    # Try to place on this node
    s = scheduler.Scheduler()
    try:
        s.place_instance(instance, network, candidates=[config.NODE_NAME])
        return None

    except exceptions.LowResourceException as e:
        instance.add_event('schedule', 'retry', None,
                           'insufficient resources: ' + str(e))

    # Unsuccessful placement, check if reached placement attempt limit
    if instance.placement_attempts > 3:
        raise exceptions.AbortInstanceStartException(
            'Too many start attempts')

    # Try placing on another node
    try:
        if instance.requested_placement:
            # TODO(andy): Ask Mikal why this is not the current node?
            candidates = [instance.requested_placement]
        else:
            candidates = []
            for node in s.metrics.keys():
                if node != config.NODE_NAME:
                    candidates.append(node)

        candidates = s.place_instance(instance, network,
                                      candidates=candidates)
        instance.place_instance(candidates[0])
        return candidates[0]

    except exceptions.LowResourceException as e:
        instance.add_event('schedule', 'failed', None,
                           'insufficient resources: ' + str(e))
        # This raise implies delete above
        raise exceptions.AbortInstanceStartException(
            'Unable to find suitable node')
Example #17
    def test_anti_affinity_single_inst(self):
        self.mock_etcd.create_instance('instance-1',
                                       'uuid-inst-1',
                                       place_on_node='node3',
                                       metadata={'tags': ['nerd']})

        # Start test
        inst = self.mock_etcd.create_instance('instance-3',
                                              'uuid-inst-3',
                                              metadata={
                                                  "affinity": {
                                                      "cpu": {
                                                          "socialite": 2,
                                                          "nerd": -100,
                                                      }
                                                  },
                                              })
        nodes = scheduler.Scheduler().place_instance(inst, [])
        self.assertSetEqual({'node2', 'node4'}, set(nodes))
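
# A plausible sketch of the tag-based anti-affinity the test above exercises:
# each placed instance contributes its tags' weights to its node's score, and
# nodes with a negative score are dropped. Helper names here are illustrative,
# not the project's real API.
def affinity_scores(placed, weights):
    # placed: [(node, [tag, ...]), ...] for instances already scheduled
    scores = {}
    for node, tags in placed:
        for tag in tags:
            scores[node] = scores.get(node, 0) + weights.get(tag, 0)
    return scores

# With 'instance-1' (tag 'nerd') on node3 and weight nerd=-100, node3 scores
# -100 and is excluded, leaving {'node2', 'node4'} (node1_net being the
# network node, which these tests keep out of general placement).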
Example #18
    def test_not_enough_disk(self):
        self.fake_db.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'memory_available': 22000,
            'memory_max': 24000,
            'disk_free': 20 * 1024 * 1024 * 1024
        })

        fake_inst = FakeInstance()
        fake_inst.db_setup(cpus=1, memory=1024,
                           block_devices={'devices': [
                               {'size': 21, 'base': 'some-os'}
                           ]})

        exc = self.assertRaises(exceptions.LowResourceException,
                                scheduler.Scheduler().place_instance,
                                fake_inst,
                                [])
        self.assertEqual('No nodes with enough disk space', str(exc))
Example #19
    def test_not_enough_disk(self):
        self.mock_etcd.set_node_metrics_same({
            'cpu_max_per_instance': 16,
            'cpu_max': 4,
            'memory_available': 22000,
            'memory_max': 24000,
            'disk_free_instances': 20 * GiB,
            'cpu_total_instance_vcpus': 4,
            'cpu_available': 12
        })

        fake_inst = self.mock_etcd.create_instance('fake-inst',
                                                   'fakeuuid',
                                                   disk_spec=[{
                                                       'base': 'cirros',
                                                       'size': 21
                                                   }])

        exc = self.assertRaises(exceptions.LowResourceException,
                                scheduler.Scheduler().place_instance,
                                fake_inst, [])
        self.assertEqual('No nodes with enough disk space', str(exc))
Example #20
    def test_no_such_node(self):
        fake_inst = self.mock_etcd.create_instance('fake-inst', 'fakeuuid')
        self.assertRaises(exceptions.CandidateNodeNotFoundException,
                          scheduler.Scheduler().place_instance,
                          fake_inst, [],
                          candidates=['barry'])
Example #21
    def test_no_metrics(self):
        fake_inst = self.mock_etcd.create_instance('fake-inst', 'fakeuuid')
        exc = self.assertRaises(exceptions.LowResourceException,
                                scheduler.Scheduler().place_instance,
                                fake_inst, [])
        self.assertEqual('No nodes with metrics', str(exc))
Example #22
    def post(self,
             name=None,
             cpus=None,
             memory=None,
             network=None,
             disk=None,
             ssh_key=None,
             user_data=None,
             placed_on=None,
             namespace=None,
             instance_uuid=None):
        global SCHEDULER

        # We need to sanitise the name so its safe for DNS
        name = re.sub(r'([^a-zA-Z0-9_\-])', '', name)

        if not namespace:
            namespace = get_jwt_identity()

        # If accessing a foreign namespace, we need to be an admin
        if get_jwt_identity() not in [namespace, 'system']:
            return error(
                401,
                'only admins can create resources in a different namespace')

        # The instance needs to exist in the DB before network interfaces are created
        if not instance_uuid:
            instance_uuid = str(uuid.uuid4())
            db.add_event('instance', instance_uuid, 'uuid allocated', None,
                         None, None)

        # Create instance object
        instance = virt.from_db(instance_uuid)
        if instance:
            if get_jwt_identity() not in [
                    instance.db_entry['namespace'], 'system'
            ]:
                LOG.info('instance(%s): instance not found, ownership test' %
                         instance_uuid)
                return error(404, 'instance not found')

        if not instance:
            instance = virt.from_definition(uuid=instance_uuid,
                                            name=name,
                                            disks=disk,
                                            memory_mb=memory,
                                            vcpus=cpus,
                                            ssh_key=ssh_key,
                                            user_data=user_data,
                                            owner=namespace)

        if not SCHEDULER:
            SCHEDULER = scheduler.Scheduler()

        # Have we been placed?
        if not placed_on:
            candidates = SCHEDULER.place_instance(instance, network)
            if len(candidates) == 0:
                db.add_event('instance', instance_uuid, 'schedule', 'failed',
                             None, 'insufficient resources')
                db.update_instance_state(instance_uuid, 'error')
                return error(507, 'insufficient capacity')

            placed_on = candidates[0]
            db.place_instance(instance_uuid, placed_on)
            db.add_event('instance', instance_uuid, 'placement', None, None,
                         placed_on)

        else:
            try:
                candidates = SCHEDULER.place_instance(instance,
                                                      network,
                                                      candidates=[placed_on])
                if len(candidates) == 0:
                    db.add_event('instance', instance_uuid, 'schedule',
                                 'failed', None, 'insufficient resources')
                    db.update_instance_state(instance_uuid, 'error')
                    return error(507, 'insufficient capacity')
            except scheduler.CandidateNodeNotFoundException as e:
                return error(404, 'node not found: %s' % e)

        # Have we been placed on a different node?
        if placed_on != config.parsed.get('NODE_NAME'):
            body = flask_get_post_body()
            body['placed_on'] = placed_on
            body['instance_uuid'] = instance_uuid
            body['namespace'] = namespace

            token = util.get_api_token(
                'http://%s:%d' % (placed_on, config.parsed.get('API_PORT')),
                namespace=namespace)
            r = requests.request('POST',
                                 'http://%s:%d/instances' %
                                 (placed_on, config.parsed.get('API_PORT')),
                                 data=json.dumps(body),
                                 headers={
                                     'Authorization': token,
                                     'User-Agent': util.get_user_agent()
                                 })

            LOG.info('Returning proxied request: %d, %s' %
                     (r.status_code, r.text))
            resp = flask.Response(r.text, mimetype='application/json')
            resp.status_code = r.status_code
            return resp

        # Check we can get the required IPs
        nets = {}
        allocations = {}

        def error_with_cleanup(status_code, message):
            for network_uuid in allocations:
                n = net.from_db(network_uuid)
                for addr, _ in allocations[network_uuid]:
                    with db.get_lock('sf/ipmanager/%s' % n.uuid, ttl=120) as _:
                        ipm = db.get_ipmanager(n.uuid)
                        ipm.release(addr)
                        db.persist_ipmanager(n.uuid, ipm.save())
            return error(status_code, message)

        order = 0
        if network:
            for netdesc in network:
                if 'network_uuid' not in netdesc or not netdesc['network_uuid']:
                    return error_with_cleanup(404, 'network not specified')

                if netdesc['network_uuid'] not in nets:
                    n = net.from_db(netdesc['network_uuid'])
                    if not n:
                        return error_with_cleanup(
                            404,
                            'network %s not found' % netdesc['network_uuid'])
                    nets[netdesc['network_uuid']] = n
                    n.create()

                with db.get_lock('sf/ipmanager/%s' % netdesc['network_uuid'],
                                 ttl=120) as _:
                    db.add_event('network', netdesc['network_uuid'],
                                 'allocate address', None, None, instance_uuid)
                    allocations.setdefault(netdesc['network_uuid'], [])
                    ipm = db.get_ipmanager(netdesc['network_uuid'])
                    if 'address' not in netdesc or not netdesc['address']:
                        netdesc['address'] = ipm.get_random_free_address()
                    else:
                        if not ipm.reserve(netdesc['address']):
                            return error_with_cleanup(
                                409, 'address %s in use' % netdesc['address'])
                    db.persist_ipmanager(netdesc['network_uuid'], ipm.save())
                    allocations[netdesc['network_uuid']].append(
                        (netdesc['address'], order))

                if 'model' not in netdesc or not netdesc['model']:
                    netdesc['model'] = 'virtio'

                db.create_network_interface(str(uuid.uuid4()), netdesc,
                                            instance_uuid, order)

                order += 1

        # Initialise metadata
        db.persist_metadata('instance', instance_uuid, {})

        # Now we can start the instance
        with db.get_lock('sf/instance/%s' % instance.db_entry['uuid'],
                         ttl=900) as lock:
            with util.RecordedOperation('ensure networks exist',
                                        instance) as _:
                for network_uuid in nets:
                    n = nets[network_uuid]
                    n.ensure_mesh()
                    n.update_dhcp()

            with util.RecordedOperation('instance creation', instance) as _:
                instance.create(lock=lock)

            for iface in db.get_instance_interfaces(instance.db_entry['uuid']):
                db.update_network_interface_state(iface['uuid'], 'created')

            return db.get_instance(instance_uuid)
Example #23
    def post(self,
             name=None,
             cpus=None,
             memory=None,
             network=None,
             disk=None,
             ssh_key=None,
             user_data=None,
             placed_on=None,
             namespace=None,
             instance_uuid=None,
             video=None):
        global SCHEDULER

        # Check that the instance name is safe for use as a DNS host name
        if name != re.sub(r'([^a-zA-Z0-9_\-])', '', name) or len(name) > 63:
            return error(400,
                         'instance name must be useable as a DNS host name')

        # Sanity check
        if not disk:
            return error(400, 'instance must specify at least one disk')
        for d in disk:
            if not isinstance(d, dict):
                return error(400,
                             'disk specification should contain JSON objects')

        if network:
            for n in network:
                if not isinstance(n, dict):
                    return error(
                        400,
                        'network specification should contain JSON objects')

                if 'network_uuid' not in n:
                    return error(
                        400, 'network specification is missing network_uuid')

        if not video:
            video = {'model': 'cirrus', 'memory': 16384}

        if not namespace:
            namespace = get_jwt_identity()

        # Only system can specify a uuid
        if instance_uuid and get_jwt_identity() != 'system':
            return error(401, 'only system can specify an instance uuid')

        # If accessing a foreign namespace, we need to be an admin
        if get_jwt_identity() not in [namespace, 'system']:
            return error(
                401,
                'only admins can create resources in a different namespace')

        # The instance needs to exist in the DB before network interfaces are created
        if not instance_uuid:
            instance_uuid = str(uuid.uuid4())
            db.add_event('instance', instance_uuid, 'uuid allocated', None,
                         None, None)

        # Create instance object
        instance = virt.from_db(instance_uuid)
        if instance:
            if get_jwt_identity() not in [
                    instance.db_entry['namespace'], 'system'
            ]:
                logutil.info([virt.ThinInstance(instance_uuid)],
                             'Instance not found, ownership test')
                return error(404, 'instance not found')

        if not instance:
            instance = virt.from_definition(uuid=instance_uuid,
                                            name=name,
                                            disks=disk,
                                            memory_mb=memory,
                                            vcpus=cpus,
                                            ssh_key=ssh_key,
                                            user_data=user_data,
                                            owner=namespace,
                                            video=video,
                                            requested_placement=placed_on)

        # Initialise metadata
        db.persist_metadata('instance', instance_uuid, {})

        # Allocate IP addresses
        order = 0
        if network:
            for netdesc in network:
                n = net.from_db(netdesc['network_uuid'])
                if not n:
                    db.enqueue_instance_delete(
                        config.parsed.get('NODE_NAME'), instance_uuid, 'error',
                        'missing network %s during IP allocation phase' %
                        netdesc['network_uuid'])
                    return error(
                        404, 'network %s not found' % netdesc['network_uuid'])

                with db.get_lock('ipmanager',
                                 None,
                                 netdesc['network_uuid'],
                                 ttl=120):
                    db.add_event('network', netdesc['network_uuid'],
                                 'allocate address', None, None, instance_uuid)
                    ipm = db.get_ipmanager(netdesc['network_uuid'])
                    if 'address' not in netdesc or not netdesc['address']:
                        netdesc['address'] = ipm.get_random_free_address()
                    else:
                        if not ipm.reserve(netdesc['address']):
                            db.enqueue_instance_delete(
                                config.parsed.get('NODE_NAME'), instance_uuid,
                                'error',
                                'failed to reserve an IP on network %s' %
                                netdesc['network_uuid'])
                            return error(
                                409, 'address %s in use' % netdesc['address'])

                    db.persist_ipmanager(netdesc['network_uuid'], ipm.save())

                if 'model' not in netdesc or not netdesc['model']:
                    netdesc['model'] = 'virtio'

                db.create_network_interface(str(uuid.uuid4()), netdesc,
                                            instance_uuid, order)

                order += 1

        if not SCHEDULER:
            SCHEDULER = scheduler.Scheduler()

        try:
            # Have we been placed?
            if not placed_on:
                candidates = SCHEDULER.place_instance(instance, network)
                placement = candidates[0]

            else:
                SCHEDULER.place_instance(instance,
                                         network,
                                         candidates=[placed_on])
                placement = placed_on

        except exceptions.LowResourceException as e:
            db.add_event('instance', instance_uuid, 'schedule', 'failed', None,
                         'insufficient resources: ' + str(e))
            db.enqueue_instance_delete(config.parsed.get('NODE_NAME'),
                                       instance_uuid, 'error',
                                       'scheduling failed')
            return error(507, str(e))

        except exceptions.CandidateNodeNotFoundException as e:
            db.add_event('instance', instance_uuid, 'schedule', 'failed', None,
                         'candidate node not found: ' + str(e))
            db.enqueue_instance_delete(config.parsed.get('NODE_NAME'),
                                       instance_uuid, 'error',
                                       'scheduling failed')
            return error(404, 'node not found: %s' % e)

        # Record placement
        db.place_instance(instance_uuid, placement)
        db.add_event('instance', instance_uuid, 'placement', None, None,
                     placement)

        # Create a queue entry for the instance start
        tasks = [{
            'type': 'instance_preflight',
            'instance_uuid': instance_uuid,
            'network': network
        }]
        for disk in instance.db_entry['block_devices']['devices']:
            if 'base' in disk and disk['base']:
                tasks.append({
                    'type': 'image_fetch',
                    'instance_uuid': instance_uuid,
                    'url': disk['base']
                })
        tasks.append({
            'type': 'instance_start',
            'instance_uuid': instance_uuid,
            'network': network
        })

        # Enqueue creation tasks on desired node task queue
        db.enqueue(placement, {'tasks': tasks})
        db.add_event('instance', instance_uuid, 'create', 'enqueued', None,
                     None)

        # Watch for a while and return results if things are fast, give up
        # after a while and just return the current state
        start_time = time.time()
        i = db.get_instance(instance_uuid)
        while time.time() - start_time < config.parsed.get('API_ASYNC_WAIT'):
            if i['state'] in ['created', 'deleted', 'error']:
                return i
            time.sleep(0.5)
            i = db.get_instance(instance_uuid)
        return i