    def setUp(self):
        super(TestSimpleProportionalStrategy, self).setUp()
        # Set up a few simple cases that are mostly defaults.
        self.environments = {
            'one_unprovisioned_node_environment': {
                'nodes': [sb.NodeInput("caaa", "Compute", False, False, None)],
            },
            'one_provisioned_node_environment': {
                'nodes': [sb.NodeInput("caaa", "Compute", True, False, None)],
            },
            'two_node_environment': {
                'nodes': [
                    sb.NodeInput("caaa", "Compute", False, False, None),
                    sb.NodeInput("cbbb", "Compute", False, False, None),
                ],
            }
        }

        # Some environments are constructed programmatically, with random
        # provisioned and cached states. Some nodes will reference an
        # invalid image.
        node_counts = [0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 1000]
        flavors = ["IO", "Compute", "Memory"]
        images_with_invalid = []
        images_with_invalid.extend(sb_test.TEST_IMAGES)
        images_with_invalid.append(INVALID_IMAGE)
        for n_count in node_counts:
            print("Building random nodes for random-nodes(%d)" % n_count)
            environment = {'nodes': []}
            for n in range(n_count):
                flavor_pick = random.choice(flavors)
                image_pick_uuid = random.choice(images_with_invalid).uuid
                provisioned_choice = random.choice([False, True])

                cache_choice = False
                if not provisioned_choice:
                    cache_choice = random.choice([False, True])

                environment['nodes'].append(
                    sb.NodeInput("%s-%d" % (flavor_pick[0], n),
                                 flavor_pick,
                                 provisioned_choice,
                                 cache_choice,
                                 image_pick_uuid))
                print(str(environment['nodes'][-1]))
            self.environments["random-nodes(%d)" % n_count] = environment

        # Defaults
        for env_name, env_dict in six.iteritems(self.environments):
            env_dict['flavors'] = sb_test.TEST_FLAVORS
            env_dict['images'] = sb_test.TEST_IMAGES
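
Throughout these examples, NodeInput is built with five positional arguments: a node UUID, a flavor name, a provisioned flag, a cached flag, and the UUID of the cached image (or None). A minimal sketch of a compatible structure, assuming a plain namedtuple rather than the project's actual class, with field names mirroring the attributes checked in the convert_ironic_node test further below:

import collections

# Hypothetical stand-in for sb.NodeInput, shown only to document the
# argument order used in these examples; the real class may carry more
# behavior.
NodeInput = collections.namedtuple(
    'NodeInput',
    ['node_uuid', 'flavor', 'provisioned', 'cached', 'cached_image_uuid'])

idle_node = NodeInput('caaa', 'Compute', False, False, None)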
Example #2
    def test_images_with_zero_weight_not_cached(self):
        image_weights = {
            'Ubuntu': 5,
            'CoreOS': 10,
            'Windows': 0,
        }

        sb._load_image_weights_file.image_weights = image_weights

        CONF.set_override('default_image_weight', 0, 'strategy')

        test_scenario = {
            'num_images': 30,
            'images': TEST_IMAGES,
            'nodes': (
                [sb.NodeInput('c-%d' % n, 'compute', False, False, 'aaaa') for
                    n in range(0, 30)])
        }

        picked_images = (
            sb.choose_weighted_images_forced_distribution(**test_scenario))

        # Make sure no images were picked with a zero weight.
        expected_names = [name for name, weight in six.iteritems(image_weights)
                          if weight > 0]
        for image in picked_images:
            self.assertIn(image.name, expected_names,
                          "Found an unexpectedly cached image with a zero "
                          "weight. Image name: %s" % (image.name))
Example #3
def convert_ironic_node(ironic_node, known_flavors=None):
    flavor_name = resolve_flavor(ironic_node, known_flavors)

    if flavor_name is None:
        LOG.error("Unable to identify flavor of node '%(node)s'",
                  {'node': ironic_node.uuid})
        return None

    return sb.NodeInput(ironic_node.uuid, flavor_name,
                        is_node_provisioned(ironic_node),
                        is_node_cached(ironic_node),
                        get_node_cached_image_uuid(ironic_node))
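
Callers need to handle the None return; a small sketch of how a scout might convert a batch of Ironic nodes, dropping the ones whose flavor could not be resolved (this batching helper is an assumption and does not appear in the source):

def convert_ironic_nodes(ironic_nodes, known_flavors=None):
    # Convert every Ironic node, silently dropping the ones that
    # convert_ironic_node rejected (it returns None and logs an error).
    node_inputs = []
    for ironic_node in ironic_nodes:
        node_input = convert_ironic_node(ironic_node, known_flavors)
        if node_input is not None:
            node_inputs.append(node_input)
    return node_inputs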
Example #4
 def test_dont_eject_provisioned_nodes(self):
     """Don't try to eject nodes which are considered provisioned."""
     strategy = sps.SimpleProportionalStrategy()
     invalid_cached_and_provisioned_node = {
         'nodes': [sb.NodeInput("caaa", "Compute", True, True,
                                INVALID_IMAGE)],
         'flavors': sb_test.TEST_FLAVORS,
         'images': sb_test.TEST_IMAGES
     }
     strategy.update_current_state(**invalid_cached_and_provisioned_node)
     directives = strategy.directives()
     self.assertEqual(0, len(directives),
                      "Trying to eject a provisioned node!")
Example #5
 def test_convert_ironic_node(self):
     test_node = MockIronicNode(TEST_IRONIC_NODE_DATA)
     node_input = openstack_scout.convert_ironic_node(test_node)
     expected_node = strat_base.NodeInput(
         TEST_IRONIC_NODE_DATA['uuid'],
         TEST_IRONIC_NODE_DATA['extra']['flavor'], False, True,
         TEST_IRONIC_NODE_DATA['driver_info']['cache_image_id'])
     for attr in ('node_uuid', 'flavor', 'provisioned', 'cached',
                  'cached_image_uuid'):
         expected = getattr(expected_node, attr)
         got = getattr(node_input, attr)
         self.assertEqual(
             expected, got, "Attribute on node returned by "
             "convert_ironic_node did not match expectations. "
             "Expected '%(expected)s', got '%(got)s'." % ({
                 'expected': expected,
                 'got': got
             }))
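
The fixture itself is not shown here; a hypothetical shape for TEST_IRONIC_NODE_DATA that is consistent with the keys read above (all values are invented placeholders):

TEST_IRONIC_NODE_DATA = {
    'uuid': 'node-uuid-0001',                              # placeholder
    'extra': {'flavor': 'onmetal-compute-v1'},             # placeholder
    'driver_info': {'cache_image_id': 'image-uuid-aaaa'},  # placeholder
    # Other Ironic fields (e.g. provision_state) would determine the
    # provisioned/cached flags expected in the NodeInput comparison above.
}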
Example #6
    def _test_proportion_goal_versus_flavor(self, strat, directives, nodes,
                                            flavor):
        print("Testing flavor %s." % flavor.name)
        flavor_nodes = list(
            filter(lambda node: flavor.is_flavor_node(node), nodes))
        unprovisioned_node_count = len(sps.unprovisioned_nodes(flavor_nodes))
        available_node_count = len(
            sps.nodes_available_for_caching(flavor_nodes))
        cached_node_count = len(
            list(filter(lambda node: node.cached, flavor_nodes)))
        if directives:
            cache_directive_count = len(
                list(
                    filter(
                        lambda directive: isinstance(
                            directive, sb.CacheNode) and flavor.is_flavor_node(
                                sb.NodeInput(directive.node_uuid, '?')),
                        directives)))
        else:
            cache_directive_count = 0
        self.assertTrue(cache_directive_count <= available_node_count,
                        ("There shouldn't be more cache directives than "
                         "there are nodes available to cache."))

        total_percent_cached = 0
        if unprovisioned_node_count != 0 and available_node_count != 0:
            total_percent_cached = (cache_directive_count + cached_node_count
                                    ) / (unprovisioned_node_count)
            # This handles the fact that SimpleProportionalStrategy floors
            # the number of nodes to cache, so we don't always cache a node
            # if there's only a fractional node to cache.
            total_percent_cached += (1 / unprovisioned_node_count)
            self.assertTrue(
                (total_percent_cached >= strat.percentage_to_cache),
                ("The number of cache directives emitted by the "
                 "strategy does not fulfill the goal! Total percent to "
                 "be cached: %f, expected %f" %
                 (total_percent_cached, strat.percentage_to_cache)))
        else:
            self.assertTrue(
                cache_directive_count == 0,
                ("Since there are no available nodes to cache, the number "
                 "of cache directives should be zero, but got %d" %
                 (cache_directive_count)))
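
To make the flooring adjustment concrete, here is a small numeric sketch, assuming a 50% cache target and that the strategy issues floor(target * unprovisioned) directives minus what is already cached (the exact formula is an assumption about SimpleProportionalStrategy, not taken from its source):

import math

percentage_to_cache = 0.5        # assumed target, for illustration only
unprovisioned_node_count = 7
cached_node_count = 1

# Assumed directive count: floor the proportional goal, then subtract the
# nodes that are already cached.
cache_directive_count = max(
    int(math.floor(percentage_to_cache * unprovisioned_node_count)) -
    cached_node_count, 0)                                        # == 2

total_percent_cached = ((cache_directive_count + cached_node_count) /
                        float(unprovisioned_node_count))         # ~= 0.43
# Add back the fractional node the strategy floored away before comparing
# against the goal, exactly as the test above does.
total_percent_cached += 1.0 / unprovisioned_node_count           # ~= 0.57
assert total_percent_cached >= percentage_to_cache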
Example #7
 def test_determine_image_distribution(self):
     TEST_NODE_SET = [
         sb.NodeInput('c-1', 'compute', False, True, 'aaaa'),
         sb.NodeInput('c-2', 'compute', False, False, 'bbbb'),
         sb.NodeInput('c-3', 'compute', True, False, 'cccc'),
         sb.NodeInput('c-4', 'compute', False, True, 'aaaa'),
         sb.NodeInput('c-5', 'compute', False, True, 'bbbb'),
         sb.NodeInput('c-6', 'compute', False, True, 'dddd')
     ]
     distribution_by_uuid = sb._determine_image_distribution(TEST_NODE_SET)
     self.assertEqual(2, distribution_by_uuid['aaaa'])
     self.assertEqual(1, distribution_by_uuid['bbbb'])
     self.assertEqual(1, distribution_by_uuid['dddd'])
     self.assertEqual(0, distribution_by_uuid['cccc'])
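
The assertions imply that only cached, unprovisioned nodes are tallied per image UUID (the provisioned node c-3 contributes nothing, so 'cccc' reads as zero). A minimal sketch with that behavior, inferred from the assertions rather than copied from the project's _determine_image_distribution:

import collections

def determine_image_distribution(nodes):
    # Count cached, unprovisioned nodes per cached image UUID.  UUIDs that
    # were never counted (such as 'cccc' above) read as zero thanks to the
    # defaultdict.
    distribution_by_uuid = collections.defaultdict(int)
    for node in nodes:
        if node.cached and not node.provisioned:
            distribution_by_uuid[node.cached_image_uuid] += 1
    return distribution_by_uuid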
Example #8
    def test_image_weight_guided_node_ejection(self):
        test_envs = {
            'Already ideal distribution': {},
            'Ubuntu+10': {'Ubuntu': 10},
            'CentOS+5': {'CentOS': 5},
            'Ubuntu and CoreOS': {'Ubuntu': 10, 'CoreOS': 20},
            'TempleOS': {'TempleOS': 50},
            'A lot of minix!': {'Minix': 100},
            'Many offsets': {'Ubuntu': 4, 'Minix': 3, 'Redhat': 2,
                             'CoreOS': 1},
        }

        for env_name, image_offsets in six.iteritems(test_envs):
            print("Testing '%(name)s' on image_weight_guided_ejection." % {
                  'name': env_name})
            nodes = []

            image_frequencies = copy.deepcopy(self.WEIGHTED_IMAGES)
            for image_name, offset in six.iteritems(image_offsets):
                image_frequencies[image_name] += offset

            # Construct a set of nodes with the desired frequency
            for image_name, frequency in six.iteritems(image_frequencies):
                image = self.EJECTION_IMAGES_BY_NAME[image_name]
                for n in range(0, frequency):
                    nodes.append(
                        sb.NodeInput('c-%d' % len(nodes), 'compute',
                                     False, True, image.uuid))

            image_list = sb.image_weight_guided_ejection(self.EJECTION_IMAGES,
                                                         nodes)

            eject_frequencies = collections.defaultdict(lambda: 0)
            for image in image_list:
                eject_frequencies[image.name] += 1

            eject_frequency_list = (
                [(k, v) for k, v in six.iteritems(eject_frequencies)])

            eject_frequency_list.sort(key=lambda pair: pair[1])

            sorted_ejection_list = (
                [image_name for image_name, frequency in eject_frequency_list])

            # With no offsets, the distribution is already ideal and there
            # is nothing to compare, so move on to the next environment.
            if len(image_offsets) == 0:
                continue

            named_offsets = [(n, o) for n, o in six.iteritems(image_offsets)]
            sorted_offsets = sorted(named_offsets, key=lambda pair: pair[1])

            expected_sorted_ejection_list = (
                [image_name for image_name, offset in sorted_offsets])

            # The number of kinds of images ejected shouldn't exceed our
            # expected number of kinds.
            self.assertTrue(
                (len(sorted_ejection_list) <=
                    len(expected_sorted_ejection_list)))

            # It's OK if the number of kinds returned is less than expected,
            # as long as the relative order is maintained.
            if len(expected_sorted_ejection_list) > len(sorted_ejection_list):
                length_diff = (len(expected_sorted_ejection_list) -
                               len(sorted_ejection_list))
                expected_sorted_ejection_list = (
                    expected_sorted_ejection_list[length_diff:])

            # We make sure that the relative ejection rate of images matches
            # our general expectation.
            self.assertEqual(expected_sorted_ejection_list,
                             sorted_ejection_list)
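
The trimming step near the end can be illustrated with hypothetical data: if two images were offset but only one kind was actually ejected, the low-offset entries are dropped before the relative-order comparison:

expected_sorted_ejection_list = ['Redhat', 'Ubuntu']   # sorted by offset, ascending
sorted_ejection_list = ['Ubuntu']                      # only Ubuntu was ejected

length_diff = (len(expected_sorted_ejection_list) -
               len(sorted_ejection_list))
# Drop the least-offset names so only the tail, whose relative order must
# still match, is compared.
assert expected_sorted_ejection_list[length_diff:] == sorted_ejection_list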
Example #9
    def test_choose_weighted_images_forced_distribution(self):
        test_scenarios = {
            '10-all nodes available': {
                'num_images': 10,
                'images': TEST_IMAGES,
                'nodes': [sb.NodeInput('c-%d' % (n), 'compute', False, False,
                                       'aaaa') for n in range(0, 10)]
            },
            '100-all nodes available': {
                'num_images': 50,
                'images': TEST_IMAGES,
                'nodes': [sb.NodeInput('c-%d' % (n), 'compute', False, False,
                                       'aaaa') for n in range(0, 100)]
            },
            '1000-all nodes available': {
                'num_images': 1000,
                'images': TEST_IMAGES,
                'nodes': [sb.NodeInput('c-%d' % (n), 'compute', False, False,
                                       'aaaa') for n in range(0, 1000)]
            },
            'all nodes available - num of nodes matching image weight sum': {
                'num_images': self.IMAGE_WEIGHT_SUM,
                'images': TEST_IMAGES,
                'nodes': [sb.NodeInput('c-%d' % (n), 'compute', False, False,
                          'aaaa') for n in range(0, self.IMAGE_WEIGHT_SUM)]
            },
        }

        # Build a list which, coupled with random selection, should closely
        # match the image weights, so the already-cached nodes in these
        # scenarios roughly follow the target distribution.
        weighted_image_uuids = []
        for image in TEST_IMAGES:
            weighted_image_uuids.extend(
                [image.uuid
                 for n in range(0, self.WEIGHTED_IMAGES[image.name])])

        images_by_uuids = {image.uuid: image for image in TEST_IMAGES}

        # Generate some more varied scenarios.
        for num_nodes in [1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000]:
            new_scenario = {
                'images': TEST_IMAGES,
                'num_images': int(math.floor(num_nodes * 0.25)),
                'nodes': []
            }
            for n in range(0, num_nodes):
                cached = n % 4 == 0
                provisioned = n % 7 == 0
                cached_image_uuid = random.choice(weighted_image_uuids)
                generated_node = sb.NodeInput("c-%d" % (n),
                                              'compute',
                                              provisioned,
                                              cached,
                                              cached_image_uuid)
                new_scenario['nodes'].append(generated_node)
            test_scenarios['%d-random scenario' % (num_nodes)] = new_scenario

        # Now test each scenario.
        for name, values in six.iteritems(test_scenarios):
            print("Testing against '%s' scenario." % (name))
            picked_images = sb.choose_weighted_images_forced_distribution(
                **values)

            picked_distribution = collections.defaultdict(lambda: 0)
            for image in picked_images:
                picked_distribution[image.name] += 1

            self.assertEqual(values['num_images'], len(picked_images),
                             "Didn't get the expected number of selected "
                             "images from "
                             "choose_weighted_images_forced_distribution")

            num_already_cached = len([node for node in values['nodes']
                                      if node.cached and not node.provisioned])
            scale = (num_already_cached + values['num_images']) / sum(
                [self.WEIGHTED_IMAGES[image.name] for image in TEST_IMAGES])

            already_cached = collections.defaultdict(lambda: 0)
            for node in values['nodes']:
                if node.cached and not node.provisioned:
                    image = images_by_uuids[node.cached_image_uuid]
                    already_cached[image.name] += 1

            targeted_distribution = {
                image.name: (picked_distribution[image.name] +
                             already_cached[image.name])
                for image in TEST_IMAGES
            }

            print(''.join(["Picked distribution: %s\n" % (
                           str(picked_distribution)),
                           "Already cached distribution: %s\n" % (
                           str(already_cached)),
                           "Targeted distribution: %s\n" % (
                           str(targeted_distribution)),
                           "Image weights: %s\n" % str(self.WEIGHTED_IMAGES),
                           "scale factor: %f" % scale]))

            for image in values['images']:
                print("Inspecting image '%s'." % (image.name))
                image_weight = self.WEIGHTED_IMAGES[image.name]
                num_image_already_cached = len([
                    node for node in values['nodes'] if node.cached and
                    not node.provisioned and
                    node.cached_image_uuid == image.uuid])
                expected_num_of_selected_images = (
                    int(math.floor(scale * image_weight)) -
                    num_image_already_cached)
                # Sometimes an underweighted image will already be cached far
                # more than the current weights would dictate. Clamp the
                # expectation to zero.
                if expected_num_of_selected_images < 0:
                    expected_num_of_selected_images = 0
                num_picked = len(
                    [pi for pi in picked_images if pi.name == image.name])
                failure_msg = (
                    "The number of selected images for image "
                    "'%(image_name)s' did not match expectations. "
                    "Expected %(expected)d and got %(actual)d. " %
                    {'image_name': image.name,
                     'expected': expected_num_of_selected_images,
                     'actual': num_picked})
                self.assertAlmostEqual(num_picked,
                                       expected_num_of_selected_images,
                                       delta=1,
                                       msg=failure_msg)
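
The scale factor used above is easy to follow with small numbers; a hedged sketch with invented weights (the real values come from TEST_IMAGES and WEIGHTED_IMAGES in the fixtures):

import math

weighted_images = {'Ubuntu': 5, 'CoreOS': 3, 'ArchLinux': 2}   # invented; sum == 10
num_images = 15           # how many images the strategy must pick
num_already_cached = 5    # cached, unprovisioned nodes across all images

scale = (num_already_cached + num_images) / float(sum(weighted_images.values()))
# scale == 2.0, so Ubuntu should account for roughly 10 cached nodes overall.

num_ubuntu_already_cached = 3
expected_ubuntu_picks = (int(math.floor(scale * weighted_images['Ubuntu'])) -
                         num_ubuntu_already_cached)             # 10 - 3 == 7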
Example #10
 def setUp(self):
     super(TestNodeStatistics, self).setUp()
     self.test_nodes = [
         sb.NodeInput('c-1', 'Compute', False, True, 'aaaa'),
         sb.NodeInput('c-2', 'Compute', False, True, 'aaaa'),
         sb.NodeInput('c-3', 'Compute', False, True, 'aaaa'),
         sb.NodeInput('c-4', 'Compute', True, False, 'aaaa'),
         sb.NodeInput('c-5', 'Compute', True, False, 'aaaa'),
         sb.NodeInput('c-6', 'Compute', True, False, 'aaaa'),
         sb.NodeInput('c-7', 'Compute', False, True, 'bbbb'),
         sb.NodeInput('c-8', 'Compute', False, True, 'bbbb'),
         sb.NodeInput('c-9', 'Compute', True, True, 'bbbb'),
         sb.NodeInput('c-10', 'Compute', False, True, 'cccc'),
         sb.NodeInput('c-11', 'Compute', False, False, None),
         sb.NodeInput('c-12', 'Compute', False, False, None),
         # NOTE(ClifHouck): The following uuid should not be present in
         # TEST_IMAGES. Tests reaction to unknown uuids.
         sb.NodeInput('c-13', 'Compute', False, True, 'wasd'),
     ]
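
As a rough illustration of what such a fixture supports, a hedged sketch that tallies coarse node states (the project's actual node statistics code is not shown here):

def summarize_nodes(nodes):
    # Tally the three coarse states the fixture exercises.
    provisioned = sum(1 for node in nodes if node.provisioned)
    cached = sum(1 for node in nodes if node.cached and not node.provisioned)
    idle = sum(1 for node in nodes
               if not node.provisioned and not node.cached)
    return {'provisioned': provisioned, 'cached': cached, 'idle': idle}

# For self.test_nodes above this yields 4 provisioned, 7 cached and 2 idle
# nodes; the unknown image uuid 'wasd' on c-13 still counts as cached.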
Example #11
    ]

FAKE_FLAVOR_DATA = [
    sb.FlavorInput('io-flavor', lambda n: True),
    sb.FlavorInput('memory-flavor', lambda n: True),
    sb.FlavorInput('cpu-flavor', lambda n: True)
]

FAKE_IMAGE_DATA = [
    sb.ImageInput('Ubuntu', 'aaaa', 'ubuntu-checksum'),
    sb.ImageInput('CoreOS', 'aaaa', 'coreos-checksum'),
    sb.ImageInput('ArchLinux', 'aaaa', 'archlinux-checksum')
]

FAKE_NODE_DATA = [
    sb.NodeInput('abcd', 'io-flavor', False, False),
    sb.NodeInput('hjkl', 'memory-flavor', False, False),
    sb.NodeInput('asdf', 'compute-flavor', False, False)
]


class TestScheduler(base.TestCase):

    @mock.patch.object(scheduler.DirectorScheduler, 'periodic_tasks')
    @mock.patch('arsenal.director.onmetal_scout.OnMetalV1Scout')
    def setUp(self, onmetal_scout_mock, periodic_task_mock):
        super(TestScheduler, self).setUp()
        CONF.set_override('scout', 'onmetal_scout.OnMetalV1Scout', 'director')
        CONF.set_override('dry_run', False, 'director')
        # Make sure both rate limiters are off at the beginning of the test.
        CONF.set_override('cache_directive_rate_limit', 0, 'director')