def setUp(self):
    super(TestFetchQuantityInstanceUsageAgg, self).setUp()
    # configure the system with a dummy messaging adapter
    ConfigInitializer.basic_config(
        default_config_files=[
            'tests/functional/test_resources/config/'
            'test_config_with_dummy_messaging_adapter.conf'])
    # reset the metric list in the dummy adapter
    if not DummyAdapter.adapter_impl:
        DummyAdapter.init()
    DummyAdapter.adapter_impl.metric_list = []
Code example #2

def setUp(self):
    super(TestVmCpuAllocatedAgg, self).setUp()
    # configure the system with a dummy messaging adapter
    ConfigInitializer.basic_config(default_config_files=[
        'tests/functional/test_resources/config/'
        'test_config_with_dummy_messaging_adapter.conf'
    ])
    # reset the metric list in the dummy adapter
    if not DummyAdapter.adapter_impl:
        DummyAdapter.init()
    DummyAdapter.adapter_impl.metric_list = []
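
Both fixtures leave the dummy adapter holding an empty metric_list, so a test can run an aggregation and then assert against whatever the adapter captured. A minimal sketch of that pattern follows; the test name and the expected metric name are illustrative, and the metric dict layout is assumed to match what DummyAdapter.send_metric receives:

def test_aggregated_metrics_are_published(self):
    # ... run the aggregation under test here (omitted) ...

    # DummyAdapter records every metric passed to send_metric
    # instead of publishing it to kafka
    metric_list = DummyAdapter.adapter_impl.metric_list
    self.assertTrue(len(metric_list) > 0)

    # hypothetical assertion; the actual metric names depend on
    # the transform spec driving the test
    metric_names = [m.get('metric').get('name') for m in metric_list]
    self.assertIn('mem.total_mb_agg', metric_names)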
Code example #3
    def insert(transform_context, instance_usage_df):
        """write instance usage data to kafka"""

        transform_spec_df = transform_context.transform_spec_df_info

        agg_params = transform_spec_df.select(
            "aggregation_params_map"
            ".dimension_list").collect()[0].asDict()

        cfg.CONF.set_override(
            'adapter',
            'tests.functional.messaging.adapter:DummyAdapter',
            group='messaging')

        # Approach 1
        #
        # using foreachPartition to iterate through the elements of an
        # RDD is the recommended approach, so as not to overwhelm kafka
        # with a huge number of connections (though in our case the
        # MessageAdapter does store the adapter_impl, so we should not
        # be creating many producers anyway)

        # using foreachPartition was causing some serialization (cPickle)
        # problems, where a few libraries such as kafka.SimpleProducer and
        # oslo_config.cfg were not available on the worker processes
        #
        # removing _write_metrics_from_partition for now in favor of
        # Approach 2
        #

        # instance_usage_df_agg_params = instance_usage_df.rdd.map(
        #     lambda x: InstanceUsageDataAggParams(x,
        #                                        agg_params))
        # instance_usage_df_agg_params.foreachPartition(
        #     DummyInsert._write_metrics_from_partition)

        #
        # Approach 2
        #
        # using collect() to fetch all elements of the RDD
        # to the driver and write them to kafka
        #

        for instance_usage_row in instance_usage_df.collect():
            metric = InsertComponent._get_metric(instance_usage_row,
                                                 agg_params)
            # validate the metric before sending it
            if InsertComponent._validate_metric(metric):
                DummyAdapter.send_metric(metric)
        return instance_usage_df
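
The commented-out Approach 1 above relied on a _write_metrics_from_partition helper that has since been removed. For reference, a rough reconstruction of what it would look like follows; this is a sketch, not the project's actual code, and it assumes InstanceUsageDataAggParams exposes the row and the aggregation params as instance_usage_data and agg_params attributes:

    @staticmethod
    def _write_metrics_from_partition(usage_agg_params_iter):
        # each element pairs one instance usage row with the
        # aggregation params; the whole partition is written from a
        # single task, so only one producer per partition is needed
        for usage_agg_params in usage_agg_params_iter:
            row = usage_agg_params.instance_usage_data  # assumed attribute
            agg_params = usage_agg_params.agg_params    # assumed attribute
            metric = InsertComponent._get_metric(row, agg_params)
            if InsertComponent._validate_metric(metric):
                DummyAdapter.send_metric(metric)

This is the helper that would have been passed to foreachPartition in the commented-out snippet, and it is the pattern that ran into the cPickle serialization problems described above.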