def main_service():
    """Method to use with Openstack service."""
    ConfigInitializer.basic_config()
    LogUtils.init_logger(__name__)
    launcher = os_service.ServiceLauncher(cfg.CONF, restart_method='mutate')
    launcher.launch_service(Transform())
    launcher.wait()
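
# A minimal usage sketch (assumed, not from the monasca-transform source):
# running this module directly starts the service through main_service();
# in a packaged deployment the entry point is presumably wired up elsewhere.
if __name__ == "__main__":
    main_service()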
    def test_use_default_config_file(self):

        ConfigInitializer.basic_config(default_config_files=[])

        self.assertEqual(
            'monasca_transform.mysql_offset_specs:MySQLOffsetSpecs',
            cfg.CONF.repositories.offsets)

        self.assertEqual(
            'monasca_transform.data_driven_specs.'
            'mysql_data_driven_specs_repo:MySQLDataDrivenSpecsRepo',
            cfg.CONF.repositories.data_driven_specs)

        self.assertEqual('mysql:thin',
                         cfg.CONF.database.server_type)
        self.assertEqual('localhost',
                         cfg.CONF.database.host)
        self.assertEqual('monasca_transform',
                         cfg.CONF.database.database_name)
        self.assertEqual('m-transform',
                         cfg.CONF.database.username)
        self.assertEqual('password',
                         cfg.CONF.database.password)
        self.assertIsNone(cfg.CONF.database.ca_file)
        self.assertFalse(cfg.CONF.database.use_ssl)
def invoke():
    # initialize configuration
    ConfigInitializer.basic_config()

    # app name
    application_name = "mon_metrics_kafka"

    my_spark_conf = SparkConf().setAppName(application_name)

    spark_context = SparkContext(conf=my_spark_conf)

    # read at the configured interval
    spark_streaming_context = \
        StreamingContext(spark_context, cfg.CONF.service.stream_interval)

    kafka_stream = MonMetricsKafkaProcessor.get_kafka_stream(
        cfg.CONF.messaging.topic,
        spark_streaming_context)

    # transform to recordstore
    MonMetricsKafkaProcessor.transform_to_recordstore(kafka_stream)

    # catch interrupt, stop streaming context gracefully
    # signal.signal(signal.SIGINT, signal_handler)

    # start processing
    spark_streaming_context.start()

    # FIXME: stop spark context to relinquish resources

    # FIXME: specify cores, so as not to use all the resources on the cluster.

    # FIXME: HA deploy multiple masters, may be one on each control node

    try:
        # Wait for the Spark driver to "finish"
        spark_streaming_context.awaitTermination()
    except Exception as e:
        MonMetricsKafkaProcessor.log_debug(
            "Exception raised during Spark execution : " + str(e))
        # One exception that can occur here is the result of the saved
        # kafka offsets being obsolete/out of range.  Delete the saved
        # offsets to improve the chance of success on the next execution.

        # TODO(someone): instead of deleting all offsets for an application,
        # delete only the latest revision
        MonMetricsKafkaProcessor.log_debug(
            "Deleting saved offsets for chance of success on next execution")

        MonMetricsKafkaProcessor.reset_kafka_offsets(application_name)

        # delete pre hourly processor offsets
        if cfg.CONF.stage_processors.pre_hourly_processor_enabled:
            PreHourlyProcessor.reset_kafka_offsets()
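

# A hedged sketch (assumed, not from the monasca-transform source) of the
# signal_handler referenced by the commented-out signal.signal(...) call in
# invoke() above. The helper name and the way the StreamingContext is passed
# in are assumptions; StreamingContext.stop() does accept the stopSparkContext
# and stopGraceFully keyword arguments in PySpark.
import signal


def install_interrupt_handler(spark_streaming_context):
    """Stop the streaming context gracefully on SIGINT."""

    def signal_handler(signum, frame):
        # finish in-flight batches, then release the SparkContext as well
        spark_streaming_context.stop(stopSparkContext=True,
                                     stopGraceFully=True)

    signal.signal(signal.SIGINT, signal_handler)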
    def setUp(self):
        super(SparkTest, self).setUp()
        # configure the system with a dummy messaging adapter
        ConfigInitializer.basic_config(
            default_config_files=[
                'tests/unit/test_resources/config/'
                'test_config_with_dummy_messaging_adapter.conf'])
        # reset metric_id list dummy adapter
        if not DummyAdapter.adapter_impl:
            DummyAdapter.init()
        DummyAdapter.adapter_impl.metric_list = []
    def setUp(self):
        super(TestVmCpuAllocatedAgg, self).setUp()
        # configure the system with a dummy messaging adapter
        ConfigInitializer.basic_config(default_config_files=[
            'tests/functional/test_resources/config/'
            'test_config_with_dummy_messaging_adapter.conf'
        ])
        # reset metric_id list dummy adapter
        if not DummyAdapter.adapter_impl:
            DummyAdapter.init()
        DummyAdapter.adapter_impl.metric_list = []
    def setUp(self):
        super(TestFetchQuantityInstanceUsageAgg, self).setUp()
        # configure the system with a dummy messaging adapter
        ConfigInitializer.basic_config(
            default_config_files=[
                'tests/functional/test_resources/config/'
                'test_config_with_dummy_messaging_adapter.conf'])
        # reset metric_id list dummy adapter
        if not DummyAdapter.adapter_impl:
            DummyAdapter.init()
        DummyAdapter.adapter_impl.metric_list = []
    def setUp(self):
        ConfigInitializer.basic_config(default_config_files=[
            'tests/functional/test_resources/config/test_config.conf'
        ])
        self.config = Config()
        self.config.config(group='database',
                           use_ssl=True,
                           host='test_ssl_hostname',
                           server_type='jdbc_driver',
                           database_name='db_name',
                           username='******',
                           password='******',
                           ca_file='ca_file')
        self.config.setUp()
    def test_use_specific_config_file(self):

        ConfigInitializer.basic_config(default_config_files=[
            'tests/functional/test_resources/config/test_config.conf'
        ])
        self.assertEqual('test_offsets_repo_class',
                         cfg.CONF.repositories.offsets)
        self.assertEqual('test_data_driven_specs_repo_class',
                         cfg.CONF.repositories.data_driven_specs)
        self.assertEqual('test_server_type', cfg.CONF.database.server_type)
        self.assertEqual('test_host_name', cfg.CONF.database.host)
        self.assertEqual('test_database_name', cfg.CONF.database.database_name)
        self.assertEqual('test_database_user_name', cfg.CONF.database.username)
        self.assertEqual('test_database_password', cfg.CONF.database.password)
        self.assertEqual('test_ca_file_path', cfg.CONF.database.ca_file)
        self.assertTrue(cfg.CONF.database.use_ssl)
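
    # A sketch, reconstructed from the assertions above rather than copied
    # from the repository, of what
    # tests/functional/test_resources/config/test_config.conf presumably
    # contains (oslo.config groups match the cfg.CONF attributes used here):
    #
    #   [repositories]
    #   offsets = test_offsets_repo_class
    #   data_driven_specs = test_data_driven_specs_repo_class
    #
    #   [database]
    #   server_type = test_server_type
    #   host = test_host_name
    #   database_name = test_database_name
    #   username = test_database_user_name
    #   password = test_database_password
    #   ca_file = test_ca_file_path
    #   use_ssl = True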
    def setUp(self):
        ConfigInitializer.basic_config(
            default_config_files=[
                'tests/functional/test_resources/config/test_config.conf'
            ])
        self.config = Config()
        self.config.config(
            group='database',
            use_ssl=True,
            host='test_ssl_hostname',
            server_type='jdbc_driver',
            database_name='db_name',
            username='******',
            password='******',
            ca_file='ca_file')
        self.config.setUp()
    def insert(transform_context, instance_usage_df):
        """write instance usage data to kafka"""

        # object to init config
        ConfigInitializer.basic_config()

        transform_spec_df = transform_context.transform_spec_df_info

        agg_params = transform_spec_df.select(
            "aggregation_params_map.dimension_list").collect()[0].asDict()

        # Approach #1
        # Using foreachPartition to iterate over the elements of an RDD is
        # the recommended approach, since it avoids overwhelming Kafka with
        # one connection per record (although in our case the MessageAdapter
        # caches adapter_impl, so we would not create many producers anyway).
        #
        # However, foreachPartition was causing serialization/cPickle
        # problems: libraries such as kafka.SimpleProducer and oslo_config.cfg
        # were not available inside the foreachPartition method.
        #
        # _write_metrics_from_partition has been removed for now in favor of
        # Approach #2.
        #

        # instance_usage_df_agg_params = instance_usage_df.rdd.map(
        #    lambda x: InstanceUsageDataAggParams(x,
        #                                        agg_params))
        # instance_usage_df_agg_params.foreachPartition(
        #     DummyInsert._write_metrics_from_partition)

        # Approach #2
        # Use collect() to fetch all elements of the RDD on the driver and
        # write them to Kafka.

        for instance_usage_row in instance_usage_df.collect():
            metric = InsertComponent._get_metric(instance_usage_row,
                                                 agg_params)
            # validate metric part
            if InsertComponent._validate_metric(metric):
                KafkaMessageAdapter.send_metric(metric)
        return instance_usage_df
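
    # A hedged sketch (assumed, not the removed implementation) of what the
    # _write_metrics_from_partition helper described in "Approach #1" above
    # might look like. To stay on known calls it iterates plain instance
    # usage rows and takes agg_params as an extra argument instead of using
    # the InstanceUsageDataAggParams wrapper; it reuses only the
    # InsertComponent and KafkaMessageAdapter calls already shown in
    # Approach #2.
    @staticmethod
    def _write_metrics_from_partition(instance_usage_rows, agg_params):
        """Send the metrics for one RDD partition to kafka."""
        for instance_usage_row in instance_usage_rows:
            metric = InsertComponent._get_metric(instance_usage_row,
                                                 agg_params)
            # validate metric before sending, as in Approach #2
            if InsertComponent._validate_metric(metric):
                KafkaMessageAdapter.send_metric(metric)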
    def test_use_default_config_file(self):

        ConfigInitializer.basic_config(default_config_files=[])

        self.assertEqual(
            'monasca_transform.mysql_offset_specs:MySQLOffsetSpecs',
            cfg.CONF.repositories.offsets)

        self.assertEqual(
            'monasca_transform.data_driven_specs.'
            'mysql_data_driven_specs_repo:MySQLDataDrivenSpecsRepo',
            cfg.CONF.repositories.data_driven_specs)

        self.assertEqual('mysql:thin', cfg.CONF.database.server_type)
        self.assertEqual('localhost', cfg.CONF.database.host)
        self.assertEqual('monasca_transform', cfg.CONF.database.database_name)
        self.assertEqual('m-transform', cfg.CONF.database.username)
        self.assertEqual('password', cfg.CONF.database.password)
        self.assertIsNone(cfg.CONF.database.ca_file)
        self.assertFalse(cfg.CONF.database.use_ssl)
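
    # A minimal sketch (assumed, not the actual ConfigInitializer
    # implementation) of how the defaults asserted above could be registered
    # with oslo.config; option and group names follow the cfg.CONF attributes
    # used in this test:
    #
    #   repositories_opts = [
    #       cfg.StrOpt(
    #           'offsets',
    #           default='monasca_transform.mysql_offset_specs:'
    #                   'MySQLOffsetSpecs'),
    #       cfg.StrOpt(
    #           'data_driven_specs',
    #           default='monasca_transform.data_driven_specs.'
    #                   'mysql_data_driven_specs_repo:'
    #                   'MySQLDataDrivenSpecsRepo'),
    #   ]
    #   database_opts = [
    #       cfg.StrOpt('server_type', default='mysql:thin'),
    #       cfg.StrOpt('host', default='localhost'),
    #       cfg.StrOpt('database_name', default='monasca_transform'),
    #       cfg.StrOpt('username', default='m-transform'),
    #       cfg.StrOpt('password', default='password'),
    #       cfg.StrOpt('ca_file'),
    #       cfg.BoolOpt('use_ssl', default=False),
    #   ]
    #   cfg.CONF.register_opts(repositories_opts, group='repositories')
    #   cfg.CONF.register_opts(database_opts, group='database')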
    def test_use_specific_config_file(self):

        ConfigInitializer.basic_config(
            default_config_files=[
                'tests/unit/test_resources/config/test_config.conf'
            ])
        self.assertEqual('test_offsets_repo_class',
                         cfg.CONF.repositories.offsets)
        self.assertEqual('test_data_driven_specs_repo_class',
                         cfg.CONF.repositories.data_driven_specs)
        self.assertEqual('test_server_type',
                         cfg.CONF.database.server_type)
        self.assertEqual('test_host_name',
                         cfg.CONF.database.host)
        self.assertEqual('test_database_name',
                         cfg.CONF.database.database_name)
        self.assertEqual('test_database_user_name',
                         cfg.CONF.database.username)
        self.assertEqual('test_database_password',
                         cfg.CONF.database.password)
    def insert(transform_context, instance_usage_df):
        """write instance usage data to kafka"""

        # object to init config
        ConfigInitializer.basic_config()

        transform_spec_df = transform_context.transform_spec_df_info

        agg_params = transform_spec_df.select(
            "metric_id").\
            collect()[0].asDict()
        metric_id = agg_params["metric_id"]

        for instance_usage_row in instance_usage_df.collect():
            instance_usage_dict = \
                InsertComponent._get_instance_usage_pre_hourly(
                    instance_usage_row,
                    metric_id)
            KafkaMessageAdapterPreHourly.send_metric(instance_usage_dict)

        return instance_usage_df
    def setUp(self):
        ConfigInitializer.basic_config()
        super(TestMySQLDataDrivenSpecsRepo, self).setUp()
        self.data_driven_specs_repo = MySQLDataDrivenSpecsRepo()
    def setUp(self):
        super(TransformBuilderTest, self).setUp()
        # configure the system with a dummy messaging adapter
        ConfigInitializer.basic_config(
            default_config_files=[
                'tests/functional/test_resources/config/test_config.conf'])
    def setUp(self):
        ConfigInitializer.basic_config()
        self.kafka_offset_specs = MySQLOffsetSpecs()
import datetime
import unittest

from monasca_transform.config.config_initializer import ConfigInitializer

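# NOTE: configuration is initialized at import time, before the
# processor_util imports below, presumably so that the cfg.CONF options those
# modules rely on are available when they are loaded.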
ConfigInitializer.basic_config(
    default_config_files=[
        'tests/unit/test_resources/config/'
        'test_config.conf']
)
from monasca_transform.processor.processor_util import PreHourlyProcessorUtil
from monasca_transform.processor.processor_util import ProcessUtilDataProvider


class PreHourlyProcessorTest(unittest.TestCase):

    def setUp(self):
        pass

    def test_is_time_to_run_before_late_metric_slack_time(self):
        check_time = datetime.datetime(
            year=2016, month=11, day=7, hour=11,
            minute=9, second=59, microsecond=0)
from monasca_transform.data_driven_specs.data_driven_specs_repo \
    import DataDrivenSpecsRepo

from monasca_transform.data_driven_specs.data_driven_specs_repo \
    import DataDrivenSpecsRepoFactory

from monasca_transform.processor.pre_hourly_processor import PreHourlyProcessor

from monasca_transform.transform import RddTransformContext
from monasca_transform.transform.storage_utils import \
    InvalidCacheStorageLevelException
from monasca_transform.transform.storage_utils import StorageUtils
from monasca_transform.transform.transform_utils import MonMetricUtils
from monasca_transform.transform import TransformContextUtils

ConfigInitializer.basic_config()
log = LogUtils.init_logger(__name__)


class MonMetricsKafkaProcessor(object):

    @staticmethod
    def log_debug(message):
        print(message)
        log.debug(message)

    @staticmethod
    def store_offset_ranges(batch_time, rdd):
        if rdd.isEmpty():
            MonMetricsKafkaProcessor.log_debug(
                "storeOffsetRanges: nothing to process...")
import datetime
import unittest

from monasca_transform.config.config_initializer import ConfigInitializer

ConfigInitializer.basic_config(default_config_files=[
    'tests/unit/test_resources/config/'
    'test_config.conf'
])
from monasca_transform.processor.processor_util import PreHourlyProcessorUtil
from monasca_transform.processor.processor_util import ProcessUtilDataProvider


class PreHourlyProcessorTest(unittest.TestCase):
    def setUp(self):
        pass

    def test_is_time_to_run_before_late_metric_slack_time(self):
        check_time = datetime.datetime(year=2016,
                                       month=11,
                                       day=7,
                                       hour=11,
                                       minute=9,
                                       second=59,
                                       microsecond=0)
    import GenericTransformBuilder

from monasca_transform.data_driven_specs.data_driven_specs_repo \
    import DataDrivenSpecsRepo

from monasca_transform.data_driven_specs.data_driven_specs_repo \
    import DataDrivenSpecsRepoFactory

from monasca_transform.processor.pre_hourly_processor import PreHourlyProcessor

from monasca_transform.transform import RddTransformContext
from monasca_transform.transform.storage_utils import StorageUtils
from monasca_transform.transform.transform_utils import MonMetricUtils
from monasca_transform.transform import TransformContextUtils

ConfigInitializer.basic_config()

# initialize logger
log = logging.getLogger(__name__)
_h = logging.FileHandler('%s/%s' % (
    cfg.CONF.service.service_log_path,
    cfg.CONF.service.service_log_filename)
)
_h.setFormatter(logging.Formatter("'%(asctime)s - %(pathname)s:"
                                  "%(lineno)s - %(levelname)s - %(message)s'"))
log.addHandler(_h)
log.setLevel(logging.DEBUG)


class MonMetricsKafkaProcessor(object):