Example #1
0
def init():
    """Build a fully mocked gluster-integration NS namespace for tests.

    Patches etcd client construction, the client read/write paths, the
    central-store utils, config loading and node-id detection before
    instantiating TendrlNS, then overwrites NS with deterministic
    test values (type, publisher_id, config, stubbed threads).
    """
    with patch.object(etcd, "Client") as patch_client:
        with patch.object(Client, "read") as patch_read:
            with patch.object(Client, "write") as patch_write:
                with patch.object(cs_utils, "write") as util_write:
                    with patch.object(cs_utils, "read") as util_read:
                        with patch.object(
                            cmn_config,
                            "load_config"
                        ) as load_conf:
                            with patch.object(
                                node.NodeContext,
                                '_get_node_id'
                            ) as patch_get_node_id:
                                # Deterministic node id and dummy etcd
                                # clients for every patched entry point.
                                patch_get_node_id.return_value = 1
                                patch_read.return_value = etcd.Client()
                                patch_write.return_value = etcd.Client()
                                patch_client.return_value = etcd.Client()
                                util_read.return_value = etcd.Client()
                                util_write.return_value = etcd.Client()
                                # creating monitoring NS
                                dummy_conf = {"etcd_port": "1234",
                                              "etcd_connection": "127.0.0.1"
                                              }
                                load_conf.return_value = dummy_conf
                                TendrlNS("gluster",
                                         "tendrl.gluster_integration")
                                # overwriting conf
                                setattr(NS, "type", "")
                                setattr(NS,
                                        "publisher_id",
                                        "monitoring_integration")
                                NS._int.etcd_kwargs = {
                                    'port': 1,
                                    'host': 2,
                                    'allow_reconnect': True}
                                NS["config"] = maps.NamedDict()
                                NS["conf"] = maps.NamedDict()
                                NS.config["data"] = maps.NamedDict()
                                NS.config.data["sync_interval"] = 10
                                NS.config.data['tags'] = "test"
                                # Background threads are stubbed so no real
                                # sync/handler work runs during the test.
                                NS.state_sync_thread = mock.MagicMock()
                                NS.sds_sync_thread = mock.MagicMock()
                                NS.message_handler_thread = mock.MagicMock()
                                # Build the base namespace while etcd reads
                                # and BaseObject.load are also patched.
                                with patch.object(
                                    etcd_utils, "read"
                                ) as utils_read:
                                    utils_read.return_value = maps.NamedDict(
                                        value='{"tags":[]}'
                                    )
                                    with patch.object(
                                        BaseObject, "load"
                                    ) as node_load:
                                        node.load = MagicMock()
                                        node_load.return_value = node
                                        TendrlNS()
                                NS["tendrl_context"] = maps.NamedDict(
                                    integration_id="77deef29-b8e5-4dc5-"
                                    "8247-21e2a409a66a"
                                )
def init():
    """Build a fully mocked monitoring-integration NS namespace for tests.

    Patches etcd client construction, client read/write, central-store
    utils, config loading and node-id detection, instantiates TendrlNS
    for the monitoring namespace, then overwrites NS.config.data with
    deterministic grafana/datasource test settings.
    """
    with patch.object(etcd, "Client") as patch_client:
        with patch.object(Client, "read") as patch_read:
            with patch.object(Client, "write") as patch_write:
                with patch.object(cs_utils, "write") as util_write:
                    with patch.object(cs_utils, "read") as util_read:
                        with patch.object(cmn_config,
                                          "load_config") as load_conf:
                            with patch.object(
                                    node.NodeContext,
                                    '_get_node_id') as patch_get_node_id:
                                # Deterministic node id and dummy etcd
                                # clients for every patched entry point.
                                patch_get_node_id.return_value = 1
                                patch_read.return_value = etcd.Client()
                                patch_write.return_value = etcd.Client()
                                patch_client.return_value = etcd.Client()
                                util_read.return_value = etcd.Client()
                                util_write.return_value = etcd.Client()
                                # creating monitoring NS
                                dummy_conf = {
                                    "etcd_port": "1234",
                                    "etcd_connection": "127.0.0.1"
                                }
                                load_conf.return_value = dummy_conf
                                TendrlNS("monitoring",
                                         "tendrl.monitoring_integration")
                                # overwriting conf
                                setattr(NS, "type", "monitoring")
                                setattr(NS, "publisher_id",
                                        "monitoring_integration")
                                NS._int.etcd_kwargs = {
                                    'port': 1,
                                    'host': 2,
                                    'allow_reconnect': True
                                }
                                NS["config"] = maps.NamedDict()
                                NS["conf"] = maps.NamedDict()
                                NS.config["data"] = maps.NamedDict()
                                NS.conf["dashboards"] = []
                                NS.config.data['tags'] = "test"
                                # Grafana/datasource settings normally read
                                # from config are pinned to test values.
                                setattr(NS.config.data, "credentials",
                                        mock.MagicMock())
                                setattr(NS.config.data, "grafana_host",
                                        "127.0.0.1")
                                setattr(NS.config.data, "grafana_port", "1234")
                                setattr(NS.config.data, "datasource_host",
                                        "127.0.0.1")
                                setattr(NS.config.data, "datasource_port",
                                        "234")
                                setattr(NS.config.data, "datasource_name",
                                        "test")
                                setattr(NS.config.data, "datasource_type",
                                        "test_type")
                                setattr(NS.config.data, "access", "test")
                                setattr(NS.config.data, "basicAuth", True)
                                setattr(NS.config.data, "isDefault", True)
                                TendrlNS()
def main():
    """Entry point for the gluster-integration daemon (gevent variant).

    Initializes the namespaces, verifies the node belongs to a Tendrl
    sds cluster, persists context/definitions, starts the integration
    manager and blocks until a SIGTERM/SIGINT handler sets the
    completion event.

    Raises:
        Exception: if this node is not part of any sds cluster
            (tendrl_context missing in etcd).
    """
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()

    NS.type = "sds"
    NS.publisher_id = "gluster_integration"

    NS.central_store_thread = central_store.GlusterIntegrationEtcdCentralStore(
    )
    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()

    NS.node_context.save()
    # The integration may only run on nodes that already belong to a
    # Tendrl-managed sds cluster; load() raises EtcdKeyNotFound otherwise.
    try:
        NS.tendrl_context = NS.tendrl_context.load()
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Integration %s is part of sds cluster" %
                        NS.tendrl_context.integration_id
                    }))
    except etcd.EtcdKeyNotFound:
        Event(
            Message(priority="error",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Node %s is not part of any sds cluster" %
                        NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started,"
                        " please Import or Create sds cluster"
                        " in Tendrl and include Node %s" %
                        NS.node_context.node_id)

    NS.tendrl_context.save()
    NS.gluster.definitions.save()
    NS.gluster.config.save()

    # Provisioning plugin used for gluster deployment operations.
    pm = ProvisioningManager("GdeployPlugin")
    NS.gdeploy_plugin = pm.get_plugin()

    m = GlusterIntegrationManager()
    m.start()

    complete = gevent.event.Event()

    def shutdown():
        # gevent signal handler: log and release the main wait loop.
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)

    while not complete.is_set():
        complete.wait(timeout=1)
Example #4
0
def main():
    """Entry point for the alerting daemon.

    Saves alerting definitions/config, optionally enables internal
    profiling, starts the alerting manager and blocks until a
    SIGINT/SIGTERM handler stops the manager and sets the completion
    event.
    """
    AlertingNS()
    TendrlNS()
    NS.alerting.definitions.save()
    NS.alerting.config.save()
    NS.publisher_id = "alerting"

    # Optional internal profiler, enabled via configuration flag.
    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    tendrl_alerting_manager = TendrlAlertingManager()
    tendrl_alerting_manager.start()

    complete = gevent.event.Event()

    def terminate(sig, frame):
        # Signal handler: stop the manager, then release the wait loop.
        Event(
            Message("debug", "alerting", {
                "message": 'Signal handler: stopping',
            }))
        tendrl_alerting_manager.stop()
        complete.set()

    gevent.signal(signal.SIGINT, terminate)
    gevent.signal(signal.SIGTERM, terminate)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    """Entry point for the gluster-integration daemon (LOG-based variant).

    Initializes the namespaces, persists node/tendrl context and the
    gluster-integration definitions/config, starts the integration
    manager and blocks until a SIGTERM/SIGINT handler sets the
    completion event.
    """
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()

    NS.type = "sds"

    NS.central_store_thread = central_store.GlusterIntegrationEtcdCentralStore(
    )
    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()

    NS.node_context.save()
    NS.tendrl_context.save()
    NS.gluster_integration.definitions.save()
    NS.gluster_integration.config.save()
    NS.publisher_id = "gluster_integration"

    m = GlusterIntegrationManager()
    m.start()

    complete = gevent.event.Event()

    def shutdown():
        # gevent signal handler: log and release the main wait loop.
        LOG.info("Signal handler: stopping")
        complete.set()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)

    while not complete.is_set():
        complete.wait(timeout=1)
Example #6
0
def init(patch_node_load, patch_etcd_utils_read, patch_get_node_id, patch_read,
         patch_client):
    """Prepare a mocked NS namespace and return a fresh TendrlNS.

    The patch_* arguments are mock objects (supplied by @patch
    decorators on the caller) whose return values are wired up before
    TendrlNS is constructed.
    """
    # Deterministic node id and dummy etcd clients for the mocks.
    patch_get_node_id.return_value = 1
    patch_read.return_value = etcd.Client()
    patch_client.return_value = etcd.Client()
    # NS is installed as a builtin so the tendrl code under test sees it.
    setattr(__builtin__, "NS", maps.NamedDict())
    NS._int = maps.NamedDict()
    NS._int.etcd_kwargs = dict(port=1, host=2, allow_reconnect=True)
    NS._int.client = etcd.Client(**NS._int.etcd_kwargs)
    NS._int.wclient = etcd.Client(**NS._int.etcd_kwargs)
    NS.config = maps.NamedDict(data=maps.NamedDict(tags="test"))
    NS.publisher_id = "node_context"
    # Canned node-context JSON handed back by the mocked etcd read.
    patch_etcd_utils_read.return_value = maps.NamedDict(
        value='{"status": "UP",'
        '"pkey": "tendrl-node-test",'
        '"node_id": "test_node_id",'
        '"ipv4_addr": "test_ip",'
        '"tags": "[\\"my_tag\\"]",'
        '"sync_status": "done",'
        '"locked_by": "fd",'
        '"fqdn": "tendrl-node-test",'
        '"last_sync": "date"}')
    patch_node_load.return_value = node.NodeContext
    return TendrlNS()
Example #7
0
def main():
    """Entry point for the node-monitoring daemon.

    Initializes the namespaces, persists node-monitoring
    definitions/config, starts the monitoring manager and blocks until
    a SIGTERM/SIGINT handler sets the completion event.
    """
    NodeMonitoringNS()
    TendrlNS()
    NS.type = "monitoring"

    complete = gevent.event.Event()

    NS.state_sync_thread = NodeMonitoringSyncStateThread()
    NS.node_monitoring.definitions.save()
    NS.node_monitoring.config.save()
    NS.publisher_id = "node_monitoring"

    manager = NodeMonitoringManager()
    manager.start()

    def shutdown():
        # gevent signal handler: log and release the main wait loop.
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "Signal handler: stopping"}
            )
        )
        complete.set()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)

    while not complete.is_set():
        complete.wait(timeout=1)
Example #8
0
def main():
    """Entry point for the performance-monitoring daemon.

    Initializes the namespaces and time-series DB manager, optionally
    enables internal profiling, then starts the performance manager.

    NOTE(review): only SIGINT is registered here (unlike the other
    service mains, which also handle SIGTERM) — confirm this is
    intentional.
    """
    PerformanceMonitoringNS()
    TendrlNS()
    NS.publisher_id = "performance_monitoring"
    NS.time_series_db_manager = TimeSeriesDBManager()
    NS.performance_monitoring.definitions.save()
    NS.performance_monitoring.config.save()

    # Optional internal profiler, enabled via configuration flag.
    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    tendrl_perf_manager = TendrlPerformanceManager()

    def terminate(sig, frame):
        # Signal handler: log and stop the performance manager.
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "Signal handler: stopping"}
            )
        )
        tendrl_perf_manager.stop()

    signal.signal(signal.SIGINT, terminate)
    tendrl_perf_manager.start()
Example #9
0
def main():
    """Entry point for the notifier daemon.

    Saves notifier definitions/config, optionally enables internal
    profiling, starts the notifier manager and blocks until a
    SIGINT/SIGTERM handler sets the completion event. SIGHUP reloads
    the service configuration in place.
    """
    NotifierNS()
    TendrlNS()
    NS.notifier.definitions.save()
    NS.notifier.config.save()
    NS.publisher_id = "notifier"

    # Optional internal profiler, enabled via configuration flag.
    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()
    tendrl_notifier_manager = TendrlNotifierManager()
    tendrl_notifier_manager.start()
    complete = threading.Event()

    def terminate(signum, frame):
        # Signal handler: release the wait loop, then stop the manager.
        log("debug", "notifier", {
            "message": 'Signal handler: stopping',
        })
        complete.set()
        tendrl_notifier_manager.stop()

    def reload_config(signum, frame):
        # SIGHUP handler: re-read config without restarting the daemon.
        NS.notifier.ns.setup_common_objects()

    signal.signal(signal.SIGINT, terminate)
    signal.signal(signal.SIGTERM, terminate)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
def init(patch_etcd_utils_read, patch_get_node_id, patch_read, patch_client,
         tc):
    """Prepare a mocked NS namespace and return a fresh TendrlNS.

    The patch_* / tc arguments are mock objects (supplied by @patch
    decorators on the caller) whose return values are wired up before
    TendrlNS is constructed.
    """
    tc.return_value = tendrl_context.TendrlContext
    patch_get_node_id.return_value = 1
    patch_read.return_value = etcd.Client()
    patch_client.return_value = etcd.Client()
    NS._int.etcd_kwargs = {'port': 1, 'host': 2, 'allow_reconnect': True}
    NS._int.client = etcd.Client(**NS._int.etcd_kwargs)
    NS["config"] = maps.NamedDict()
    NS.config["data"] = maps.NamedDict()
    NS.config.data['tags'] = "test"
    # Canned node-context JSON handed back by the mocked etcd read.
    patch_etcd_utils_read.return_value = maps.NamedDict(
        value='{"status": "UP",'
        '"pkey": "tendrl-node-test",'
        '"node_id": "test_node_id",'
        '"ipv4_addr": "test_ip",'
        '"tags": "[\\"my_tag\\"]",'
        '"sync_status": "done",'
        '"locked_by": "fd",'
        '"fqdn": "",'
        '"last_sync": "date"}')
    with patch.object(etcd_utils, "read") as utils_read:
        utils_read.return_value = maps.NamedDict(value='{"tags":[]}')
        with patch.object(BaseObject, "load") as node_load:
            node.load = MagicMock()
            node_load.return_value = node
            # BUG FIX: construct TendrlNS while etcd_utils.read and
            # BaseObject.load are still patched. Previously the `with`
            # blocks exited before TendrlNS() ran, so the mocks they
            # configured were never in effect (compare the sibling
            # fixtures, which build TendrlNS inside the patches).
            tendrlNS = TendrlNS()
    return tendrlNS
def init(patch_etcd_utils_read, patch_get_node_id, patch_read, patch_client):
    """Prepare a mocked node-agent NS namespace and return a TendrlNS.

    The patch_* arguments are mock objects (supplied by @patch
    decorators on the caller) whose return values are wired up before
    TendrlNS is constructed.
    """
    # Deterministic node id and dummy etcd clients for the mocks.
    patch_get_node_id.return_value = 1
    patch_read.return_value = etcd.Client()
    patch_client.return_value = etcd.Client()
    # NS is installed as a builtin so the tendrl code under test sees it.
    setattr(__builtin__, "NS", maps.NamedDict())
    NS._int = maps.NamedDict()
    NS._int.etcd_kwargs = dict(port=1, host=2, allow_reconnect=True)
    NS._int.client = etcd.Client(**NS._int.etcd_kwargs)
    NS._int.wclient = etcd.Client(**NS._int.etcd_kwargs)
    NS.publisher_id = "node_agent"
    NS.config = maps.NamedDict()
    NS.config.data = maps.NamedDict(
        logging_socket_path="test/path",
        package_source_type="test pip",
        tags="test",
        etcd_port=8085,
        etcd_connection="Test Connection",
        sync_interval=30,
    )
    NS.node_context = maps.NamedDict(node_id=1, fqdn="test_fqdn")
    NS.compiled_definitions = mock.MagicMock()
    # Canned node-context JSON handed back by the mocked etcd read.
    patch_etcd_utils_read.return_value = maps.NamedDict(
        value='{"status": "UP",'
        '"pkey": "tendrl-node-test",'
        '"node_id": "test_node_id",'
        '"ipv4_addr": "test_ip",'
        '"tags": "[\\"my_tag\\"]",'
        '"sync_status": "done",'
        '"locked_by": "fd",'
        '"fqdn": "tendrl-node-test",'
        '"last_sync": "date"}')
    return TendrlNS()
def test_constructor():
    """Verify TendrlNS defaults and that the global NS is a NamedDict."""
    # Constructing with setup_common_objects stubbed out must not raise.
    with patch.object(TendrlNS, "setup_common_objects") as stub:
        stub.return_value = None
        TendrlNS()
    tendrl_ns = init()
    # Default parameters point at the core tendrl namespace.
    assert tendrl_ns.ns_name == "tendrl"
    assert tendrl_ns.ns_src == "tendrl.commons"
    # The global NS must exist and have the expected container type.
    assert isinstance(NS, maps.NamedDict)
Example #13
0
def init(patch_get_node_id, patch_read, patch_client):
    """Prepare a minimal mocked NS namespace and return a TendrlNS.

    The patch_* arguments are mock objects (supplied by @patch
    decorators on the caller) whose return values are wired up before
    TendrlNS is constructed.
    """
    # Deterministic node id and dummy etcd clients for the mocks.
    patch_get_node_id.return_value = 1
    patch_read.return_value = etcd.Client()
    patch_client.return_value = etcd.Client()
    # NS is installed as a builtin so the tendrl code under test sees it.
    setattr(__builtin__, "NS", maps.NamedDict())
    NS._int = maps.NamedDict()
    NS._int.etcd_kwargs = dict(port=1, host=2, allow_reconnect=True)
    NS._int.client = etcd.Client(**NS._int.etcd_kwargs)
    NS.config = maps.NamedDict(data=maps.NamedDict(tags="test"))
    return TendrlNS()
def main():
    """Entry point for the monitoring-integration daemon.

    Waits (with retries) for Grafana to become reachable, initializes
    the namespaces, tags this node as a monitoring integration node,
    starts the integration manager and blocks until a SIGTERM/SIGINT
    handler sets the completion event. SIGHUP reloads configuration.
    """
    monitoring_integration.MonitoringIntegrationNS()

    TendrlNS()
    # Poll Grafana up to 10 times (4s apart); give up if it never opens.
    grafana_conn_count = 0
    while grafana_conn_count < 10:
        if not utils.port_open(NS.config.data["grafana_port"],
                               NS.config.data["grafana_host"]):
            grafana_conn_count = grafana_conn_count + 1
            time.sleep(4)
        else:
            break
    if grafana_conn_count == 10:
        logger.log("error", NS.get("publisher_id", None),
                   {'message': "Cannot connect to Grafana"})
        return
    NS.type = "monitoring"
    NS.publisher_id = "monitoring_integration"
    # Optional internal profiler, enabled via configuration flag.
    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()
    NS.monitoring.config.save()
    NS.monitoring.definitions.save()
    NS.sync_thread = sync.MonitoringIntegrationSdsSyncThread()

    monitoring_integration_manager = MonitoringIntegrationManager()
    monitoring_integration_manager.start()
    complete = threading.Event()
    # Add the monitoring-integration tag to this node (deduplicated).
    NS.node_context = NS.node_context.load()
    current_tags = list(NS.node_context.tags)
    current_tags += ["tendrl/integration/monitoring"]
    NS.node_context.tags = list(set(current_tags))
    NS.node_context.save()

    def shutdown(signum, frame):
        # Signal handler: release the wait loop, then stop the sync thread.
        complete.set()
        NS.sync_thread.stop()

    def reload_config(signum, frame):
        # SIGHUP handler: re-read config without restarting the daemon.
        NS.monitoring.ns.setup_common_objects()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
Example #15
0
def main():
    """Entry point for the alerting daemon (minimal variant).

    Saves alerting definitions/config and starts the alerting manager.

    NOTE(review): only SIGINT is registered here (unlike the other
    alerting main, which also handles SIGTERM) — confirm this is
    intentional.
    """
    AlertingNS()
    TendrlNS()
    NS.alerting.definitions.save()
    NS.alerting.config.save()
    NS.publisher_id = "alerting"

    tendrl_alerting_manager = TendrlAlertingManager()

    def terminate(sig, frame):
        # Signal handler: log and stop the alerting manager.
        Event(
            Message("info", "alerting", {
                "message": 'Signal handler: stopping',
            }))
        tendrl_alerting_manager.stop()

    signal.signal(signal.SIGINT, terminate)
    tendrl_alerting_manager.start()
def init(patch_get_node_id, patch_read, patch_client):
    """Prepare a mocked node-context NS namespace and return a TendrlNS.

    The patch_* arguments are mock objects (supplied by @patch
    decorators on the caller) whose return values are wired up before
    TendrlNS is constructed.
    """
    # Deterministic node id and dummy etcd clients for the mocks.
    patch_get_node_id.return_value = 1
    patch_read.return_value = etcd.Client()
    patch_client.return_value = etcd.Client()
    # NS is installed as a builtin so the tendrl code under test sees it.
    setattr(__builtin__, "NS", maps.NamedDict())
    NS._int = maps.NamedDict()
    NS._int.etcd_kwargs = dict(port=1, host=2, allow_reconnect=True)
    NS._int.client = etcd.Client(**NS._int.etcd_kwargs)
    NS._int.wclient = etcd.Client(**NS._int.etcd_kwargs)
    NS.config = maps.NamedDict(data=maps.NamedDict(
        tags="test",
        logging_socket_path="test",
        etcd_port=8085,
        etcd_connection="Test Connection",
    ))
    NS.publisher_id = "node_context"
    return TendrlNS()
Example #17
0
def init(patch_get_node_id, patch_read, patch_client):
    """Prepare a mocked NS namespace and return a TendrlNS.

    The patch_* arguments are mock objects (supplied by @patch
    decorators on the caller) whose return values are wired up before
    TendrlNS is constructed.
    """
    # Deterministic node id and dummy etcd clients for the mocks.
    patch_get_node_id.return_value = 1
    patch_read.return_value = etcd.Client()
    patch_client.return_value = etcd.Client()
    # NS is installed as a builtin so the tendrl code under test sees it.
    setattr(__builtin__, "NS", maps.NamedDict())
    NS._int = maps.NamedDict()
    NS._int.etcd_kwargs = dict(port=1, host=2, allow_reconnect=True)
    NS._int.client = etcd.Client(**NS._int.etcd_kwargs)
    NS.config = maps.NamedDict(data=maps.NamedDict(tags="test"))
    # Background threads are stubbed so no real work runs during the test.
    NS.state_sync_thread = mock.MagicMock()
    NS.sds_sync_thread = mock.MagicMock()
    NS.message_handler_thread = mock.MagicMock()
    # Construct TendrlNS while json.loads is stubbed to report no tags.
    with patch.object(json, "loads") as mocked_loads:
        mocked_loads.return_value = {"tags": ""}
        return TendrlNS()
Example #18
0
def init(patch_get_node_id, patch_read, patch_client):
    """Prepare a mocked node-agent NS namespace and return a TendrlNS.

    The patch_* arguments are mock objects (supplied by @patch
    decorators on the caller) whose return values are wired up before
    TendrlNS is constructed.
    """
    # Deterministic node id and dummy etcd clients for the mocks.
    patch_get_node_id.return_value = 1
    patch_read.return_value = etcd.Client()
    patch_client.return_value = etcd.Client()
    # NS is installed as a builtin so the tendrl code under test sees it.
    setattr(__builtin__, "NS", maps.NamedDict())
    NS._int = maps.NamedDict()
    NS._int.etcd_kwargs = dict(port=1, host=2, allow_reconnect=True)
    NS._int.client = etcd.Client(**NS._int.etcd_kwargs)
    NS._int.wclient = etcd.Client(**NS._int.etcd_kwargs)
    NS.publisher_id = "node_agent"
    NS.config = maps.NamedDict()
    NS.config.data = maps.NamedDict(
        logging_socket_path="test/path",
        package_source_type="test pip",
        tags="test",
        etcd_port=8085,
        etcd_connection="Test Connection",
        sync_interval=30,
    )
    NS.node_context = maps.NamedDict(node_id=1)
    NS.compiled_definitions = mock.MagicMock()
    return TendrlNS()
def main():
    """Entry point for the monitoring-integration daemon (gevent variant).

    Initializes the namespaces, persists monitoring config/definitions,
    starts the sync thread and integration manager, and blocks until a
    SIGTERM/SIGINT handler sets the completion event.
    """

    monitoring_integration.MonitoringIntegrationNS()

    TendrlNS()
    NS.type = "monitoring"
    NS.publisher_id = "monitoring_integration"
    NS.monitoring.config.save()
    NS.monitoring.definitions.save()
    NS.sync_thread = sync.MonitoringIntegrationSdsSyncThread()

    monitoring_integration_manager = MonitoringIntegrationManager()
    monitoring_integration_manager.start()
    complete = gevent.event.Event()

    def shutdown():
        # gevent signal handler: release the loop, then stop the sync thread.
        complete.set()
        NS.sync_thread.stop()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)

    while not complete.is_set():
        complete.wait(timeout=1)
Example #20
0
def main():
    """Entry point for the gluster-integration daemon (threading variant).

    Waits until the node agent has detected the sds cluster (i.e. an
    integration_id appears in tendrl_context), persists gluster
    definitions/config, starts the integration manager and blocks until
    a SIGTERM/SIGINT handler sets the completion event. SIGHUP reloads
    the service configuration.
    """
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()

    NS.type = "sds"
    NS.publisher_id = "gluster_integration"

    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()

    NS.message_handler_thread = GlusterNativeMessageHandler()

    # Busy-wait until tendrl-node-agent populates the integration_id.
    while NS.tendrl_context.integration_id is None or \
        NS.tendrl_context.integration_id == "":
        logger.log(
            "debug", NS.publisher_id, {
                "message":
                "Waiting for tendrl-node-agent %s to "
                "detect sds cluster (integration_id not found)" %
                NS.node_context.node_id
            })
        NS.tendrl_context = NS.tendrl_context.load()

    logger.log(
        "debug", NS.publisher_id, {
            "message":
            "Integration %s is part of sds cluster" %
            NS.tendrl_context.integration_id
        })

    NS.gluster.definitions.save()
    NS.gluster.config.save()

    # Provisioning plugin used for gluster deployment operations.
    pm = ProvisioningManager("GdeployPlugin")
    NS.gdeploy_plugin = pm.get_plugin()
    # Optional internal profiler, enabled via configuration flag.
    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    m = GlusterIntegrationManager()
    m.start()

    complete = threading.Event()

    def shutdown(signum, frame):
        # Signal handler: release the wait loop, then stop the manager.
        logger.log("debug", NS.publisher_id,
                   {"message": "Signal handler: stopping"})
        complete.set()
        m.stop()

    def reload_config(signum, frame):
        # SIGHUP handler: re-read config without restarting the daemon.
        logger.log(
            "debug", NS.publisher_id,
            {"message": "Signal handler: SIGHUP,"
             " reload service config"})
        NS.gluster.ns.setup_common_objects()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    """Entry point for the ceph-integration daemon.

    Verifies the node belongs to a Tendrl sds cluster, copies detected
    cluster details into tendrl_context, persists context/definitions,
    starts the integration manager and blocks until a SIGTERM/SIGINT
    handler sets the completion event.

    Raises:
        Exception: if this node is not part of any sds cluster
            (tendrl_context missing in etcd).
    """
    ceph_integration.CephIntegrationNS()
    TendrlNS()

    NS.type = "sds"
    NS.publisher_id = "ceph_integration"

    NS.central_store_thread =\
        central_store.CephIntegrationEtcdCentralStore()
    NS.state_sync_thread = sds_sync.CephIntegrationSdsSyncStateThread()

    NS.node_context.save()

    # Check if Integration is part of any Tendrl imported/created sds cluster
    try:
        NS.tendrl_context = NS.tendrl_context.load()
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Integration %s is part of sds cluster" %
                        NS.tendrl_context.integration_id
                    }))

        # Copy the detected cluster's identity into the tendrl context.
        # NOTE(review): the "gluster-%s" prefix in a ceph integration looks
        # like a copy-paste from the gluster variant — confirm intended.
        _detected_cluster = NS.tendrl.objects.DetectedCluster().load()
        NS.tendrl_context.cluster_id = _detected_cluster.detected_cluster_id
        NS.tendrl_context.cluster_name = "gluster-%s" % _detected_cluster.detected_cluster_id
        NS.tendrl_context.sds_name = _detected_cluster.sds_pkg_name
        NS.tendrl_context.sds_version = _detected_cluster.sds_pkg_version

    except etcd.EtcdKeyNotFound:
        Event(
            Message(priority="error",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Node %s is not part of any sds cluster" %
                        NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started, "
                        "please Import or Create sds cluster in Tendrl "
                        "and include Node %s" % NS.node_context.node_id)

    NS.tendrl_context.save()
    NS.ceph.definitions.save()
    NS.ceph.config.save()

    m = CephIntegrationManager()
    m.start()

    complete = gevent.event.Event()

    def shutdown():
        # gevent signal handler: log and release the main wait loop.
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    """Entry point for the gluster-integration daemon (with tag cleanup).

    Waits until the node agent has detected the sds cluster, persists
    gluster definitions/config, starts the integration manager and
    blocks until a SIGTERM/SIGINT handler runs. On shutdown the node is
    removed from the gluster/integration tag indexes in etcd. SIGHUP
    reloads the service configuration.
    """
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()

    NS.type = "sds"
    NS.publisher_id = "gluster_integration"

    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()

    NS.message_handler_thread = GlusterNativeMessageHandler()

    # Busy-wait until tendrl-node-agent populates the integration_id.
    while NS.tendrl_context.integration_id is None or \
        NS.tendrl_context.integration_id == "":
        logger.log(
            "debug",
            NS.publisher_id,
            {
                "message": "Waiting for tendrl-node-agent %s to "
                "detect sds cluster (integration_id not found)" %
                NS.node_context.node_id
            }
        )
        NS.tendrl_context = NS.tendrl_context.load()

    logger.log(
        "debug",
        NS.publisher_id,
        {
            "message": "Integration %s is part of sds cluster" %
            NS.tendrl_context.integration_id
        }
    )

    NS.gluster.definitions.save()
    NS.gluster.config.save()

    # Provisioning plugin used for gluster deployment operations.
    pm = ProvisioningManager("GdeployPlugin")
    NS.gdeploy_plugin = pm.get_plugin()
    # Optional internal profiler, enabled via configuration flag.
    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    m = GlusterIntegrationManager()
    m.start()

    complete = threading.Event()

    def shutdown(signum, frame):
        # Signal handler: deregister this node from the etcd tag indexes,
        # then release the wait loop and stop the manager.
        logger.log(
            "debug",
            NS.publisher_id,
            {"message": "Signal handler: stopping"}
        )
        # Remove the node's name from gluster server tag
        try:
            gl_srvr_list = etcd_utils.read(
                "/indexes/tags/gluster/server"
            ).value
            gl_srvr_list = json.loads(gl_srvr_list)
            if NS.node_context.node_id in gl_srvr_list:
                gl_srvr_list.remove(NS.node_context.node_id)
            etcd_utils.write(
                "/indexes/tags/gluster/server",
                json.dumps(gl_srvr_list)
            )
            # Drop the provisioner tag if this node carried it.
            node_tags = json.loads(NS.node_context.tags)
            if 'provisioner/%s' % NS.tendrl_context.integration_id \
                in node_tags:
                etcd_utils.delete(
                    "/indexes/tags/provisioner/%s" %
                    NS.tendrl_context.integration_id,
                    recursive=True
                )
            int_srvr_list = etcd_utils.read(
                "/indexes/tags/tendrl/integration/gluster"
            ).value
            int_srvr_list = json.loads(int_srvr_list)
            if NS.node_context.node_id in int_srvr_list:
                int_srvr_list.remove(NS.node_context.node_id)
            etcd_utils.write(
                "/indexes/tags/tendrl/integration/gluster",
                json.dumps(int_srvr_list)
            )
        except etcd.EtcdKeyNotFound:
            # Best-effort cleanup: missing index keys are only logged.
            logger.log(
                "debug",
                NS.publisher_id,
                {
                    "message": "Couldnt remove node from "
                    "gluster servers list tag."
                    "integration_id: %s, node_id: %s" %
                    (
                        NS.tendrl_context.integration_id,
                        NS.node_context.node_id
                    )
                }
            )
            pass

        complete.set()
        m.stop()

    def reload_config(signum, frame):
        # SIGHUP handler: re-read config without restarting the daemon.
        logger.log(
            "debug",
            NS.publisher_id,
            {
                "message": "Signal handler: SIGHUP,"
                " reload service config"
            }
        )
        NS.gluster.ns.setup_common_objects()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    """Entry point for the tendrl node-agent service.

    Bootstraps the global ``NS`` namespace in a strict order (node_agent
    first, since it owns the config object), compiles and persists the
    object definitions to the central store, starts the sync/message
    threads and the NodeAgentManager, installs signal handlers, and then
    blocks until SIGTERM/SIGINT sets the completion event.

    NOTE(review): every statement below mutates process-global state on
    ``NS``; the ordering is significant and must not be rearranged.
    """
    # NS.node_agent contains the config object,
    # hence initialize it before any other NS
    node_agent.NodeAgentNS()
    # Init NS.tendrl
    TendrlNS()

    # Init NS.provisioning
    # TODO(team) remove NS.provisioner and use NS.provisioning.{ceph, gluster}
    # provisioning.ProvisioningNS()

    # Init NS.integrations.ceph
    # TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
    #  to NS.integrations.ceph
    # ceph.CephIntegrationNS()

    # Init NS.integrations.gluster
    # TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
    #  to NS.integrations.ceph
    GlusterIntegrationNS()

    # Compile all definitions
    NS.compiled_definitions = \
        NS.node_agent.objects.CompiledDefinitions()
    NS.compiled_definitions.merge_definitions([
        NS.tendrl.definitions, NS.node_agent.definitions,
        NS.integrations.gluster.definitions
    ])
    NS.node_agent.compiled_definitions = NS.compiled_definitions

    # Every process needs to set a NS.type
    # Allowed types are "node", "integration", "monitoring"
    NS.type = "node"

    NS.first_node_inventory_sync = True
    NS.state_sync_thread = node_sync.NodeAgentSyncThread()

    # Persist the bootstrapped state to the central store so other
    # tendrl components can read it.
    NS.compiled_definitions.save()
    NS.node_context.save()

    NS.tendrl_context.save()
    NS.node_agent.definitions.save()
    # NS.integrations.ceph.definitions.save()
    NS.node_agent.config.save()
    NS.publisher_id = "node_agent"
    NS.message_handler_thread = MessageHandler()

    # Provisioner plugin name comes from the parsed tendrl definitions.
    NS.gluster_provisioner = GlusterProvisioningManager(
        NS.tendrl.definitions.get_parsed_defs()["namespace.tendrl"]
        ['gluster_provisioner'])
    if NS.config.data.get("with_internal_profiling", False):
        # Imported lazily so profiling costs nothing when disabled.
        from tendrl.commons import profiler
        profiler.start()

    NS.gluster_sds_sync_running = False

    m = NodeAgentManager()
    m.start()

    complete = threading.Event()

    def shutdown(signum, frame):
        """SIGTERM/SIGINT handler: stop the manager and end the wait loop."""
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()
        m.stop()

        # Also stop the gluster sync thread if one was ever started.
        if NS.gluster_sds_sync_running:
            NS.gluster_integrations_sync_thread.stop()

    def reload_config(signum, frame):
        """SIGHUP handler: rebuild NS.config from disk and persist it."""
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: SIGHUP"}))
        # Re-instantiating the config class re-reads the config source.
        NS.config = NS.config.__class__()
        NS.config.save()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    # Poll with a timeout so signal handlers get a chance to run.
    while not complete.is_set():
        complete.wait(timeout=1)
# Exemple #24
# 0
def main():
    """Entry point for the tendrl ceph-integration service.

    Initialises the ceph and tendrl namespaces, verifies that this node
    belongs to a Tendrl-managed sds cluster (aborting with an exception
    otherwise), persists the namespace state, starts the
    CephIntegrationManager, and blocks until SIGTERM/SIGINT.
    """
    ceph_integration.CephIntegrationNS()
    TendrlNS()

    NS.type = "sds"
    NS.publisher_id = "ceph_integration"

    # Imported here (after NS bootstrap) because sds_sync reads NS at
    # import/instantiation time.
    from tendrl.ceph_integration import sds_sync

    NS.state_sync_thread = sds_sync.CephIntegrationSdsSyncStateThread()

    NS.node_context.save()

    # Check if Integration is part of any Tendrl imported/created sds cluster
    try:
        NS.tendrl_context = NS.tendrl_context.load()
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Integration %s is part of sds cluster" %
                        NS.tendrl_context.integration_id
                    }))

    except etcd.EtcdKeyNotFound:
        # No tendrl context in etcd: this node was never imported/created.
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Node %s is not part of any sds cluster" %
                        NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started, "
                        "please Import or Create sds cluster in Tendrl "
                        "and include Node %s" % NS.node_context.node_id)
    # NOTE(review): this branch duplicates the message/raise above for the
    # case where a context exists but carries no integration_id.
    if NS.tendrl_context.integration_id is None:
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Node %s is not part of any sds cluster" %
                        NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started, "
                        "please Import or Create sds cluster in Tendrl "
                        "and include Node %s" % NS.node_context.node_id)

    NS.tendrl_context.save()
    NS.ceph.definitions.save()
    NS.ceph.config.save()

    if NS.config.data.get("with_internal_profiling", False):
        # Imported lazily so profiling costs nothing when disabled.
        from tendrl.commons import profiler
        profiler.start()

    m = CephIntegrationManager()
    m.start()

    complete = gevent.event.Event()

    def shutdown():
        """Signal handler: set the completion event so the wait loop exits.

        Takes no arguments because gevent.signal invokes handlers without
        the (signum, frame) pair used by the stdlib signal module.
        NOTE(review): unlike the other services, this does not call
        m.stop() — confirm whether the manager shuts down on its own.
        """
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)

    # Poll with a timeout so the gevent hub keeps scheduling greenlets.
    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    """Entry point for the tendrl gluster-integration service.

    Initialises the gluster and tendrl namespaces, verifies that this
    node belongs to a Tendrl-managed sds cluster (raising otherwise),
    persists the namespace state, loads the gdeploy provisioning plugin,
    starts the GlusterIntegrationManager, and blocks until a
    SIGTERM/SIGINT signal sets the completion event.
    """
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()

    NS.type = "sds"
    NS.publisher_id = "gluster_integration"

    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()

    NS.message_handler_thread = GlusterNativeMessageHandler()

    NS.node_context.save()
    # Refuse to start unless this node is part of an imported/created
    # sds cluster (i.e. a tendrl context exists in etcd).
    try:
        NS.tendrl_context = NS.tendrl_context.load()
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Integration %s is part of sds cluster" %
                        NS.tendrl_context.integration_id
                    }))
    except etcd.EtcdKeyNotFound:
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Node %s is not part of any sds cluster" %
                        NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started,"
                        " please Import or Create sds cluster"
                        " in Tendrl and include Node %s" %
                        NS.node_context.node_id)

    NS.tendrl_context.save()
    NS.gluster.definitions.save()
    NS.gluster.config.save()

    # Load the gdeploy provisioning plugin used for gluster operations.
    pm = ProvisioningManager("GdeployPlugin")
    NS.gdeploy_plugin = pm.get_plugin()
    if NS.config.data.get("with_internal_profiling", False):
        # Imported lazily so profiling costs nothing when disabled.
        from tendrl.commons import profiler
        profiler.start()

    m = GlusterIntegrationManager()
    m.start()

    complete = threading.Event()

    def shutdown(signum, frame):
        """SIGTERM/SIGINT handler: stop the manager and end the wait loop."""
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()
        m.stop()

    def reload_config(signum, frame):
        """SIGHUP handler: rebuild NS.config from disk and persist it."""
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message":
                        "Signal handler: SIGHUP, reload service config"
                    }))
        # Re-instantiating the config class re-reads the config source.
        NS.config = NS.config.__class__()
        NS.config.save()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    # Poll with a timeout so signal handlers get a chance to run.
    while not complete.is_set():
        complete.wait(timeout=1)