def run(self):
        self.setup_logging()
        all_service_config = read_services_configuration()
        system_config = load_system_paasta_config()
        service_replication = self.get_service_replication(
            all_services=all_service_config.keys(),
            synapse_host=system_config.get_default_synapse_host(),
            synapse_port=system_config.get_synapse_port(),
            synapse_haproxy_url_format=system_config.get_synapse_haproxy_url_format(),
        )

        checked_services = []
        for service, service_config in all_service_config.items():
            do_monitoring, monitoring_config = extract_replication_info(
                service_config
            )

            if do_monitoring:
                self.log.debug("Checking {0}".format(service))
                replication = service_replication.get('%s.main' % service, 0)
                event = do_replication_check(service, monitoring_config,
                                             replication)
                checked_services.append(service)
                self.log.debug("Result for {0}: {1}".format(service,
                                                            event['output']))
                report_event(event)
            else:
                self.log.debug("Not checking {0}".format(service))

        self.ok("Finished checking services: {0}".format(checked_services))
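The replication lookup above keys Synapse/HAProxy backend counts by '<service>.main' and falls back to 0 when a service has no registered backends. A minimal sketch of that lookup with made-up data (the service names and counts below are assumptions, not real PaaSTA output):

# Hypothetical mapping as returned by get_service_replication:
service_replication = {'service_a.main': 3, 'service_b.main': 0}

# The lookup used in run() defaults to 0 for services Synapse does not know about:
replication = service_replication.get('%s.main' % 'service_c', 0)  # -> 0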
Example #3
def test_read_services_configuration(self, read_patch, listdir_patch, abs_patch):
    expected = {'1': 'hello', '2': 'hello', '3': 'hello'}
    actual = service_configuration_lib.read_services_configuration(soa_dir='testdir')
    abs_patch.assert_called_once_with('testdir')
    listdir_patch.assert_called_once_with('nodir')
    read_patch.assert_has_calls(
        [mock.call('nodir', '1'), mock.call('nodir', '2'), mock.call('nodir', '3')],
    )
    assert expected == actual
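The snippet above omits the mock.patch decorators that inject read_patch, listdir_patch, and abs_patch. One plausible reconstruction, inferred purely from the assertions (the patch targets and return values are assumptions, not the original test source):

import mock

# Decorators apply bottom-up, so the first parameter after self receives the
# bottommost patch. The targets and return values below are guesses that match
# the assertions: abspath -> 'nodir', listdir -> the three service names, and
# each per-service read returning 'hello'.
@mock.patch('os.path.abspath', return_value='nodir')
@mock.patch('os.listdir', return_value=['1', '2', '3'])
@mock.patch('service_configuration_lib.read_service_configuration', return_value='hello')
def test_read_services_configuration(self, read_patch, listdir_patch, abs_patch):
    ...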
Example #4
def _load_yelpsoa_configs(context, service):
    all_services = read_services_configuration(soa_dir=context.fake_yelpsoa_configs)
    context.my_config = all_services[service]
Example #5
def list_services():
    """Returns a sorted list of all services"""
    return sorted(read_services_configuration().keys())
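read_services_configuration() returns a dict keyed by service name, so sorting its keys gives a stable listing. A short usage sketch (the directory contents shown are assumptions for illustration):

# Suppose the SOA configuration directory defines two services:
# read_services_configuration() -> {'service_b': {...}, 'service_a': {...}}
list_services()  # -> ['service_a', 'service_b']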
Example #7
def list_services(**kwargs):
    """Returns a sorted list of all services"""
    return sorted(read_services_configuration().keys())
Example #8
def _get_related_jobs_and_configs(cluster, soa_dir=DEFAULT_SOA_DIR):
    """
    For all the Chronos jobs defined in the cluster, extract the groups of related Chronos jobs.
    Two jobs are considered related to each other if a dependency relationship exists between them.

    :param cluster: cluster from which to extract the jobs
    :return: tuple(related jobs mapping, jobs configuration)
    """
    chronos_configs = {}
    for service in service_configuration_lib.read_services_configuration(
            soa_dir=soa_dir):
        for instance in read_chronos_jobs_for_service(service=service,
                                                      cluster=cluster,
                                                      soa_dir=soa_dir):
            try:
                chronos_configs[(service, instance)] = load_chronos_job_config(
                    service=service,
                    instance=instance,
                    cluster=cluster,
                    soa_dir=soa_dir,
                )
            except NoDeploymentsAvailable:
                pass

    # Adjacency list used by the DFS algorithm
    adjacency_list = defaultdict(set)
    for instance, config in chronos_configs.items():
        for parent in config.get_parents():
            parent = decompose_job_id(paasta_to_chronos_job_name(parent))
            # Treat the graph as undirected to simplify identification of connected components
            adjacency_list[parent].add(instance)
            adjacency_list[instance].add(parent)

    def cached_dfs(known_dfs_results,
                   node,
                   neighbours_mapping,
                   ignore_cycles=False):
        """
        Cached version of DFS algorithm (tuned for extraction of connected components).

        :param known_dfs_results: previous results of DFS
        :type known_dfs_results: list
        :param node: starting node for DFS execution
        :param neighbours_mapping: graph adjacency list
        :type neighbours_mapping: dict
        :param ignore_cycles: if False, raise an exception when a cycle is identified in the graph
        :type ignore_cycles: bool

        :return: set of nodes part of the same connected component of `node`
        :rtype: set
        """
        for known_dfs_result in known_dfs_results:
            if node in known_dfs_result:
                return known_dfs_result

        visited_nodes = set(
            dfs(
                node=node,
                neighbours_mapping=neighbours_mapping,
                ignore_cycles=ignore_cycles,
            ))
        known_dfs_results.append(visited_nodes)

        return visited_nodes

    # Build connected components
    known_connected_components = []
    for instance in chronos_configs:
        cached_dfs(  # Use the cached DFS so the traversal is skipped when the result is already known
            known_dfs_results=known_connected_components,
            node=instance,
            ignore_cycles=True,  # The graph is undirected, so cycles are expected and must be ignored
            neighbours_mapping=adjacency_list,
        )

    connected_components = {}
    for connected_component in known_connected_components:
        for node in connected_component:
            connected_components[node] = connected_component

    return connected_components, chronos_configs
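The caching in cached_dfs works because connected components partition the graph: once a node shows up in a previously computed component, that component is its answer, so the traversal never runs twice for the same component. A self-contained sketch of the same idea (the dfs helper and the toy graph below are illustrative assumptions, not the PaaSTA implementation, which additionally supports cycle detection):

from collections import defaultdict

def dfs(node, neighbours_mapping):
    # Iterative DFS: returns every node reachable from `node`.
    stack, seen = [node], set()
    while stack:
        current = stack.pop()
        if current not in seen:
            seen.add(current)
            stack.extend(neighbours_mapping[current])
    return seen

def connected_components(nodes, neighbours_mapping):
    known = []  # Previously computed components double as the cache.
    for node in nodes:
        if not any(node in component for component in known):
            known.append(dfs(node, neighbours_mapping))
    # Map every node to the component that contains it.
    return {node: component for component in known for node in component}

# Toy undirected graph: a-b-c form one component, d is isolated.
adjacency = defaultdict(set)
for left, right in [('a', 'b'), ('b', 'c')]:
    adjacency[left].add(right)
    adjacency[right].add(left)
print(connected_components(['a', 'b', 'c', 'd'], adjacency))
# -> {'a': {'a', 'b', 'c'}, 'b': {'a', 'b', 'c'}, 'c': {'a', 'b', 'c'}, 'd': {'d'}}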