def generate_template_config(service, template, out_dirname, context):
    """
    Generate the config from the jinja template.

    Args:
        service (str): Name of the magma service. Used for looking up the
                        config and mconfig
        template (str): Name of the input template, which is also used for
                        choosing the output filename
        out_dirname (str): Path of the output directory
        context (dict): Context to use for Jinja (the .yml config and mconfig
                        will be merged into this context)
    """
    # Get the template and the output filenames
    template_filename = _get_template_filename(template)
    out_filename = _get_template_out_filename(template, out_dirname)
    logging.info(
        "Generating config file: [%s] using template: [%s]",
        out_filename, template_filename,
    )
    template_context = {}
    # Generate the content to use from the service yml config and mconfig.
    try:
        template_context.update(load_service_config(service))
    except LoadConfigError as err:
        logging.warning(err)

    template_context.update(context)
    try:
        mconfig = load_service_mconfig_as_json(service)
        template_context.update(mconfig)
    except LoadConfigError as err:
        logging.warning(err)

    # Export snowflake to template.
    # TODO: export a hardware-derived ID that can be used by a field tech
    # to easily identify a specific device.
    template_context.setdefault("snowflake", make_snowflake())

    # Create the config file based on the template
    with open(template_filename, 'r') as template_file:
        template_str = template_file.read()
    output = Template(template_str).render(template_context)
    os.makedirs(out_dirname, exist_ok=True)
    write_to_file_atomically(out_filename, output)
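
# A minimal usage sketch for the function above (the service and template
# names and the output directory are illustrative assumptions, not from the
# original source):
if __name__ == '__main__':
    generate_template_config(
        service='magmad',        # resolves magmad.yml and the magmad mconfig
        template='nghttpx',      # also determines the output filename
        out_dirname='/tmp/magma_configs',
        context={'extra_key': 'extra_val'},  # merged into the Jinja context
    )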
Example #2
def main():
    """
    Main magmad function
    """
    service = MagmaService('magmad', mconfigs_pb2.MagmaD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    logging.info('Starting magmad for UUID: %s', snowflake.make_snowflake())

    # Create service manager
    services = service.config.get('magma_services')
    init_system = service.config.get('init_system', 'systemd')
    registered_dynamic_services = service.config.get(
        'registered_dynamic_services',
        [],
    )
    enabled_dynamic_services = []
    if service.mconfig is not None:
        enabled_dynamic_services = service.mconfig.dynamic_services

    # Poll the services' Service303 interface
    service_poller = ServicePoller(
        service.loop,
        service.config,
        enabled_dynamic_services,
    )
    service_poller.start()

    service_manager = ServiceManager(
        services,
        init_system,
        service_poller,
        registered_dynamic_services,
        enabled_dynamic_services,
    )

    # Get metrics service config
    metrics_config = service.config.get('metricsd')
    metrics_services = metrics_config['services']
    collect_interval = metrics_config['collect_interval']
    sync_interval = metrics_config['sync_interval']
    grpc_timeout = metrics_config['grpc_timeout']
    grpc_msg_size = metrics_config.get('max_grpc_msg_size_mb', 4)
    metrics_post_processor_fn = metrics_config.get('post_processing_fn')

    metric_scrape_targets = [
        ScrapeTarget(t['url'], t['name'], t['interval'])
        for t in metrics_config.get('metric_scrape_targets', [])
    ]

    # Create local metrics collector
    metrics_collector = MetricsCollector(
        services=metrics_services,
        collect_interval=collect_interval,
        sync_interval=sync_interval,
        grpc_timeout=grpc_timeout,
        grpc_max_msg_size_mb=grpc_msg_size,
        loop=service.loop,
        post_processing_fn=get_metrics_postprocessor_fn(
            metrics_post_processor_fn,
        ),
        scrape_targets=metric_scrape_targets,
    )

    # Poll and sync the metrics collector loops
    metrics_collector.run()

    # Start a background thread to stream updates from the cloud
    stream_client = None
    if service.config.get('enable_config_streamer', False):
        stream_client = StreamerClient(
            {
                CONFIG_STREAM_NAME:
                ConfigManager(
                    services,
                    service_manager,
                    service,
                    MconfigManagerImpl(),
                ),
            },
            service.loop,
        )

    # Create sync rpc client with a heartbeat of 30 seconds (timeout = 60s)
    sync_rpc_client = None
    if service.config.get('enable_sync_rpc', False):
        sync_rpc_client = SyncRPCClient(
            service.loop,
            30,
            service.config.get('print_grpc_payload', False),
        )

    first_time_bootstrap = True

    # This is called when bootstrap succeeds and when _bootstrap_check is
    # invoked but bootstrap is not needed. If it's invoked right after certs
    # are generated, certs_generated is true, control_proxy will restart.
    async def bootstrap_success_cb(certs_generated: bool):
        nonlocal first_time_bootstrap
        if first_time_bootstrap:
            if stream_client:
                stream_client.start()
            if sync_rpc_client:
                sync_rpc_client.start()
            first_time_bootstrap = False
        if certs_generated:
            svcs_to_restart = []
            if 'control_proxy' in services:
                svcs_to_restart.append('control_proxy')

            # fluent-bit caches TLS client certs in memory, so we need to
            # restart it whenever the certs change
            fresh_mconfig = get_mconfig_manager().load_service_mconfig(
                'magmad',
                mconfigs_pb2.MagmaD(),
            )
            dynamic_svcs = fresh_mconfig.dynamic_services or []
            if 'td-agent-bit' in dynamic_svcs:
                svcs_to_restart.append('td-agent-bit')

            await service_manager.restart_services(services=svcs_to_restart)

    # Create bootstrap manager
    bootstrap_manager = BootstrapManager(service, bootstrap_success_cb)

    # Initialize kernel version poller if it is enabled
    kernel_version_poller = None
    if service.config.get('enable_kernel_version_checking', False):
        kernel_version_poller = KernelVersionsPoller(service)
        kernel_version_poller.start()

    # gateway status generator to bundle various information about this
    # gateway into an object.
    gateway_status_factory = GatewayStatusFactory(
        service=service,
        service_poller=service_poller,
        kernel_version_poller=kernel_version_poller,
    )

    # _grpc_client_manager to manage grpc client recycling
    grpc_client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Initialize StateReporter
    state_reporter = StateReporter(
        config=service.config,
        mconfig=service.mconfig,
        loop=service.loop,
        bootstrap_manager=bootstrap_manager,
        gw_status_factory=gateway_status_factory,
        grpc_client_manager=grpc_client_manager,
    )

    # Initialize ServiceHealthWatchdog
    service_health_watchdog = ServiceHealthWatchdog(
        config=service.config,
        loop=service.loop,
        service_poller=service_poller,
        service_manager=service_manager,
    )

    # Start _bootstrap_manager
    bootstrap_manager.start_bootstrap_manager()

    # Start all services when magmad comes up
    service.loop.create_task(service_manager.start_services())

    # Start state reporting loop
    state_reporter.start()

    # Start service timeout health check loop
    service_health_watchdog.start()

    # Start upgrade manager loop
    if service.config.get('enable_upgrade_manager', False):
        upgrader = _get_upgrader_impl(service)
        service.loop.create_task(start_upgrade_loop(service, upgrader))

    # Start network health metric collection
    if service.config.get('enable_network_monitor', False):
        service.loop.create_task(metrics_collection_loop(service.config))

    # Create generic command executor
    command_executor = None
    if service.config.get('generic_command_config', None):
        command_executor = get_command_executor_impl(service)

    # Start loop to monitor unattended upgrade status
    service.loop.create_task(monitor_unattended_upgrade_status())

    # Add all servicers to the server
    magmad_servicer = MagmadRpcServicer(
        service,
        services,
        service_manager,
        get_mconfig_manager(),
        command_executor,
        service.loop,
        service.config.get('print_grpc_payload', False),
    )
    magmad_servicer.add_to_server(service.rpc_server)

    if SDWatchdog.has_notify():
        # Create systemd watchdog
        sdwatchdog = SDWatchdog(
            tasks=[bootstrap_manager, state_reporter],
            update_status=True,
        )
        # Start watchdog loop
        service.loop.create_task(sdwatchdog.run())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
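
# A toy sketch of the first-time gating pattern used by bootstrap_success_cb
# above (the names and prints are illustrative, not from magma):
import asyncio

def make_success_cb():
    first_time = True

    async def on_success(certs_generated: bool):
        nonlocal first_time
        if first_time:
            first_time = False
            print('first successful bootstrap: start streaming clients once')
        if certs_generated:
            print('new certs: restart services that cache them')

    return on_success

# cb = make_success_cb()
# asyncio.run(cb(True))   # prints both lines
# asyncio.run(cb(True))   # prints only the certs line: first_time is False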
Example #3
def main():
    """
    Main magmad function
    """
    service = MagmaService('magmad')

    logging.info('Starting magmad for UUID: %s', snowflake.make_snowflake())

    # Create service manager
    services = service.config['magma_services']
    init_system = service.config.get('init_system', 'systemd')
    registered_dynamic_services = service.config.get(
        'registered_dynamic_services', [])
    enabled_dynamic_services = []
    if service.mconfig is not None:
        enabled_dynamic_services = service.mconfig.dynamic_services

    # Poll the services' Service303 interface
    service_poller = ServicePoller(service.loop, service.config)
    service_poller.start()

    service_manager = ServiceManager(services, init_system, service_poller,
                                     registered_dynamic_services,
                                     enabled_dynamic_services)

    # Start a background thread to stream updates from the cloud
    stream_client = None
    if service.config.get('enable_config_streamer', False):
        stream_client = StreamerClient(
            {
                CONFIG_STREAM_NAME:
                ConfigManager(
                    services,
                    service_manager,
                    service,
                    MconfigManagerImpl(),
                ),
                MCONFIG_VIEW_STREAM_NAME:
                StreamingMconfigCallback(
                    services,
                    service_manager,
                    service,
                    StreamedMconfigManager(),
                )
            },
            service.loop,
        )

    # Schedule periodic checkins
    checkin_manager = CheckinManager(service, service_poller)

    # Create sync rpc client with a timeout of 60 seconds
    sync_rpc_client = None
    if service.config.get('enable_sync_rpc', False):
        sync_rpc_client = SyncRPCClient(service.loop, 60)

    first_time_bootstrap = True

    # This is called when bootstrap succeeds and when _bootstrap_check is
    # invoked but bootstrap is not needed. If it's invoked right after certs
    # are generated, certs_generated is true, control_proxy will restart.
    def bootstrap_success_cb(certs_generated):
        nonlocal first_time_bootstrap
        if first_time_bootstrap:
            if stream_client:
                stream_client.start()
            checkin_manager.try_checkin()
            if sync_rpc_client:
                sync_rpc_client.start()
            first_time_bootstrap = False
        if certs_generated and 'control_proxy' in services:
            service.loop.create_task(
                service_manager.restart_services(services=['control_proxy']))

    # Create bootstrap manager
    bootstrap_manager = BootstrapManager(service, bootstrap_success_cb)

    def checkin_failure_cb(err_code):
        bootstrap_manager.on_checkin_fail(err_code)

    checkin_manager.set_failure_cb(checkin_failure_cb)

    # Start bootstrap_manager after checkin_manager's callback is set
    bootstrap_manager.start_bootstrap_manager()

    # Start all services when magmad comes up
    service.loop.create_task(service_manager.start_services())

    # Start upgrade manager loop
    if service.config.get('enable_upgrade_manager', False):
        upgrader = _get_upgrader_impl(service)
        service.loop.create_task(start_upgrade_loop(service, upgrader))

    # Start network health metric collection
    if service.config.get('enable_network_monitor', False):
        service.loop.create_task(metrics_collection_loop(service.config))

    if service.config.get('enable_systemd_tailer', False):
        service.loop.create_task(start_systemd_tailer(service.config))

    # Start loop to monitor unattended upgrade status
    service.loop.create_task(monitor_unattended_upgrade_status(service.loop))

    # Add all servicers to the server
    magmad_servicer = MagmadRpcServicer(
        service,
        services,
        service_manager,
        get_mconfig_manager(),
        service.loop,
    )
    magmad_servicer.add_to_server(service.rpc_server)

    if SDWatchdog.has_notify():
        # Create systemd watchdog
        sdwatchdog = SDWatchdog(tasks=[bootstrap_manager, checkin_manager],
                                update_status=True)
        # Start watchdog loop
        service.loop.create_task(sdwatchdog.run())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #4
def main():
    """
    Main magmad function
    """
    service = MagmaService('magmad', mconfigs_pb2.MagmaD())

    logging.info('Starting magmad for UUID: %s', snowflake.make_snowflake())

    # Create service manager
    services = service.config['magma_services']
    init_system = service.config.get('init_system', 'systemd')
    registered_dynamic_services = service.config.get(
        'registered_dynamic_services', [])
    enabled_dynamic_services = []
    if service.mconfig is not None:
        enabled_dynamic_services = service.mconfig.dynamic_services

    # Poll the services' Service303 interface
    service_poller = ServicePoller(service.loop, service.config)
    service_poller.start()

    service_manager = ServiceManager(services, init_system, service_poller,
                                     registered_dynamic_services,
                                     enabled_dynamic_services)

    # Get metrics service config
    metrics_config = service.config['metricsd']
    metrics_services = metrics_config['services']
    collect_interval = metrics_config['collect_interval']
    sync_interval = metrics_config['sync_interval']
    grpc_timeout = metrics_config['grpc_timeout']
    queue_length = metrics_config['queue_length']
    metrics_post_processor_fn = metrics_config.get('post_processing_fn')

    # Create local metrics collector
    metrics_collector = MetricsCollector(
        metrics_services,
        collect_interval,
        sync_interval,
        grpc_timeout,
        queue_length,
        service.loop,
        get_metrics_postprocessor_fn(metrics_post_processor_fn),
    )

    # Poll and sync the metrics collector loops
    metrics_collector.run()

    # Start a background thread to stream updates from the cloud
    stream_client = None
    if service.config.get('enable_config_streamer', False):
        stream_client = StreamerClient(
            {
                CONFIG_STREAM_NAME:
                ConfigManager(
                    services,
                    service_manager,
                    service,
                    MconfigManagerImpl(),
                ),
            },
            service.loop,
        )

    # Schedule periodic checkins
    checkin_manager = CheckinManager(service, service_poller)

    # Create sync rpc client with a heartbeat of 30 seconds (timeout = 60s)
    sync_rpc_client = None
    if service.config.get('enable_sync_rpc', False):
        sync_rpc_client = SyncRPCClient(service.loop, 30)

    first_time_bootstrap = True

    # This is called when bootstrap succeeds and when _bootstrap_check is
    # invoked but bootstrap is not needed. If it's invoked right after certs
    # are generated, certs_generated is true, control_proxy will restart.
    async def bootstrap_success_cb(certs_generated):
        nonlocal first_time_bootstrap
        if first_time_bootstrap:
            if stream_client:
                stream_client.start()
            await checkin_manager.try_checkin()
            if sync_rpc_client:
                sync_rpc_client.start()
            first_time_bootstrap = False
        if certs_generated and 'control_proxy' in services:
            service.loop.create_task(
                service_manager.restart_services(services=['control_proxy']))

    # Create bootstrap manager
    bootstrap_manager = BootstrapManager(service, bootstrap_success_cb)

    async def checkin_failure_cb(err_code):
        await bootstrap_manager.on_checkin_fail(err_code)

    checkin_manager.set_failure_cb(checkin_failure_cb)

    # Start bootstrap_manager after checkin_manager's callback is set
    bootstrap_manager.start_bootstrap_manager()

    # Schedule periodic state reporting
    state_manager = StateReporter(service, checkin_manager)
    state_manager.start()

    # Start all services when magmad comes up
    service.loop.create_task(service_manager.start_services())

    # Start upgrade manager loop
    if service.config.get('enable_upgrade_manager', False):
        upgrader = _get_upgrader_impl(service)
        service.loop.create_task(start_upgrade_loop(service, upgrader))

    # Start network health metric collection
    if service.config.get('enable_network_monitor', False):
        service.loop.create_task(metrics_collection_loop(service.config))

    if service.config.get('enable_systemd_tailer', False):
        service.loop.create_task(start_systemd_tailer(service.config))

    # Create generic command executor
    command_executor = None
    if service.config.get('generic_command_config', None):
        command_executor = get_command_executor_impl(service)

    # Start loop to monitor unattended upgrade status
    service.loop.create_task(monitor_unattended_upgrade_status(service.loop))

    # Add all servicers to the server
    magmad_servicer = MagmadRpcServicer(
        service,
        services,
        service_manager,
        get_mconfig_manager(),
        command_executor,
        service.loop,
    )
    magmad_servicer.add_to_server(service.rpc_server)

    if SDWatchdog.has_notify():
        # Create systemd watchdog
        sdwatchdog = SDWatchdog(tasks=[bootstrap_manager, checkin_manager],
                                update_status=True)
        # Start watchdog loop
        service.loop.create_task(sdwatchdog.run())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #5
def startServer(localSettings='localSettings',
                appPort=7000,
                logFilename="logFile.log"):
    global theApp
    bufferSize = 64 * 1024
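    # The persistent per-machine snowflake UUID doubles as the AES password
    # for the encrypted server list handled below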
    password = str(
        snowflake.make_snowflake(
            snowflake_file=os.path.join(base_dir, 'snowflake')))
    manager = multiprocessing.Manager()
    servers = manager.list()
    workers = manager.list()
    backupInfo = manager.dict()
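    # Pre-populate the shared workers list with 10000 placeholder (-1) entries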
    for _ in range(10000):
        workers.append(-1)
    if os.path.isfile(os.path.join(base_dir, 'secretEnc')):
        pyAesCrypt.decryptFile(os.path.join(base_dir, "secretEnc"),
                               os.path.join(base_dir, "secretPlain"), password,
                               bufferSize)
        with open(os.path.join(base_dir, "secretPlain"), 'r') as file:
            aList = json.load(file)
        for server in aList:
            servers.append(server)
        os.remove(os.path.join(base_dir, "secretPlain"))
    if os.path.isabs(localSettings):
        setFile = localSettings
    else:
        setFile = os.path.join(base_dir, localSettings)
    settings = setupLocalSettings(setFile)
    theApp = app.createApp(theServers=servers, theWorkers=workers)
    p = multiprocessing.Process(target=backupServer.startBackup,
                                args=(os.path.join(base_dir, 'static',
                                                   'rsync'), backupInfo,
                                      setFile))
    p.start()
    multiprocesses.append({
        'name': 'Backup',
        'pid': p.pid,
        'description': 'DAQBroker backup process'
    })
    time.sleep(1)
    p = multiprocessing.Process(target=logServer.logServer,
                                args=(settings["logport"], base_dir),
                                kwargs={'logFilename': logFilename})
    p.start()
    multiprocesses.append({
        'name': 'Logger',
        'pid': p.pid,
        'description': 'DAQBroker log process'
    })
    time.sleep(1)
    p = multiprocessing.Process(target=commServer.collector,
                                args=(servers, settings["commport"],
                                      settings["logport"], backupInfo, setFile))
    p.start()
    multiprocesses.append({
        'name': 'Collector',
        'pid': p.pid,
        'description': 'DAQBroker message collector process'
    })
    time.sleep(1)
    p = multiprocessing.Process(target=monitorServer.producer,
                                args=(servers, settings["commport"],
                                      settings["logport"], False, backupInfo,
                                      workers, setFile))
    p.start()
    multiprocesses.append({
        'name': 'Producer',
        'pid': p.pid,
        'description': 'DAQBroker broadcasting server process'
    })
    time.sleep(1)
    print("STARTED", multiprocesses)
    http_server = HTTPServer(WSGIContainer(theApp))
    http_server.listen(appPort)
    webbrowser.open('http://localhost:' + str(appPort) + '/daqbroker')
    IOLoop.instance().start()
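
# The common thread in these examples: on first call, make_snowflake()
# generates a per-machine UUID and caches it in a file (a default path, or
# an explicit snowflake_file= as in Example #5), so later calls return the
# same value. A minimal sketch, assuming the same `snowflake` module the
# examples import; the print is purely illustrative:
import snowflake

device_id = snowflake.make_snowflake()
print('This device is identified as %s' % device_id)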