def setUpModule():
  """Loads a dummy configuration so that client_vfs can be initialized."""
  # client_vfs needs a config to be loaded to work.
  dummy_config = package.ResourcePath(
      "grr-response-test", "grr_response_test/test_data/dummyconfig.yaml")
  flags.FLAGS.config = dummy_config
  config_lib.ParseConfigCommandLine()
  client_vfs.Init()
def TestInit():
  """Only used in tests and will rerun all the hooks to create a clean state."""
  global INIT_RAN

  # Install a fresh Prometheus-backed stats collector for this test run.
  stats_collector = prometheus_stats_collector.PrometheusStatsCollector()
  stats_collector_instance.Set(stats_collector)

  # Tests use both the server template grr_server.yaml as a primary config file
  # (this file does not contain all required options, e.g. private keys), and
  # additional configuration in test_data/grr_test.yaml which contains typical
  # values for a complete installation.
  flags.FLAGS.config = package.ResourcePath("grr-response-core",
                                            "install_data/etc/grr-server.yaml")

  flags.FLAGS.secondary_configs.append(
      package.ResourcePath("grr-response-test",
                           "grr_response_test/test_data/grr_test.yaml"))

  # This config contains non-public settings that should be applied during
  # tests.
  extra_test_config = config.CONFIG["Test.additional_test_config"]
  if os.path.exists(extra_test_config):
    flags.FLAGS.secondary_configs.append(extra_test_config)

  # Prevent using the default writeback location since it may clash with local
  # setup.
  writeback_filepath = temp.TempFilePath(prefix="grr_writeback", suffix=".yaml")
  config.CONFIG.global_override["Config.writeback"] = writeback_filepath

  # Tests additionally add a test configuration file.
  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()

  # We are running a test so let the config system know that.
  config.CONFIG.AddContext(contexts.TEST_CONTEXT,
                           "Context applied when we run tests.")

  if not INIT_RAN:
    # One-time subsystem setup: logging, blob store, data store and the
    # various "...Once" server components.
    server_logging.ServerLoggingStartupInit()
    server_logging.SetTestVerbosity()

    blob_store_test_lib.UseTestBlobStore()
    data_store.InitializeDataStore()

    artifact.LoadArtifactsOnce()
    checks.LoadChecksFromFilesystemOnce()
    client_approval_auth.InitializeClientApprovalAuthorizationManagerOnce()
    email_alerts.InitializeEmailAlerterOnce()
    http_api.InitializeHttpRequestHandlerOnce()
    ip_resolver.IPResolverInitOnce()
    stats_server.InitializeStatsServerOnce()
    webauth.InitializeWebAuthOnce()

  # Make sure the housekeeper thread exists, then shut it down so tests do
  # not leak a background thread.
  if not utils.TimeBasedCache.house_keeper_thread:
    utils.TimeBasedCache()
  utils.TimeBasedCache.house_keeper_thread.exit = True
  utils.TimeBasedCache.house_keeper_thread.join()

  INIT_RAN = True
def main(argv):
  """Entry point: serves a SharedMemoryDB server forever."""
  del argv  # Unused.

  config_lib.ParseConfigCommandLine()

  # The listening port comes from the loaded configuration.
  listen_port = config.CONFIG["SharedMemoryDB.port"]
  shared_mem_db.SharedMemoryDBServer(listen_port).serve_forever()
def main(argv):
  """Entry point: serves a SharedFakeDataStore server forever."""
  del argv  # Unused.

  config_lib.ParseConfigCommandLine()

  # The listening port comes from the loaded configuration.
  listen_port = config.CONFIG["SharedFakeDataStore.port"]
  shared_fake_data_store.SharedFakeDataStoreServer(listen_port).serve_forever()
def Init():
  """Run all required startup routines and initialization hooks."""
  # Set up a temporary syslog handler so we have somewhere to log problems
  # with ConfigInit() which needs to happen before we can start our create our
  # proper logging setup.
  syslog_logger = logging.getLogger("TempLogger")
  if os.path.exists("/dev/log"):
    handler = logging.handlers.SysLogHandler(address="/dev/log")
  else:
    handler = logging.handlers.SysLogHandler()
  syslog_logger.addHandler(handler)

  # The default behavior of server components is to raise errors when
  # encountering unknown config options.
  flags.FLAGS.disallow_missing_config_definitions = True

  try:
    config_lib.SetPlatformArchContext()
    config_lib.ParseConfigCommandLine(rename_invalid_writeback=False)
  except config_lib.Error:
    syslog_logger.exception("Died during config initialization")
    raise

  # Register the Prometheus-backed stats collector against the global
  # Prometheus registry.
  stats_collector = prometheus_stats_collector.PrometheusStatsCollector(
      registry=prometheus_client.REGISTRY)
  stats_collector_instance.Set(stats_collector)

  server_logging.ServerLoggingStartupInit()

  # Register pluggable components before touching the data store.
  bs_registry_init.RegisterBlobStores()
  all_decoders.Register()
  all_parsers.Register()
  ec_registry_init.RegisterExportConverters()
  gui_api_registry_init.RegisterApiCallRouters()

  data_store.InitializeDataStore()

  # The API auth manager is only needed when running as the Admin UI.
  if contexts.ADMIN_UI_CONTEXT in config.CONFIG.context:
    api_auth_manager.InitializeApiAuthManager()

  artifact.LoadArtifactsOnce()  # Requires aff4.AFF4Init.
  checks.LoadChecksFromFilesystemOnce()
  client_approval_auth.InitializeClientApprovalAuthorizationManagerOnce()
  cronjobs.InitializeCronWorkerOnce()
  email_alerts.InitializeEmailAlerterOnce()
  http_api.InitializeHttpRequestHandlerOnce()
  ip_resolver.IPResolverInitOnce()
  stats_server.InitializeStatsServerOnce()
  webauth.InitializeWebAuthOnce()

  # Exempt config updater from this check because it is the one responsible for
  # setting the variable.
  if not config.CONFIG.ContextApplied("ConfigUpdater Context"):
    if not config.CONFIG.Get("Server.initialized"):
      raise RuntimeError("Config not initialized, run \"grr_config_updater"
                         " initialize\". If the server is already configured,"
                         " add \"Server.initialized: True\" to your config.")
def TestInit():
  """Only used in tests and will rerun all the hooks to create a clean state."""
  global INIT_RAN

  # Register server, client and communicator metrics with a fresh default
  # (in-memory) stats collector.
  metric_metadata = server_metrics.GetMetadata()
  metric_metadata.extend(client_metrics.GetMetadata())
  metric_metadata.extend(communicator.GetMetricMetadata())
  stats_collector = default_stats_collector.DefaultStatsCollector(
      metric_metadata)
  stats_collector_instance.Set(stats_collector)

  # Tests use both the server template grr_server.yaml as a primary config file
  # (this file does not contain all required options, e.g. private keys), and
  # additional configuration in test_data/grr_test.yaml which contains typical
  # values for a complete installation.
  flags.FLAGS.config = package.ResourcePath(
      "grr-response-core", "install_data/etc/grr-server.yaml")

  flags.FLAGS.secondary_configs.append(
      package.ResourcePath("grr-response-test",
                           "grr_response_test/test_data/grr_test.yaml"))

  # This config contains non-public settings that should be applied during
  # tests.
  extra_test_config = config.CONFIG["Test.additional_test_config"]
  if os.path.exists(extra_test_config):
    flags.FLAGS.secondary_configs.append(extra_test_config)

  # Tests additionally add a test configuration file.
  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()

  # We are running a test so let the config system know that.
  config.CONFIG.AddContext(contexts.TEST_CONTEXT,
                           "Context applied when we run tests.")

  # Use the fake in-memory data store unless --test_data_store overrides it.
  test_ds = flags.FLAGS.test_data_store
  if test_ds is None:
    test_ds = compatibility.GetName(fake_data_store.FakeDataStore)

  config.CONFIG.Set("Datastore.implementation", test_ds)

  if not INIT_RAN:
    # One-time setup: logging and the test blob store.
    server_logging.ServerLoggingStartupInit()
    server_logging.SetTestVerbosity()
    blob_store_test_lib.UseTestBlobStore()

  # Rerun the registry init hooks each call to get a clean state.
  registry.TestInit()

  # Swap in a test-specific data store if the implementation provides one.
  db = data_store.DB.SetupTestDB()
  if db:
    data_store.DB = db
  data_store.DB.Initialize()
  aff4.AFF4InitHook().Run()

  INIT_RAN = True
def TestInit():
  """Only used in tests and will rerun all the hooks to create a clean state."""
  global INIT_RAN

  # Install the global stats collector and its thread pool metrics once.
  if stats.STATS is None:
    stats.STATS = stats.StatsCollector()
    threadpool.InitializeMetrics()

  # Tests use both the server template grr_server.yaml as a primary config file
  # (this file does not contain all required options, e.g. private keys), and
  # additional configuration in test_data/grr_test.yaml which contains typical
  # values for a complete installation.
  flags.FLAGS.config = config_lib.Resource().Filter(
      "install_data/etc/grr-server.yaml@grr-response-core")
  flags.FLAGS.secondary_configs.append(config_lib.Resource().Filter(
      "grr_response_test/test_data/grr_test.yaml@grr-response-test"))

  # This config contains non-public settings that should be applied during
  # tests.
  extra_test_config = config.CONFIG["Test.additional_test_config"]
  if os.path.exists(extra_test_config):
    flags.FLAGS.secondary_configs.append(extra_test_config)

  # Tests additionally add a test configuration file.
  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()

  # We are running a test so let the config system know that.
  config.CONFIG.AddContext(contexts.TEST_CONTEXT,
                           "Context applied when we run tests.")

  # Use the fake in-memory data store unless --test_data_store overrides it,
  # and always use the DB-backed blob store.
  test_ds = flags.FLAGS.test_data_store
  if test_ds is None:
    test_ds = utils.GetName(fake_data_store.FakeDataStore)

  config.CONFIG.Set("Datastore.implementation", test_ds)
  config.CONFIG.Set("Blobstore.implementation",
                    utils.GetName(db_blob_store.DbBlobstore))

  if not INIT_RAN:
    # One-time logging setup.
    server_logging.ServerLoggingStartupInit()
    server_logging.SetTestVerbosity()

  # Rerun the registry init hooks each call to get a clean state.
  registry.TestInit()

  # Swap in a test-specific data store if the implementation provides one.
  db = data_store.DB.SetupTestDB()
  if db:
    data_store.DB = db
  data_store.DB.Initialize()
  aff4.AFF4InitHook().Run()

  INIT_RAN = True
def GetClientConfig(filename):
  """Write client config to filename."""
  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()

  context = list(grr_config.CONFIG.context) + ["Client Context"]

  # Disable timestamping so we can get a reproducible and cacheable config
  # file.
  config_data = build_helpers.GetClientConfig(
      context, validate=True, deploy_timestamp=False)

  with open(filename, "w") as out_file:
    out_file.write(config_data)
    build_helpers.WriteBuildYaml(
        out_file, build_timestamp=False, context=context)
def ClientInit():
  """Run all startup routines for the client."""
  # Lazily create the global stats collector.
  if stats.STATS is None:
    stats.STATS = stats.StatsCollector()

  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()
  client_logging.LogInit()
  registry.Init()

  # Outside of a client build, persist the writeback-able options.
  if not config.CONFIG.ContextApplied(contexts.CLIENT_BUILD_CONTEXT):
    for option in ("Client.labels", "Client.proxy_servers",
                   "Client.tempdir_roots"):
      config.CONFIG.Persist(option)
def ClientInit():
  """Run all startup routines for the client."""
  registry_init.RegisterClientActions()
  stats_collector_instance.Set(default_stats_collector.DefaultStatsCollector())

  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()
  client_logging.LogInit()
  all_parsers.Register()

  # Outside of a client build, persist the writeback-able options.
  if not config.CONFIG.ContextApplied(contexts.CLIENT_BUILD_CONTEXT):
    for option in ("Client.labels", "Client.proxy_servers",
                   "Client.tempdir_roots"):
      config.CONFIG.Persist(option)
def Init():
  """Run all required startup routines and initialization hooks."""
  global INIT_RAN
  if INIT_RAN:
    return

  # Set up a temporary syslog handler so we have somewhere to log problems
  # with ConfigInit() which needs to happen before we can start our create our
  # proper logging setup.
  syslog_logger = logging.getLogger("TempLogger")
  if os.path.exists("/dev/log"):
    handler = logging.handlers.SysLogHandler(address="/dev/log")
  else:
    handler = logging.handlers.SysLogHandler()
  syslog_logger.addHandler(handler)

  try:
    config_lib.SetPlatformArchContext()
    config_lib.ParseConfigCommandLine()
  except config_lib.Error:
    syslog_logger.exception("Died during config initialization")
    raise

  # Register server and communicator metrics with a Prometheus-backed
  # collector attached to the global Prometheus registry.
  metric_metadata = server_metrics.GetMetadata()
  metric_metadata.extend(communicator.GetMetricMetadata())
  stats_collector = prometheus_stats_collector.PrometheusStatsCollector(
      metric_metadata, registry=prometheus_client.REGISTRY)
  stats_collector_instance.Set(stats_collector)

  server_logging.ServerLoggingStartupInit()
  bs_registry_init.RegisterBlobStores()
  all_decoders.Register()
  all_parsers.Register()
  registry.Init()

  # Exempt config updater from this check because it is the one responsible for
  # setting the variable.
  if not config.CONFIG.ContextApplied("ConfigUpdater Context"):
    if not config.CONFIG.Get("Server.initialized"):
      raise RuntimeError("Config not initialized, run \"grr_config_updater"
                         " initialize\". If the server is already configured,"
                         " add \"Server.initialized: True\" to your config.")

  INIT_RAN = True
def ClientInit():
  """Run all startup routines for the client."""
  # Register client and communicator metric definitions, then install the
  # default stats collector over them.
  metric_metadata = client_metrics.GetMetadata()
  metric_metadata.extend(communicator.GetMetricMetadata())
  stats_collector_instance.Set(
      default_stats_collector.DefaultStatsCollector(metric_metadata))

  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()
  client_logging.LogInit()
  all_parsers.Register()

  # Outside of a client build, persist the writeback-able options.
  if not config.CONFIG.ContextApplied(contexts.CLIENT_BUILD_CONTEXT):
    for option in ("Client.labels", "Client.proxy_servers",
                   "Client.tempdir_roots"):
      config.CONFIG.Persist(option)
def Init():
  """Run all required startup routines and initialization hooks."""
  global INIT_RAN
  if INIT_RAN:
    return

  # Set up a temporary syslog handler so we have somewhere to log problems
  # with ConfigInit() which needs to happen before we can start our create our
  # proper logging setup.
  syslog_logger = logging.getLogger("TempLogger")
  if os.path.exists("/dev/log"):
    handler = logging.handlers.SysLogHandler(address="/dev/log")
  else:
    handler = logging.handlers.SysLogHandler()
  syslog_logger.addHandler(handler)

  try:
    config_lib.SetPlatformArchContext()
    config_lib.ParseConfigCommandLine()
  except config_lib.Error:
    syslog_logger.exception("Died during config initialization")
    raise

  # Prefer a stats collector provided by the site-local registry_init module,
  # falling back to the default collector.
  if hasattr(registry_init, "stats"):
    logging.debug("Using local stats collector.")
    stats.STATS = registry_init.stats.StatsCollector()
  else:
    logging.debug("Using default stats collector.")
    stats.STATS = stats.StatsCollector()

  threadpool.InitializeMetrics()
  server_logging.ServerLoggingStartupInit()
  registry.Init()

  # Exempt config updater from this check because it is the one responsible for
  # setting the variable.
  if not config.CONFIG.ContextApplied("ConfigUpdater Context"):
    if not config.CONFIG.Get("Server.initialized"):
      raise RuntimeError("Config not initialized, run \"grr_config_updater"
                         " initialize\". If the server is already configured,"
                         " add \"Server.initialized: True\" to your config.")

  INIT_RAN = True
def main(args):
  """Launch the appropriate builder."""
  grr_config.CONFIG.AddContext(contexts.CLIENT_BUILD_CONTEXT)

  if args.subparser_name == "generate_client_config":
    # We don't need a full init to just build a config.
    GetClientConfig(args.client_config_output)
    return

  # TODO(user): Find out if adding the client-builder context is still
  # necessary.
  context = FLAGS.context
  context.append(contexts.CLIENT_BUILD_CONTEXT)

  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()

  # Use basic console output logging so we can see what is happening.
  logger = logging.getLogger()
  handler = logging.StreamHandler()
  handler.setLevel(logging.DEBUG if FLAGS.verbose else logging.INFO)
  logger.handlers = [handler]

  if args.subparser_name == "build":
    # Fleetspeak-enabled Darwin builds need extra mandatory settings.
    if grr_config.CONFIG["Client.fleetspeak_enabled"]:
      if grr_config.CONFIG.ContextApplied("Platform:Darwin"):
        if not grr_config.CONFIG.Get("ClientBuilder.install_dir"):
          raise RuntimeError("ClientBuilder.install_dir must be set.")
        if not grr_config.CONFIG.Get("ClientBuilder.fleetspeak_plist_path"):
          raise RuntimeError("ClientBuilder.fleetspeak_plist_path must be set.")
    TemplateBuilder().BuildTemplate(context=context, output=args.output)
  elif args.subparser_name == "repack":
    if args.debug_build:
      context.append("DebugClientBuild Context")
    result_path = repacking.TemplateRepacker().RepackTemplate(
        args.template,
        args.output_dir,
        context=context,
        sign=args.sign,
        signed_template=args.signed_template)
    if not result_path:
      raise ErrorDuringRepacking(" ".join(sys.argv[:]))
  elif args.subparser_name == "repack_multiple":
    # Resolve globs manually on Windows.
    templates = []
    for template in args.templates:
      if "*" in template:
        templates.extend(glob.glob(template))
      else:
        # This could go through glob but then we'd swallow errors for
        # non existing files.
        templates.append(template)

    repack_configs = []
    for repack_config in args.repack_configs:
      if "*" in repack_config:
        repack_configs.extend(glob.glob(repack_config))
      else:
        # This could go through glob but then we'd swallow errors for
        # non existing files.
        repack_configs.append(repack_config)

    MultiTemplateRepacker().RepackTemplates(
        repack_configs,
        templates,
        args.output_dir,
        config=FLAGS.config,
        sign=args.sign,
        signed_template=args.signed_template)
  elif args.subparser_name == "sign_template":
    repacking.TemplateRepacker().SignTemplate(
        args.template, args.output_file, context=context)
    if not os.path.exists(args.output_file):
      raise RuntimeError("Signing failed: output not written")
def main(args):
  """Main."""
  grr_config.CONFIG.AddContext(contexts.COMMAND_LINE_CONTEXT)
  grr_config.CONFIG.AddContext(contexts.CONFIG_UPDATER_CONTEXT)

  # "initialize" runs before full server startup; it only needs the config.
  if args.subparser_name == "initialize":
    config_lib.ParseConfigCommandLine()
    if args.noprompt:
      config_updater_util.InitializeNoPrompt(
          grr_config.CONFIG,
          external_hostname=args.external_hostname,
          admin_password=args.admin_password,
          mysql_hostname=args.mysql_hostname,
          mysql_port=args.mysql_port,
          mysql_username=args.mysql_username,
          mysql_password=args.mysql_password,
          mysql_db=args.mysql_db,
          mysql_client_key_path=args.mysql_client_key_path,
          mysql_client_cert_path=args.mysql_client_cert_path,
          mysql_ca_cert_path=args.mysql_ca_cert_path,
          redownload_templates=args.redownload_templates,
          repack_templates=not args.norepack_templates)
    else:
      config_updater_util.Initialize(
          grr_config.CONFIG,
          external_hostname=args.external_hostname,
          admin_password=args.admin_password,
          redownload_templates=args.redownload_templates,
          repack_templates=not args.norepack_templates)
    return

  server_startup.Init()

  try:
    print("Using configuration %s" % grr_config.CONFIG)
  except AttributeError:
    raise RuntimeError("No valid config specified.")

  # Dispatch on the selected sub-command.
  if args.subparser_name == "generate_keys":
    try:
      config_updater_keys_util.GenerateKeys(
          grr_config.CONFIG, overwrite_keys=args.overwrite_keys)
    except RuntimeError as e:
      # GenerateKeys will raise if keys exist and overwrite_keys is not set.
      print("ERROR: %s" % e)
      sys.exit(1)
    grr_config.CONFIG.Write()
  elif args.subparser_name == "repack_clients":
    upload = not args.noupload
    repacking.TemplateRepacker().RepackAllTemplates(upload=upload)
  elif args.subparser_name == "show_user":
    if args.username:
      print(config_updater_util.GetUserSummary(args.username))
    else:
      print(config_updater_util.GetAllUserSummaries())
  elif args.subparser_name == "update_user":
    config_updater_util.UpdateUser(
        args.username, password=args.password, is_admin=args.admin)
  elif args.subparser_name == "delete_user":
    config_updater_util.DeleteUser(args.username)
  elif args.subparser_name == "add_user":
    config_updater_util.CreateUser(
        args.username, password=args.password, is_admin=args.admin)
  elif args.subparser_name == "upload_python":
    config_updater_util.UploadSignedBinary(
        args.file,
        rdf_objects.SignedBinaryID.BinaryType.PYTHON_HACK,
        args.platform,
        upload_subdirectory=args.upload_subdirectory)
  elif args.subparser_name == "upload_exe":
    config_updater_util.UploadSignedBinary(
        args.file,
        rdf_objects.SignedBinaryID.BinaryType.EXECUTABLE,
        args.platform,
        upload_subdirectory=args.upload_subdirectory)
  elif args.subparser_name == "set_var":
    var = args.var
    val = args.val
    config = grr_config.CONFIG
    print("Setting %s to %s" % (var, val))
    if val.startswith("["):  # Allow setting of basic lists.
      val = val[1:-1].split(",")
    config.Set(var, val)
    config.Write()
  elif args.subparser_name == "switch_datastore":
    config_updater_util.SwitchToRelDB(grr_config.CONFIG)
    grr_config.CONFIG.Write()
  elif args.subparser_name == "upload_artifact":
    with io.open(args.file, "r") as filedesc:
      source = filedesc.read()
    try:
      artifact.UploadArtifactYamlFile(source, overwrite=args.overwrite_artifact)
    except rdf_artifacts.ArtifactDefinitionError as e:
      print("Error %s. You may need to set --overwrite_artifact." % e)
  elif args.subparser_name == "delete_artifacts":
    artifact_list = args.artifact
    if not artifact_list:
      raise ValueError("No artifact to delete given.")
    artifact_registry.DeleteArtifactsFromDatastore(artifact_list)
    print("Artifacts %s deleted." % artifact_list)
  elif args.subparser_name == "rotate_server_key":
    print("""
You are about to rotate the server key. Note that:

  - Clients might experience intermittent connection problems after the server
    keys rotated.

  - It's not possible to go back to an earlier key. Clients that see a new
    certificate will remember the cert's serial number and refuse to accept
    any certificate with a smaller serial number from that point on.
    """)
    if input("Continue? [yN]: ").upper() == "Y":
      if args.keylength:
        keylength = int(args.keylength)
      else:
        keylength = grr_config.CONFIG["Server.rsa_key_length"]

      maintenance_utils.RotateServerKey(
          cn=args.common_name, keylength=keylength)
def setUpModule() -> None:
  """Parses the config flags against an empty temporary YAML config file."""
  with temp.AutoTempFilePath(suffix=".yaml") as scratch_config_path, \
      flagsaver.flagsaver(config=scratch_config_path):
    config_lib.ParseConfigCommandLine()
def SetUpDummyConfig():
  """Initializes the config system from an empty temporary YAML file."""
  # Use ".yaml" (with the leading dot) so the temp file gets a real YAML
  # extension; without the dot the suffix is just appended to the random
  # name. This also matches the sibling setUpModule helpers, which all use
  # suffix=".yaml".
  with temp.AutoTempFilePath(suffix=".yaml") as dummy_config_path:
    with flagsaver.flagsaver(config=dummy_config_path):
      config_lib.ParseConfigCommandLine()
def main(argv):
  """Main."""
  del argv  # Unused.

  if flags.FLAGS.subparser_name == "version":
    version = config_server.VERSION["packageversion"]
    print("GRR configuration updater {}".format(version))
    return

  token = config_updater_util.GetToken()
  grr_config.CONFIG.AddContext(contexts.COMMAND_LINE_CONTEXT)
  grr_config.CONFIG.AddContext(contexts.CONFIG_UPDATER_CONTEXT)

  # "initialize" runs before full server startup; it only needs the config.
  if flags.FLAGS.subparser_name == "initialize":
    config_lib.ParseConfigCommandLine()
    if flags.FLAGS.noprompt:
      config_updater_util.InitializeNoPrompt(grr_config.CONFIG, token=token)
    else:
      config_updater_util.Initialize(grr_config.CONFIG, token=token)
    return

  server_startup.Init()

  try:
    print("Using configuration %s" % grr_config.CONFIG)
  except AttributeError:
    raise RuntimeError("No valid config specified.")

  # Dispatch on the selected sub-command.
  if flags.FLAGS.subparser_name == "generate_keys":
    try:
      config_updater_util.GenerateKeys(
          grr_config.CONFIG, overwrite_keys=flags.FLAGS.overwrite_keys)
    except RuntimeError as e:
      # GenerateKeys will raise if keys exist and overwrite_keys is not set.
      print("ERROR: %s" % e)
      sys.exit(1)
    grr_config.CONFIG.Write()
  elif flags.FLAGS.subparser_name == "repack_clients":
    upload = not flags.FLAGS.noupload
    repacking.TemplateRepacker().RepackAllTemplates(upload=upload, token=token)
  elif flags.FLAGS.subparser_name == "show_user":
    maintenance_utils.ShowUser(flags.FLAGS.username, token=token)
  elif flags.FLAGS.subparser_name == "update_user":
    try:
      maintenance_utils.UpdateUser(
          flags.FLAGS.username,
          flags.FLAGS.password,
          flags.FLAGS.add_labels,
          flags.FLAGS.delete_labels,
          token=token)
    except maintenance_utils.UserError as e:
      print(e)
  elif flags.FLAGS.subparser_name == "delete_user":
    maintenance_utils.DeleteUser(flags.FLAGS.username, token=token)
  elif flags.FLAGS.subparser_name == "add_user":
    # Users are admins by default unless --noadmin is given.
    labels = []
    if not flags.FLAGS.noadmin:
      labels.append("admin")
    if flags.FLAGS.labels:
      labels.extend(flags.FLAGS.labels)
    try:
      maintenance_utils.AddUser(
          flags.FLAGS.username, flags.FLAGS.password, labels, token=token)
    except maintenance_utils.UserError as e:
      print(e)
  elif flags.FLAGS.subparser_name == "upload_python":
    python_hack_root_urn = grr_config.CONFIG.Get("Config.python_hack_root")
    # Reads are capped at 30 MiB.
    content = open(flags.FLAGS.file, "rb").read(1024 * 1024 * 30)
    aff4_path = flags.FLAGS.dest_path
    platform = flags.FLAGS.platform
    if not aff4_path:
      aff4_path = python_hack_root_urn.Add(platform.lower()).Add(
          os.path.basename(flags.FLAGS.file))
    if not str(aff4_path).startswith(str(python_hack_root_urn)):
      raise ValueError("AFF4 path must start with %s." % python_hack_root_urn)
    context = ["Platform:%s" % platform.title(), "Client Context"]
    maintenance_utils.UploadSignedConfigBlob(
        content, aff4_path=aff4_path, client_context=context, token=token)
  elif flags.FLAGS.subparser_name == "upload_exe":
    # Reads are capped at 30 MiB.
    content = open(flags.FLAGS.file, "rb").read(1024 * 1024 * 30)
    context = ["Platform:%s" % flags.FLAGS.platform.title(), "Client Context"]

    if flags.FLAGS.dest_path:
      dest_path = rdfvalue.RDFURN(flags.FLAGS.dest_path)
    else:
      dest_path = grr_config.CONFIG.Get(
          "Executables.aff4_path",
          context=context).Add(os.path.basename(flags.FLAGS.file))

    # Now upload to the destination.
    maintenance_utils.UploadSignedConfigBlob(
        content, aff4_path=dest_path, client_context=context, token=token)
    print("Uploaded to %s" % dest_path)
  elif flags.FLAGS.subparser_name == "set_var":
    config = grr_config.CONFIG
    print("Setting %s to %s" % (flags.FLAGS.var, flags.FLAGS.val))
    if flags.FLAGS.val.startswith("["):  # Allow setting of basic lists.
      flags.FLAGS.val = flags.FLAGS.val[1:-1].split(",")
    config.Set(flags.FLAGS.var, flags.FLAGS.val)
    config.Write()
  elif flags.FLAGS.subparser_name == "upload_raw":
    if not flags.FLAGS.dest_path:
      flags.FLAGS.dest_path = aff4.ROOT_URN.Add("config").Add("raw")
    uploaded = config_updater_util.UploadRaw(
        flags.FLAGS.file, flags.FLAGS.dest_path, token=token)
    print("Uploaded to %s" % uploaded)
  elif flags.FLAGS.subparser_name == "upload_artifact":
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from the file; only run this on trusted
    # artifact files — consider yaml.safe_load.
    yaml.load(open(flags.FLAGS.file, "rb"))  # Check it will parse.
    try:
      artifact.UploadArtifactYamlFile(
          open(flags.FLAGS.file, "rb").read(),
          overwrite=flags.FLAGS.overwrite_artifact)
    except rdf_artifacts.ArtifactDefinitionError as e:
      print("Error %s. You may need to set --overwrite_artifact." % e)
  elif flags.FLAGS.subparser_name == "delete_artifacts":
    artifact_list = flags.FLAGS.artifact
    if not artifact_list:
      raise ValueError("No artifact to delete given.")
    artifact_registry.DeleteArtifactsFromDatastore(artifact_list, token=token)
    print("Artifacts %s deleted." % artifact_list)
  elif flags.FLAGS.subparser_name == "download_missing_rekall_profiles":
    print("Downloading missing Rekall profiles.")
    s = rekall_profile_server.GRRRekallProfileServer()
    s.GetMissingProfiles()
  elif flags.FLAGS.subparser_name == "set_global_notification":
    notification = aff4_users.GlobalNotification(
        type=flags.FLAGS.type,
        header=flags.FLAGS.header,
        content=flags.FLAGS.content,
        link=flags.FLAGS.link)
    if flags.FLAGS.show_from:
      notification.show_from = rdfvalue.RDFDatetime().ParseFromHumanReadable(
          flags.FLAGS.show_from)
    if flags.FLAGS.duration:
      notification.duration = rdfvalue.Duration().ParseFromHumanReadable(
          flags.FLAGS.duration)

    print("Setting global notification.")
    print(notification)

    with aff4.FACTORY.Create(
        aff4_users.GlobalNotificationStorage.DEFAULT_PATH,
        aff4_type=aff4_users.GlobalNotificationStorage,
        mode="rw",
        token=token) as storage:
      storage.AddNotification(notification)
  elif flags.FLAGS.subparser_name == "rotate_server_key":
    print("""
You are about to rotate the server key. Note that:

  - Clients might experience intermittent connection problems after the server
    keys rotated.

  - It's not possible to go back to an earlier key. Clients that see a new
    certificate will remember the cert's serial number and refuse to accept
    any certificate with a smaller serial number from that point on.
    """)

    if builtins.input("Continue? [yN]: ").upper() == "Y":
      if flags.FLAGS.keylength:
        keylength = int(flags.FLAGS.keylength)
      else:
        keylength = grr_config.CONFIG["Server.rsa_key_length"]

      maintenance_utils.RotateServerKey(
          cn=flags.FLAGS.common_name, keylength=keylength)
def setUpModule() -> None:
  """Parses the config flags against an empty temporary YAML config file."""
  # Use ".yaml" (with the leading dot) so the temp file gets a real YAML
  # extension, matching the other setUpModule helpers in this codebase.
  with temp.AutoTempFilePath(suffix=".yaml") as dummy_config_path:
    # NOTE(review): the temp file is removed when this block exits while
    # flags.FLAGS.config keeps pointing at the deleted path — presumably the
    # config is fully parsed here and never re-read; confirm.
    flags.FLAGS.config = dummy_config_path
    config_lib.ParseConfigCommandLine()
def main(argv):
  """Main."""
  del argv  # Unused.

  if flags.FLAGS.subparser_name == "version":
    version = config_server.VERSION["packageversion"]
    print("GRR configuration updater {}".format(version))
    return

  token = config_updater_util.GetToken()
  grr_config.CONFIG.AddContext(contexts.COMMAND_LINE_CONTEXT)
  grr_config.CONFIG.AddContext(contexts.CONFIG_UPDATER_CONTEXT)

  # "initialize" runs before full server startup; it only needs the config.
  if flags.FLAGS.subparser_name == "initialize":
    config_lib.ParseConfigCommandLine()
    if flags.FLAGS.noprompt:
      config_updater_util.InitializeNoPrompt(grr_config.CONFIG, token=token)
    else:
      config_updater_util.Initialize(grr_config.CONFIG, token=token)
    return

  server_startup.Init()

  try:
    print("Using configuration %s" % grr_config.CONFIG)
  except AttributeError:
    raise RuntimeError("No valid config specified.")

  # Dispatch on the selected sub-command.
  if flags.FLAGS.subparser_name == "generate_keys":
    try:
      config_updater_util.GenerateKeys(
          grr_config.CONFIG, overwrite_keys=flags.FLAGS.overwrite_keys)
    except RuntimeError as e:
      # GenerateKeys will raise if keys exist and overwrite_keys is not set.
      print("ERROR: %s" % e)
      sys.exit(1)
    grr_config.CONFIG.Write()
  elif flags.FLAGS.subparser_name == "repack_clients":
    upload = not flags.FLAGS.noupload
    repacking.TemplateRepacker().RepackAllTemplates(upload=upload, token=token)
  elif flags.FLAGS.subparser_name == "show_user":
    if flags.FLAGS.username:
      print(config_updater_util.GetUserSummary(flags.FLAGS.username))
    else:
      print(config_updater_util.GetAllUserSummaries())
  elif flags.FLAGS.subparser_name == "update_user":
    config_updater_util.UpdateUser(
        flags.FLAGS.username,
        password=flags.FLAGS.password,
        is_admin=flags.FLAGS.admin)
  elif flags.FLAGS.subparser_name == "delete_user":
    config_updater_util.DeleteUser(flags.FLAGS.username)
  elif flags.FLAGS.subparser_name == "add_user":
    config_updater_util.CreateUser(
        flags.FLAGS.username,
        password=flags.FLAGS.password,
        is_admin=flags.FLAGS.admin)
  elif flags.FLAGS.subparser_name == "upload_python":
    config_updater_util.UploadSignedBinary(
        flags.FLAGS.file,
        rdf_objects.SignedBinaryID.BinaryType.PYTHON_HACK,
        flags.FLAGS.platform,
        upload_subdirectory=flags.FLAGS.upload_subdirectory,
        token=token)
  elif flags.FLAGS.subparser_name == "upload_exe":
    config_updater_util.UploadSignedBinary(
        flags.FLAGS.file,
        rdf_objects.SignedBinaryID.BinaryType.EXECUTABLE,
        flags.FLAGS.platform,
        upload_subdirectory=flags.FLAGS.upload_subdirectory,
        token=token)
  elif flags.FLAGS.subparser_name == "set_var":
    config = grr_config.CONFIG
    print("Setting %s to %s" % (flags.FLAGS.var, flags.FLAGS.val))
    if flags.FLAGS.val.startswith("["):  # Allow setting of basic lists.
      flags.FLAGS.val = flags.FLAGS.val[1:-1].split(",")
    config.Set(flags.FLAGS.var, flags.FLAGS.val)
    config.Write()
  elif flags.FLAGS.subparser_name == "upload_artifact":
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from the file; only run this on trusted
    # artifact files — consider yaml.safe_load.
    yaml.load(open(flags.FLAGS.file, "rb"))  # Check it will parse.
    try:
      artifact.UploadArtifactYamlFile(
          open(flags.FLAGS.file, "rb").read(),
          overwrite=flags.FLAGS.overwrite_artifact)
    except rdf_artifacts.ArtifactDefinitionError as e:
      print("Error %s. You may need to set --overwrite_artifact." % e)
  elif flags.FLAGS.subparser_name == "delete_artifacts":
    artifact_list = flags.FLAGS.artifact
    if not artifact_list:
      raise ValueError("No artifact to delete given.")
    artifact_registry.DeleteArtifactsFromDatastore(artifact_list, token=token)
    print("Artifacts %s deleted." % artifact_list)
  elif flags.FLAGS.subparser_name == "download_missing_rekall_profiles":
    print("Downloading missing Rekall profiles.")
    s = rekall_profile_server.GRRRekallProfileServer()
    s.GetMissingProfiles()
  elif flags.FLAGS.subparser_name == "rotate_server_key":
    print("""
You are about to rotate the server key. Note that:

  - Clients might experience intermittent connection problems after the server
    keys rotated.

  - It's not possible to go back to an earlier key. Clients that see a new
    certificate will remember the cert's serial number and refuse to accept
    any certificate with a smaller serial number from that point on.
    """)

    if builtins.input("Continue? [yN]: ").upper() == "Y":
      if flags.FLAGS.keylength:
        keylength = int(flags.FLAGS.keylength)
      else:
        keylength = grr_config.CONFIG["Server.rsa_key_length"]

      maintenance_utils.RotateServerKey(
          cn=flags.FLAGS.common_name, keylength=keylength)
def Init():
  """Run all required startup routines and initialization hooks."""
  # Set up a temporary syslog handler so we have somewhere to log problems
  # with ConfigInit() which needs to happen before we can start our create our
  # proper logging setup.
  syslog_logger = logging.getLogger("TempLogger")
  if os.path.exists("/dev/log"):
    handler = logging.handlers.SysLogHandler(address="/dev/log")
  else:
    handler = logging.handlers.SysLogHandler()
  syslog_logger.addHandler(handler)

  try:
    config_lib.SetPlatformArchContext()
    config_lib.ParseConfigCommandLine()
  except config_lib.Error:
    syslog_logger.exception("Died during config initialization")
    raise

  # Register server and communicator metrics with a Prometheus-backed
  # collector attached to the global Prometheus registry.
  metric_metadata = server_metrics.GetMetadata()
  metric_metadata.extend(communicator.GetMetricMetadata())
  stats_collector = prometheus_stats_collector.PrometheusStatsCollector(
      metric_metadata, registry=prometheus_client.REGISTRY)
  stats_collector_instance.Set(stats_collector)

  server_logging.ServerLoggingStartupInit()
  bs_registry_init.RegisterBlobStores()
  all_decoders.Register()
  all_parsers.Register()
  data_store.InitializeDataStore()

  # Legacy AFF4 subsystems are only brought up when AFF4 is enabled.
  if data_store.AFF4Enabled():
    aff4.AFF4Init()  # Requires data_store.InitializeDataStore.
    aff4_grr.GRRAFF4Init()  # Requires aff4.AFF4Init.
    filestore.FileStoreInit()  # Requires aff4_grr.GRRAFF4Init.
    results.ResultQueueInit()  # Requires aff4.AFF4Init.
    sequential_collection.StartUpdaterOnce()

  # The API auth manager is only needed when running as the Admin UI.
  if contexts.ADMIN_UI_CONTEXT in config.CONFIG.context:
    api_auth_manager.InitializeApiAuthManager()

  artifact.LoadArtifactsOnce()  # Requires aff4.AFF4Init.
  checks.LoadChecksFromFilesystemOnce()
  client_approval_auth.InitializeClientApprovalAuthorizationManagerOnce()
  cronjobs.InitializeCronWorkerOnce()  # Requires aff4.AFF4Init.
  email_alerts.InitializeEmailAlerterOnce()
  http_api.InitializeHttpRequestHandlerOnce()
  ip_resolver.IPResolverInitOnce()
  stats_server.InitializeStatsServerOnce()
  webauth.InitializeWebAuthOnce()

  # Exempt config updater from this check because it is the one responsible for
  # setting the variable.
  if not config.CONFIG.ContextApplied("ConfigUpdater Context"):
    if not config.CONFIG.Get("Server.initialized"):
      raise RuntimeError("Config not initialized, run \"grr_config_updater"
                         " initialize\". If the server is already configured,"
                         " add \"Server.initialized: True\" to your config.")
def main(argv):
  """Parses the config flags and runs the client in a scratch directory."""
  del argv  # unused

  config_lib.ParseConfigCommandLine()

  # The scratch directory (and anything the client writes into it) is
  # removed when the block exits.
  with temp.AutoTempDirPath(remove_non_empty=True) as scratch_dir:
    _RunClient(scratch_dir)
def TestInit():
  """Only used in tests and will rerun all the hooks to create a clean state."""
  global INIT_RAN

  # Register server, client and communicator metrics with a fresh
  # Prometheus-backed stats collector.
  metric_metadata = server_metrics.GetMetadata()
  metric_metadata.extend(client_metrics.GetMetadata())
  metric_metadata.extend(communicator.GetMetricMetadata())
  stats_collector = prometheus_stats_collector.PrometheusStatsCollector(
      metric_metadata)
  stats_collector_instance.Set(stats_collector)

  # Tests use both the server template grr_server.yaml as a primary config file
  # (this file does not contain all required options, e.g. private keys), and
  # additional configuration in test_data/grr_test.yaml which contains typical
  # values for a complete installation.
  flags.FLAGS.config = package.ResourcePath(
      "grr-response-core", "install_data/etc/grr-server.yaml")

  flags.FLAGS.secondary_configs.append(
      package.ResourcePath("grr-response-test",
                           "grr_response_test/test_data/grr_test.yaml"))

  # This config contains non-public settings that should be applied during
  # tests.
  extra_test_config = config.CONFIG["Test.additional_test_config"]
  if os.path.exists(extra_test_config):
    flags.FLAGS.secondary_configs.append(extra_test_config)

  # Tests additionally add a test configuration file.
  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()

  # We are running a test so let the config system know that.
  config.CONFIG.AddContext(contexts.TEST_CONTEXT,
                           "Context applied when we run tests.")

  # Use the fake in-memory data store unless --test_data_store overrides it.
  test_ds = flags.FLAGS.test_data_store
  if test_ds is None:
    test_ds = compatibility.GetName(fake_data_store.FakeDataStore)

  config.CONFIG.Set("Datastore.implementation", test_ds)

  if not INIT_RAN:
    # One-time subsystem setup.
    server_logging.ServerLoggingStartupInit()
    server_logging.SetTestVerbosity()

    blob_store_test_lib.UseTestBlobStore()
    data_store.InitializeDataStore()

    # Legacy AFF4 subsystems are only brought up when AFF4 is enabled.
    if data_store.AFF4Enabled():
      aff4.AFF4Init()  # Requires data_store.InitializeDataStore.
      aff4_grr.GRRAFF4Init()  # Requires aff4.AFF4Init.
      filestore.FileStoreInit()  # Requires aff4_grr.GRRAFF4Init.
      results.ResultQueueInit()  # Requires aff4.AFF4Init.
      sequential_collection.StartUpdaterOnce()

    artifact.LoadArtifactsOnce()  # Requires aff4.AFF4Init.
    checks.LoadChecksFromFilesystemOnce()
    client_approval_auth.InitializeClientApprovalAuthorizationManagerOnce()
    cronjobs.InitializeCronWorkerOnce()  # Requires aff4.AFF4Init.
    email_alerts.InitializeEmailAlerterOnce()
    http_api.InitializeHttpRequestHandlerOnce()
    ip_resolver.IPResolverInitOnce()
    stats_server.InitializeStatsServerOnce()
    webauth.InitializeWebAuthOnce()

  # Swap in a test-specific data store if the implementation provides one.
  db = data_store.DB.SetupTestDB()
  if db:
    data_store.DB = db
  data_store.DB.Initialize()

  # Make sure the housekeeper thread exists, then shut it down so tests do
  # not leak a background thread.
  if not utils.TimeBasedCache.house_keeper_thread:
    utils.TimeBasedCache()
  utils.TimeBasedCache.house_keeper_thread.exit = True
  utils.TimeBasedCache.house_keeper_thread.join()

  INIT_RAN = True