def run(command, parser, cl_args, unknown_args):
  """ run the update command """
  Log.debug("Update Args: %s", cl_args)
  # Build jar list
  extra_lib_jars = jars.packing_jars()
  action = "update topology%s" % (' in dry-run mode' if cl_args["dry_run"] else '')
  # Build extra args
  dict_extra_args = {}
  try:
    dict_extra_args = build_extra_args_dict(cl_args)
  except Exception as err:
    # str(err) rather than err.message: BaseException.message was deprecated in
    # Python 2.6 and removed in Python 3, where err.message raises AttributeError.
    return SimpleResult(Status.InvocationError, str(err))
  # Execute
  if cl_args['deploy_mode'] == config.SERVER_MODE:
    return cli_helper.run_server(command, cl_args, action, dict_extra_args)
  else:
    # Convert extra argument to commandline format and then execute
    list_extra_args = convert_args_dict_to_list(dict_extra_args)
    return cli_helper.run_direct(command, cl_args, action, list_extra_args, extra_lib_jars)
def run(command, parser, cl_args, unknown_args):
  """ run the update command """
  Log.debug("Update Args: %s", cl_args)
  # jars needed when running the command directly
  extra_lib_jars = jars.packing_jars()
  action = "update topology%s" % (' in dry-run mode' if cl_args["dry_run"] else '')
  if cl_args['deploy_mode'] == config.SERVER_MODE:
    dict_extra_args = {"component_parallelism": cl_args['component_parallelism']}
    if cl_args["dry_run"]:
      dict_extra_args.update({'dry_run': True})
      if "dry_run_format" in cl_args:
        # BUG FIX: the original passed a SET literal {"dry_run_format", value}
        # to dict.update(), which raises at runtime ("dictionary update sequence
        # element ... has length N; 2 is required"); a dict literal is intended.
        dict_extra_args.update({"dry_run_format": cl_args["dry_run_format"]})
    return cli_helper.run_server(command, cl_args, action, dict_extra_args)
  else:
    list_extra_args = ["--component_parallelism",
                       ','.join(cl_args['component_parallelism'])]
    if cl_args["dry_run"]:
      list_extra_args.append('--dry_run')
      if "dry_run_format" in cl_args:
        list_extra_args += ["--dry_run_format", cl_args["dry_run_format"]]
    return cli_helper.run_direct(command, cl_args, action, list_extra_args, extra_lib_jars)
def run(command, parser, cl_args, unknown_args):
  """ run the update command """
  # comma-joined "component:parallelism" pairs forwarded to the runtime manager
  parallelism_csv = ','.join(cl_args['component_parallelism'])
  packing_lib_jars = jars.packing_jars()
  return cli_helper.run(command, cl_args, "update topology",
                        ["--component_parallelism", parallelism_csv],
                        packing_lib_jars)
def launch_a_topology(cl_args, tmp_dir, topology_file, topology_defn_file, topology_name):
  ''' Launch a topology given topology jar, its definition file and configurations
  :param cl_args:
  :param tmp_dir:
  :param topology_file:
  :param topology_defn_file:
  :return:
  '''
  # normalized path of the topology.tar.gz package that will be shipped
  pkg_path = config.normalized_class_path(os.path.join(tmp_dir, 'topology.tar.gz'))
  # release metadata file bundled alongside the package
  release_file = config.get_heron_release_file()
  conf_path = cl_args['config_path']
  # bundle topology binary + definition with the cluster and generated configs
  config.create_tar(pkg_path,
                    [topology_file, topology_defn_file],
                    conf_path,
                    [release_file, cl_args['override_config_file']])
  # assemble the arguments handed to the JVM submitter main class
  submit_args = [
      "--cluster", cl_args['cluster'],
      "--role", cl_args['role'],
      "--environment", cl_args['environ'],
      "--heron_home", config.get_heron_dir(),
      "--config_path", conf_path,
      "--override_config_file", cl_args['override_config_file'],
      "--release_file", release_file,
      "--topology_package", pkg_path,
      "--topology_defn", topology_defn_file,
      "--topology_bin", topology_file  # pex file if pex specified
  ]
  if Log.getEffectiveLevel() == logging.DEBUG:
    submit_args.append("--verbose")
  lib_jars = config.get_heron_libs(
      jars.scheduler_jars() + jars.uploader_jars() +
      jars.statemgr_jars() + jars.packing_jars())
  extra_jars = cl_args['extra_launch_classpath'].split(':')
  # hand off to the submitter, which uploads and schedules the topology
  response = execute.heron_class(
      class_name='com.twitter.heron.scheduler.SubmitterMain',
      lib_jars=lib_jars,
      extra_jars=extra_jars,
      args=submit_args,
      java_defines=[])
  response.add_context("Failed to launch topology '%s'" % topology_name,
                       "Successfully launched topology '%s'" % topology_name)
  return response
def run(command, parser, cl_args, unknown_args):
  """ run the update command """
  # flags forwarded to the runtime manager
  cli_flags = ["--component_parallelism",
               ','.join(cl_args['component_parallelism'])]
  if cl_args["dry_run"]:
    cli_flags.append('--dry_run')
    if "dry_run_format" in cl_args:
      cli_flags.extend(["--dry_run_format", cl_args["dry_run_format"]])
  return cli_helper.run(command, cl_args, "update topology",
                        cli_flags, jars.packing_jars())
def run(command, parser, cl_args, unknown_args):
  """ run the update command """
  dry = cl_args["dry_run"]
  # action string reported back to the user; notes dry-run mode when active
  action = "update topology in dry-run mode" if dry else "update topology"
  cli_flags = ["--component_parallelism",
               ','.join(cl_args['component_parallelism'])]
  if dry:
    cli_flags.append('--dry_run')
    if "dry_run_format" in cl_args:
      cli_flags.extend(["--dry_run_format", cl_args["dry_run_format"]])
  return cli_helper.run(command, cl_args, action, cli_flags, jars.packing_jars())
def launch_a_topology(cl_args, tmp_dir, topology_file, topology_defn_file):
  ''' Launch a topology given topology jar, its definition file and configurations
  :param cl_args:
  :param tmp_dir:
  :param topology_file:
  :param topology_defn_file:
  :return:
  '''
  # normalized path of the topology.tar.gz package that will be shipped
  pkg_path = config.normalized_class_path(os.path.join(tmp_dir, 'topology.tar.gz'))
  # release metadata file bundled alongside the package
  release_file = config.get_heron_release_file()
  conf_path = cl_args['config_path']
  # bundle topology binary + definition with the cluster and generated configs
  config.create_tar(pkg_path,
                    [topology_file, topology_defn_file],
                    conf_path,
                    [release_file, cl_args['override_config_file']])
  # assemble the arguments handed to the JVM submitter main class
  submit_args = [
      "--cluster", cl_args['cluster'],
      "--role", cl_args['role'],
      "--environment", cl_args['environ'],
      "--heron_home", config.get_heron_dir(),
      "--config_path", conf_path,
      "--override_config_file", cl_args['override_config_file'],
      "--release_file", release_file,
      "--topology_package", pkg_path,
      "--topology_defn", topology_defn_file,
      "--topology_bin", topology_file  # pex file if pex specified
  ]
  if Log.getEffectiveLevel() == logging.DEBUG:
    submit_args.append("--verbose")
  lib_jars = config.get_heron_libs(
      jars.scheduler_jars() + jars.uploader_jars() +
      jars.statemgr_jars() + jars.packing_jars())
  # hand off to the submitter; this variant ignores the result (returns None)
  execute.heron_class('com.twitter.heron.scheduler.SubmitterMain',
                      lib_jars,
                      extra_jars=[],
                      args=submit_args,
                      java_defines=[])
def run(command, parser, cl_args, unknown_args):
  """ run the update command """
  topology_name = cl_args['topology-name']
  try:
    new_args = [
        "--cluster", cl_args['cluster'],
        "--role", cl_args['role'],
        "--environment", cl_args['environ'],
        "--heron_home", config.get_heron_dir(),
        "--config_path", cl_args['config_path'],
        "--override_config_file", cl_args['override_config_file'],
        "--release_file", config.get_heron_release_file(),
        "--topology_name", topology_name,
        "--command", command,
        "--component_parallelism", ','.join(cl_args['component_parallelism']),
    ]
    if Log.getEffectiveLevel() == logging.DEBUG:
      new_args.append("--verbose")
    lib_jars = config.get_heron_libs(jars.scheduler_jars() +
                                     jars.statemgr_jars() +
                                     jars.packing_jars())
    # invoke the runtime manager to update the topology
    execute.heron_class('com.twitter.heron.scheduler.RuntimeManagerMain',
                        lib_jars, extra_jars=[], args=new_args)
  except Exception:
    # BUG FIX: traceback.format_exc(ex) misuses the API — its argument is the
    # *limit* of stack frames, not the exception. Call it with no arguments;
    # it formats the exception currently being handled.
    Log.error('Failed to update topology \'%s\': %s',
              topology_name, traceback.format_exc())
    return False
  Log.info('Successfully updated topology \'%s\'' % topology_name)
  return True
def run(command, parser, cl_args, unknown_args):
  """ run the update command """
  topology_name = cl_args['topology-name']
  try:
    new_args = [
        "--cluster", cl_args['cluster'],
        "--role", cl_args['role'],
        "--environment", cl_args['environ'],
        "--heron_home", config.get_heron_dir(),
        "--config_path", cl_args['config_path'],
        "--override_config_file", cl_args['override_config_file'],
        "--release_file", config.get_heron_release_file(),
        "--topology_name", topology_name,
        "--command", command,
        "--component_parallelism", ','.join(cl_args['component_parallelism']),
    ]
    if Log.getEffectiveLevel() == logging.DEBUG:
      new_args.append("--verbose")
    lib_jars = config.get_heron_libs(
        jars.scheduler_jars() + jars.statemgr_jars() + jars.packing_jars()
    )
    # invoke the runtime manager to update the topology
    execute.heron_class(
        'com.twitter.heron.scheduler.RuntimeManagerMain',
        lib_jars, extra_jars=[], args=new_args
    )
  except Exception:
    # BUG FIX: traceback.format_exc(ex) misuses the API — its argument is the
    # *limit* of stack frames, not the exception. Call it with no arguments;
    # it formats the exception currently being handled.
    Log.error('Failed to update topology \'%s\': %s',
              topology_name, traceback.format_exc())
    return False
  Log.info('Successfully updated topology \'%s\'' % topology_name)
  return True
def run(command, parser, cl_args, unknown_args):
  """ run the update command """
  Log.debug("Update Args: %s", cl_args)
  # Build jar list
  extra_lib_jars = jars.packing_jars()
  action = "update topology%s" % (' in dry-run mode' if cl_args["dry_run"] else '')
  # Build extra args
  dict_extra_args = {}
  try:
    dict_extra_args = build_extra_args_dict(cl_args)
  except Exception as err:
    # str(err) rather than err.message: BaseException.message was deprecated in
    # Python 2.6 and removed in Python 3, where err.message raises AttributeError.
    return SimpleResult(Status.InvocationError, str(err))
  # Execute
  if cl_args['deploy_mode'] == config.SERVER_MODE:
    return cli_helper.run_server(command, cl_args, action, dict_extra_args)
  else:
    # Convert extra argument to commandline format and then execute
    list_extra_args = convert_args_dict_to_list(dict_extra_args)
    return cli_helper.run_direct(command, cl_args, action, list_extra_args, extra_lib_jars)
def launch_a_topology(cl_args, tmp_dir, topology_file, topology_defn_file, topology_name):
  ''' Launch a topology given topology jar, its definition file and configurations
  :param cl_args:
  :param tmp_dir:
  :param topology_file:
  :param topology_defn_file:
  :return:
  '''
  # normalized path of the topology.tar.gz package that will be shipped
  pkg_path = config.normalized_class_path(os.path.join(tmp_dir, 'topology.tar.gz'))
  # release metadata file bundled alongside the package
  release_file = config.get_heron_release_file()
  conf_path = cl_args['config_path']
  # bundle topology binary + definition with the cluster and generated configs
  config.create_tar(pkg_path,
                    [topology_file, topology_defn_file],
                    conf_path,
                    [release_file, cl_args['override_config_file']])
  # assemble the arguments handed to the JVM submitter main class
  submit_args = [
      "--cluster", cl_args['cluster'],
      "--role", cl_args['role'],
      "--environment", cl_args['environ'],
      "--submit_user", cl_args['submit_user'],
      "--heron_home", config.get_heron_dir(),
      "--config_path", conf_path,
      "--override_config_file", cl_args['override_config_file'],
      "--release_file", release_file,
      "--topology_package", pkg_path,
      "--topology_defn", topology_defn_file,
      "--topology_bin", os.path.basename(topology_file)  # pex file if pex specified
  ]
  if Log.getEffectiveLevel() == logging.DEBUG:
    submit_args.append("--verbose")
  if cl_args["dry_run"]:
    submit_args.append("--dry_run")
    if "dry_run_format" in cl_args:
      submit_args.extend(["--dry_run_format", cl_args["dry_run_format"]])
  lib_jars = config.get_heron_libs(jars.scheduler_jars() +
                                   jars.uploader_jars() +
                                   jars.statemgr_jars() +
                                   jars.packing_jars())
  extra_jars = cl_args['extra_launch_classpath'].split(':')
  # hand off to the submitter, which uploads and schedules the topology
  result = execute.heron_class(class_name='com.twitter.heron.scheduler.SubmitterMain',
                               lib_jars=lib_jars,
                               extra_jars=extra_jars,
                               args=submit_args,
                               java_defines=[])
  err_ctx = "Failed to launch topology '%s'" % topology_name
  ok_ctx = "Successfully launched topology '%s'" % topology_name
  if cl_args["dry_run"]:
    err_ctx += " in dry-run mode"
    ok_ctx += " in dry-run mode"
  result.add_context(err_ctx, ok_ctx)
  return result
def run(command, parser, cl_args, unknown_args):
  """ run the update command """
  return cli_helper.run(
      command,
      cl_args,
      "update topology",
      # comma-joined "component:parallelism" pairs for the runtime manager
      ["--component_parallelism", ','.join(cl_args['component_parallelism'])],
      jars.packing_jars())