def restart(self):
    '''
    Restart the cluster and deploy configurations, but only restart
    services with stale configurations.
    '''
    log.info("Trying to restart cluster: \"{0}\"".format(self.cluster.name))
    # Sleep for a small amount of time so CDH notices its configurations
    # have been updated before we ask for the restart.
    time.sleep(10)
    # The two flags request a stale-services-only restart plus a
    # configuration redeployment, per the docstring above.
    self.cluster.restart(True, True)
    self.poll_commands("Restart")
def exec_formula(cluster, args):
    log.info("using formula: {0}".format(args.formula))
    user_formula_args = None
    if args.formula_args:
        user_formula_args_path = file.file_path(cc.FORMULA_ARGS, args.path)
        log.info("Reading formula args config file: {0}".format(
            user_formula_args_path))
        user_formula_args = file.open_yaml_conf(user_formula_args_path)
    # Global variables exposed to the executed formula. (Renamed from
    # "vars"/"local" so we don't shadow the builtins.)
    formula_globals = {
        "log": log,
        "kb_to_bytes": convert.kb_to_bytes,
        "bytes_to_kb": convert.bytes_to_kb,
        "mb_to_bytes": convert.mb_to_bytes,
        "bytes_to_mb": convert.bytes_to_mb,
        "gb_to_bytes": convert.gb_to_bytes,
        "bytes_to_gb": convert.bytes_to_gb,
        "tr_to_bytes": convert.tr_to_bytes,
        "bytes_to_tr": convert.bytes_to_tr
    }
    formula_locals = {}
    config = {}
    try:
        execfile(args.formula, formula_globals, formula_locals)
        constants = formula_locals["constants"](cluster, log)
        for member in constants:
            # Callable members act as validators for user-supplied formula
            # args; calling one with None yields its default value.
            if hasattr(constants[member], '__call__'):
                if user_formula_args and member in user_formula_args:
                    if (constants[member](user_formula_args[member])
                            != user_formula_args[member]):
                        log.warning(
                            "Formula arg value '{0}' was ignored; using "
                            "default '{1}'. Formula arg values must adhere "
                            "to these rules: {2}".format(
                                user_formula_args[member],
                                constants[member](user_formula_args[member]),
                                inspect.getsource(constants[member])))
                    constants[member] = constants[member](
                        user_formula_args[member])
                else:
                    constants[member] = constants[member](None)
        config = formula_locals["formula"](cluster, log, constants)
    except IOError:
        log.fatal("formula file: {0} doesn't exist".format(args.formula))
    return config
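# A minimal sketch of the formula module that exec_formula() loads via
# execfile(). The key names and values below are hypothetical; the only
# contract implied above is that the module defines constants(cluster, log),
# whose callable members validate user-supplied formula args (and return a
# default when called with None), and formula(cluster, log, constants), which
# returns the config dict. Helpers such as gb_to_bytes come from the injected
# globals.
#
#   def _heap_percent(arg):
#       # Fall back to 0.5 unless the user gave a value in (0, 0.8].
#       return arg if arg and 0 < arg <= 0.8 else 0.5
#
#   def constants(cluster, log):
#       return {
#           "heap_percent": _heap_percent,     # validated against user args
#           "min_heap_bytes": gb_to_bytes(1),  # plain value, used as-is
#       }
#
#   def formula(cluster, log, constants):
#       log.info("building configs")
#       return {"some.service.some_config": constants["min_heap_bytes"]}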
def save_cdh_configuration(vars, args):
    #TODO: think of a better name
    if vars["cdh"]:
        temp = {}
        path = file.file_path(cc.CDH_CONFIG, args.path)
        for key in vars["cdh"]:
            key_split = key.split(".")
            cc.utils.dict.nest(temp, key_split, vars["cdh"][key])
        file.write_json_conf(temp, path)
        log.info("Wrote CDH configuration file to: {0}".format(path))
        return path
    else:
        log.warning("No CDH configurations to save.")
        return None
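# For illustration: the loop above expands dotted keys into nested dicts via
# cc.utils.dict.nest(), so a flat entry such as (key name hypothetical):
#
#   {"hdfs.dfs_replication": 3}
#
# is written to disk as:
#
#   {"hdfs": {"dfs_replication": 3}}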
def set(self, configs):
    '''
    Re-key our configs to use the actual CDH config key names.
    :param configs: dictionary of configs that were parsed from json.
    :return: the re-keyed dictionary that was pushed to CDH.
    '''
    temp = {}
    for config in configs:
        if config in self.configs:
            temp[self.configs[config].cdh_config.name] = configs[config]
        else:
            log.warning(
                "No configuration key found for: {0}".format(config))
    log.info("Updating config group: {0}".format(self.key))
    self.cdh_group.update_config(temp)
    self.__update()
    return temp
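# For illustration, assuming self.configs maps friendly keys to objects whose
# cdh_config.name holds the real CDH key (names below are hypothetical):
#
#   group.set({"dfs_replication": 3, "bogus_key": 1})
#
# pushes {"dfs.replication": 3} through cdh_group.update_config(), logs a
# warning for "bogus_key", and returns the re-keyed dict.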
def get_cluster(self):
    '''
    Try to select a cluster. If we have more than one cluster we need to
    ask the user; if we have none, throw a fatal error; if we have exactly
    one, make it the default selection.
    '''
    clusters = self.cdh_get_clusters()
    # If we have more than one cluster we need to pick which one to
    # configure against.
    if len(clusters) > 1:
        log.info("More than one Cluster Detected.")
        return self.select_cluster(clusters)
    # Else pick the only cluster.
    elif len(clusters) == 1:
        log.info("cluster selected: {0}".format(clusters[0].name))
        return clusters[0]
    else:
        log.fatal("No clusters to configure")
def poll_commands(self, command_name):
    '''
    Poll the cluster's currently running commands to find out when the
    config deployment and restart have finished.
    :param command_name: the command we will be looking for, ie 'Restart'
    '''
    while True:
        time.sleep(2)
        log.info("Waiting for {0}".format(command_name))
        sys.stdout.flush()
        commands = self.cluster.get_commands(view="full")
        if commands:
            for c in commands:
                if c.name == command_name:
                    # The command is still active; keep polling.
                    break
            else:
                # No matching command remains, so it has finished.
                break
        else:
            # No active commands at all; nothing left to wait for.
            break
    print "\n"
    log.info("Done with {0}.".format(command_name))
def run(args, cluster=None, dt=None):
    if cluster is None:
        cluster = Cluster(args.host, args.port, args.username, args.password,
                          args.cluster)
    if cluster:
        cluster_before = save_to_json(cluster, args.path, "before")
        # Read the generated configs.
        cdh_config_path = file.file_path(cc.CDH_CONFIG, args.path)
        log.info("Reading CDH config file: {0}".format(cdh_config_path))
        cdh_configs = file.open_json_conf(cdh_config_path)
        user_cdh_config_path = file.file_path(cc.USER_CDH_CONFIG, args.path)
        log.info("Reading user CDH config file: {0}".format(
            user_cdh_config_path))
        user_configs = file.open_json_conf(user_cdh_config_path)
        merged_config_path = None
        if user_configs:
            # Merge config dictionaries and resolve conflicts.
            log.info("conflict resolution: {0}".format(args.conflict_merge))
            configs = cc.utils.dict.merge_dicts(
                user_configs, cdh_configs,
                convert_conflict_merge(args.conflict_merge))
            merged_config_path = file.file_path(cc.MERGED_CDH_CONFIG,
                                                args.path)
            log.info("Writing merged CDH config file: {0}".format(
                merged_config_path))
            file.write_json_conf(configs, merged_config_path)
        else:
            configs = cdh_configs
        # If update_cdh is "yes", iterate through services, set CDH configs,
        # possibly restart services, and push a snapshot of the result.
        if args.update_cdh == "yes":
            cluster.update_configs(configs, args.restart_cdh != "no")
            cluster_after = save_to_json(cluster, args.path, "after")
            file.snapshots(args.host, "push", args.path, dt, cluster_before,
                           cluster_after, cdh_config_path,
                           user_cdh_config_path, merged_config_path)
    else:
        log.fatal("Couldn't connect to the CDH cluster")
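# A hedged illustration of the merge step above; the exact conflict semantics
# live in cc.utils.dict.merge_dicts() and convert_conflict_merge(). Given
# (values hypothetical):
#
#   user_configs = {"a": 1, "b": 2}
#   cdh_configs  = {"b": 9, "c": 3}
#
# the merged result keeps "a" and "c" unchanged, while the clashing "b" is
# resolved by the strategy selected through args.conflict_merge (e.g. prefer
# the user value or the generated value).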
def test_info(self):
    log.logger.log = MagicMock()
    log.info("info")
    log.logger.log.assert_called_with(logging.INFO, "info")