def __init__(self, packet):
    """Strip the 4-byte packet prefix and parse the event header.

    The header is built from bytes 1-19 of the remaining body.  On any
    parse failure ``self.header`` stays ``None`` and the traceback is
    logged as a warning instead of propagating.
    """
    self._body = packet[4:]
    self.header = None
    try:
        self.header = EventHeader(self._body[1:20])
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # still propagate; parse errors remain best-effort and are logged.
        log.warning(get_trace_info())
def test():
    """Smoke-test the log module: point it at a new file, then emit one message per level."""
    # Instantiate, change the log file name, and load the new configuration.
    logger_cfg = log.Log()
    logger_cfg.log_filename = 'test_log.log'
    logger_cfg.log_config()
    # Exercise each severity level once, in ascending order.
    for emit, text in ((log.debug, 'This is debug message'),
                       (log.info, 'This is info message'),
                       (log.warning, 'This is warning message')):
        emit(text)
def add_table(self, db, table, col):
    """Register the column names in *col* as columns of interest for db.table.

    Creates the per-database and per-table bookkeeping entries on first
    sight; non-string column names are skipped with a warning.
    """
    per_db = self._tables.setdefault(db, {})
    entry = per_db.setdefault(table, {"columns_info": {}, "do_columns": {}, "pos_map": {}})
    wanted = entry["do_columns"]
    for name in col:
        # Column names must be plain strings; anything else is ignored.
        if not isinstance(name, str):
            log.warning("non-string col name.")
            continue
        if name not in wanted:
            wanted[name] = None
    log.debug(json.dumps(self._tables))
def get_columns_info(self):
    """Fill ``columns_info`` and ``pos_map`` for every registered table.

    Issues a zero-row SELECT per table purely to obtain the cursor's
    column description; tables that fail to describe are logged and
    skipped so the remaining tables are still processed.
    """
    for db, tables in self._tables.items():
        for table, desc in tables.items():
            try:
                # "limit 0,0" returns no rows but still yields column metadata.
                sql = "select * from %s.%s limit 0,0" % (db, table)
                res, columns_desc = self._query(sql)
                for idx, field in enumerate(columns_desc):
                    # field[0] is the column name in a DB-API description tuple.
                    if field[0] in desc["do_columns"]:
                        desc["columns_info"][field[0]] = field
                        desc["pos_map"][idx] = field[0]
            except Exception:
                # Narrowed from a bare "except:": keep the best-effort
                # per-table behavior but let SystemExit and friends escape.
                log.warning(get_trace_info())
                continue
    log.debug(json.dumps(self._tables))
def get_full_columns(self):
    """Load name/type/default metadata for the tracked columns of each table.

    Runs ``SHOW FULL COLUMNS`` per table and stores a small metadata dict
    keyed by the column's position index.  Failing tables are logged and
    skipped so the scan continues.
    """
    for db, tables in self._tables.items():
        for table, desc in tables.items():
            try:
                sql = "show full columns from %s.%s" % (db, table)
                res, _ = self._query(sql)
                for idx, field in enumerate(res):
                    if field["Field"] in desc["do_columns"]:
                        desc["columns_info"][idx] = {
                            "name": field["Field"],
                            "type": field["Type"],
                            "Default": field["Default"],
                        }
            except Exception:
                # Narrowed from a bare "except:" so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                log.warning(get_trace_info())
                continue
    log.debug(json.dumps(self._tables))
def add_table(self, db, table, col):
    """Record which columns should be processed for a given db.table pair."""
    if db not in self._tables:
        self._tables[db] = {}
    bucket = self._tables[db]
    if table not in bucket:
        bucket[table] = {
            "columns_info": {},
            "do_columns": {},
            "pos_map": {},
        }
    do_columns = bucket[table]["do_columns"]
    for candidate in col:
        if isinstance(candidate, str):
            # First sighting of a column registers it with an empty slot.
            if candidate not in do_columns:
                do_columns[candidate] = None
        else:
            do_columns
            log.warning("non-string col name.")
    log.debug(json.dumps(self._tables))
def get_full_columns(self):
    """Collect full column metadata (name, type, default) per registered table.

    Uses ``SHOW FULL COLUMNS`` and indexes the resulting metadata dicts by
    column position.  A failure on one table is logged and does not stop
    the remaining tables from being processed.
    """
    for db, tables in self._tables.items():
        for table, desc in tables.items():
            try:
                sql = "show full columns from %s.%s" % (db, table)
                res, _ = self._query(sql)
                for idx, field in enumerate(res):
                    if field["Field"] in desc["do_columns"]:
                        desc["columns_info"][idx] = {
                            "name": field["Field"],
                            "type": field["Type"],
                            "Default": field["Default"],
                        }
            except Exception:
                # Was a bare "except:"; narrowed so interpreter-exit
                # exceptions are not silently absorbed here.
                log.warning(get_trace_info())
                continue
    log.debug(json.dumps(self._tables))
def inspect_load_state(component_to_inspect, gateway_session, pod_status_dict,
                       deployment_version_dict, container_status_dict
                       ):
    """Validate that an ACS component's OpenShift deployment rolled out.

    Queries the deployment configs and pods for *component_to_inspect*
    through the cluster master session, compares each deployment config's
    latest version against the observed pod versions, and flags components
    whose pods are missing or have containers not ready.  For failures the
    deployer pod name is appended to /tmp/deployer_cleanup so a later step
    can clean it up before rolling back.

    The three dict arguments are filled in by process_pod_json and then
    mutated here (pod_status_dict keys are re-normalized on version
    mismatch) — presumably they arrive empty; confirm against callers.

    Returns True when at least one component failed validation, else False.
    """
    deployment_config_label = "internal.acs.amadeus.com/component=%s" % component_to_inspect
    get_dc_command = "sudo oc get dc -l %s -o json" % deployment_config_label
    close_file = False
    deployer_and_component = None
    # This is the file which will have the deployer name in it if there is a failure
    filename = "/tmp/deployer_cleanup"
    # Attempt to remove the file from a previous run to ensure the file is always empty
    try:
        os.remove(filename)
    except OSError:
        pass
    # Need to check the component against acs.component.CMPS[cmp_name]['cluster']
    # All of the component types are statically set in component.py
    try:
        acs.component.CMPS[component_to_inspect]
    except KeyError:
        print("Component is not part of the ACS category. Aborting the validation process")
        sys.exit(0)
    os_master_session = openshift.cluster.get_master_session(gateway_session, component_to_inspect)
    component_attributes = process_deployment_config_json(get_dc_command, os_master_session)
    # Gather pod status for every pod label attached to each deployment config.
    for first_key in component_attributes.keys():
        for pod_label in component_attributes[first_key].keys():
            get_pod_command = "sudo oc get pod -l %s -o json" % pod_label
            process_pod_json(get_pod_command, pod_status_dict, deployment_version_dict,
                             pod_label, os_master_session, container_status_dict)
    exit_with_error = False
    # compare the dc latest version to component latest version
    for first_key in deployment_version_dict.keys():
        for second_key, value in deployment_version_dict[first_key].iteritems():
            if component_attributes[first_key][second_key] != deployment_version_dict[first_key][second_key]:
                # If there is a problem, store the data with the component name, in the same way that it is stored
                # if there are no pods running. i.e. name=component. This allows consistent error handling
                # Swap the first_key and second_key to normalize the data in the dict
                pod_status_dict.pop(first_key)
                pod_status_dict[second_key] = {first_key: False}
    # At this point the dict looks like:
    # pod_status_dict['name=ahp-report-audit-dmn'] = {'report-audit-dmn-deployment': False}
    for key in pod_status_dict.keys():
        log.info("Validating component: %s" % key)
        for second_key, value in pod_status_dict[key].iteritems():
            # A falsy value marks a failed/not-ready component.
            if not value:
                if "name" in key:
                    log.warning("%sThis component did not deploy at all: \t%s\n" % (textColours.FAIL, key))
                    # name=component will be in the key if there was a problem
                    # The second key will be empty if the problem was that the pod did not exist. Usually when a pod
                    # no longer exists, there is no longer a deployer pod kicking around.
                    # This will get the name of the deployer pod and write it to a file for cleanup before
                    # failing back to the previous build
                    deployer_name = second_key + "-" + str(component_attributes[second_key][key]) + "-deploy"
                else:
                    # NOTE(review): trailing commas after print(...) look like Python 2
                    # print-statement newline suppression — confirm this runs on Python 2.
                    print("%sThis component has container(s) not ready: \t%s\n" % (textColours.FAIL, key)),
                    print("\nContainer infomration from failure:\n"),
                    for container in container_status_dict.keys():
                        print("\tDocker image name: %s\n" % (container_status_dict[container])),
                        print("\tContainer name: %s\n" % container)
                    if second_key is not None:
                        try:
                            try:
                                # Probe whether the last dash-segment is numeric;
                                # if not, drop it before building the label query.
                                int(second_key.split("-")[-1])
                            except ValueError:
                                second_key = "-".join(second_key.split("-")[:-1])
                            command = "sudo oc get pod -l openshift.io/deployer-pod-for.name=%s -o json" % \
                                (second_key)
                            log.info("Running command: %s" % command)
                            cmd_output = os_master_session.get_cmd_output(command)
                            deployer_json_data = json.loads(cmd_output)
                            deployer_name = deployer_json_data['items'][0]['metadata']['name']
                            log.warning("Deployer pod found in pod json for %s, writing %s" % (deployer_name, filename))
                        except IndexError:
                            log.warning("%sNo deployer pods found to clean up for: %s" % (textColours.FAIL, second_key))
                        except KeyError:
                            print("key error")
                            pass
                # NOTE(review): deployer_name may be unbound here when the lookup
                # above hit IndexError/KeyError before assigning it — confirm.
                deployer_and_component = component_to_inspect + " : " + deployer_name
                # NOTE(review): always true at this point (just assigned a str);
                # the guard appears vestigial — confirm intended.
                if deployer_and_component is not None:
                    # NOTE(review): reopened in append mode on every failure and only
                    # the last handle is closed below — earlier handles rely on GC.
                    write_file = open(filename, "a")
                    write_file.write(deployer_and_component)
                    write_file.write("\n")
                    close_file = True
                exit_with_error = True
    if close_file:
        log.debug("%s has been written" % filename)
        write_file.close()
    return(exit_with_error)
# Read the environment name from the config file; exit on any failure since
# nothing downstream can run without it.
config.read(options.config_file)
try:
    environment_name = config.get(env_file_section_to_find_env_name, variable_name_of_environment_name)
except ConfigParser.NoSectionError:
    log.error("%s was not found in %s. Cannot continue..." % (env_file_section_to_find_env_name,
                                                              options.config_file))
    sys.exit(1)
except ConfigParser.NoOptionError:
    log.error("%s was not found in section %s in the config file: %s\n"
              "Cannot continue..." % (variable_name_of_environment_name,
                                      env_file_section_to_find_env_name,
                                      options.config_file))
    sys.exit(1)
except Exception:
    # Narrowed from a bare "except:" so the sys.exit(1) calls above (and
    # Ctrl-C) are not swallowed and re-reported as "Unhandled exception".
    log.warning("Unhandled exception! Exiting...")
    sys.exit(1)
script_input_check(options.component_name)
if options.artifactory:
    if options.major_version is not None:
        if environment_name is not None:
            list_of_components = []
            components_with_blueprint_versions_dict = artifactory.ArtifactoryApi.return_all_objects_underneath_folder_with_specific_metadata(
                'environment-blueprints')
            # Find the blueprint for a component and return the latest one for a given environment
            latest_blueprints_dict = artifactory.ArtifactoryApi.get_latest_version(
                environment_name, components_with_blueprint_versions_dict, options.major_version)
            # build the list of components to update based on information from artifactory
# Build a case-sensitive config parser and read the environment name from the
# config file; exit on any failure since nothing downstream can run without it.
config = ConfigParser.RawConfigParser()
config.optionxform = str  # make keys case sensitive
config.read(options.config_file)
try:
    environment_name = config.get(env_file_section_to_find_env_name, variable_name_of_environment_name)
except ConfigParser.NoSectionError:
    log.error("%s was not found in %s. Cannot continue..." % (env_file_section_to_find_env_name,
                                                              options.config_file))
    sys.exit(1)
except ConfigParser.NoOptionError:
    log.error("%s was not found in section %s in the config file: %s\n"
              "Cannot continue..." % (variable_name_of_environment_name,
                                      env_file_section_to_find_env_name,
                                      options.config_file))
    sys.exit(1)
except Exception:
    # Narrowed from a bare "except:" so SystemExit raised above and
    # KeyboardInterrupt are not re-reported as "Unhandled exception".
    log.warning("Unhandled exception! Exiting...")
    sys.exit(1)
script_input_check(options.component_name)
if options.artifactory:
    if options.major_version is not None:
        if environment_name is not None:
            list_of_components = []
            components_with_blueprint_versions_dict = artifactory.ArtifactoryApi.return_all_objects_underneath_folder_with_specific_metadata(
                'environment-blueprints')
            # Find the blueprint for a component and return the latest one for a given environment
            latest_blueprints_dict = artifactory.ArtifactoryApi.get_latest_version(environment_name,
                                                                                   components_with_blueprint_versions_dict,
                                                                                   options.major_version)
            # build the list of components to update based on information from artifactory