def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.base_url = args[Constant.BASE_URL_KEY]
    temp_dir = FileUtil.etl_temp_dir(args, "CODESEARCH")
    self.code_search_committer_writer = FileWriter(
        os.path.join(temp_dir, args[Constant.DATABASE_SCM_REPO_OUTPUT_KEY]))
class Building(BuildingType):
    log = LoggerFactory.getLogger("Building")

    def __init__(self):
        Building.log.info("Initializing the Building object in the Python file...")
        self.buildingName = None
        self.buildingAddress = None
        self.buildingId = None

    def getBuildingName(self):
        Building.log.info("getBuildingName method called")
        return self.buildingName

    def getBuildingAddress(self):
        Building.log.info("getBuildingAddress method called")
        return self.buildingAddress

    def getBuildingId(self):
        Building.log.info("getBuildingId method called")
        return self.buildingId

    def setBuildingName(self, buildingName):
        Building.log.info("setBuildingName method called")
        self.buildingName = buildingName

    def setBuildingAddress(self, buildingAddress):
        Building.log.info("setBuildingAddress method called")
        self.buildingAddress = buildingAddress

    def setBuildingId(self, buildingId):
        Building.log.info("setBuildingId method called")
        self.buildingId = buildingId
def tail(f, n, offset=0):
    """Return approximately the last n (+ offset) lines of file f."""
    myLogger = LoggerFactory.getLogger("logmanager")
    avg_line_length = 74
    to_read = n + (offset or 0)
    while 1:
        fo = open(f, "r+")
        try:
            # seek back far enough to (hopefully) cover to_read lines
            fo.seek(-(avg_line_length * to_read), 2)
        except IOError:
            # the file is smaller than the requested step back,
            # so read from the beginning instead
            myLogger.error("file is smaller than the requested tail, reading from start")
            fo.seek(0)
        except:
            myLogger.error("Invalid request")
        pos = fo.tell()
        lines = fo.read().splitlines(True)
        if len(lines) >= to_read or pos == 0:
            fo.close()
            #return lines[-to_read:offset and -offset or None], \
            #       len(lines) > to_read or pos > 0
            #return lines[-to_read:offset and -offset or None]
            return lines
        if len(lines) < 5:
            fo.close()
            return lines
        # not enough lines were read: assume longer lines and retry
        fo.close()
        avg_line_length = int(avg_line_length * 1.3 + 0.5)
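# Usage sketch for the tail helper above; the log path is a placeholder, and the
# returned strings keep their trailing newlines.
def _print_recent_log_lines(path="/var/log/app/server.log", count=20):
    for line in tail(path, count)[-count:]:
        print line,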
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.elasticsearch_index_url = args[Constant.ELASTICSEARCH_URL_KEY]
    self.elasticsearch_port = args[Constant.ELASTICSEARCH_PORT_KEY]
    if Constant.ELASTICSEARCH_INDEX_KEY not in args:
        self.elasticsearch_index = "wherehows"
    else:
        self.elasticsearch_index = args[Constant.ELASTICSEARCH_INDEX_KEY]
    self.index_mapping_file = args[Constant.WH_ELASTICSEARCH_INDEX_MAPPING_FILE]
    # bulk insert size for the Elasticsearch engine
    self.bulk_chunk_size = int(args[Constant.ELASTICSEARCH_BULK_INSERT_SIZE])
    # request timeout for the URL used to post data to the Elasticsearch engine
    self.es_url_request_timeout = int(args[Constant.ELASTICSEARCH_URL_REQUEST_TIMEOUT])
    # max times for db re-connection when lost during fetching source data
    self.max_retry_times = int(args[Constant.WH_DB_MAX_RETRY_TIMES])
    self.base_url = self.elasticsearch_index_url + ':' + str(self.elasticsearch_port) + '/'
    self.logger.info(self.base_url)
    self.old_index = []
    self.new_index = []
    self.databaseConnect(args)
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    username = args[Constant.WH_DB_USERNAME_KEY]
    password = args[Constant.WH_DB_PASSWORD_KEY]
    JDBC_DRIVER = args[Constant.WH_DB_DRIVER_KEY]
    JDBC_URL = args[Constant.WH_DB_URL_KEY]
    self.input_table_file = args[Constant.ORA_SCHEMA_OUTPUT_KEY]
    self.input_field_file = args[Constant.ORA_FIELD_OUTPUT_KEY]
    self.input_sample_file = args[Constant.ORA_SAMPLE_OUTPUT_KEY]
    self.db_id = args[Constant.DB_ID_KEY]
    self.wh_etl_exec_id = args[Constant.WH_EXEC_ID_KEY]
    self.conn_mysql = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
    self.conn_cursor = self.conn_mysql.cursor()
    if Constant.INNODB_LOCK_WAIT_TIMEOUT in args:
        lock_wait_time = args[Constant.INNODB_LOCK_WAIT_TIMEOUT]
        self.conn_cursor.execute("SET innodb_lock_wait_timeout = %s;" % lock_wait_time)
    self.logger.info("Load Oracle Metadata into {}, db_id {}, wh_exec_id {}"
                     .format(JDBC_URL, self.db_id, self.wh_etl_exec_id))
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    username = args[Constant.WH_DB_USERNAME_KEY]
    password = args[Constant.WH_DB_PASSWORD_KEY]
    JDBC_DRIVER = args[Constant.WH_DB_DRIVER_KEY]
    JDBC_URL = args[Constant.WH_DB_URL_KEY]
    self.db_id = args[Constant.JOB_REF_ID_KEY]
    self.wh_etl_exec_id = args[Constant.WH_EXEC_ID_KEY]
    self.conn_mysql = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
    self.conn_cursor = self.conn_mysql.cursor()
    if Constant.INNODB_LOCK_WAIT_TIMEOUT in args:
        lock_wait_time = args[Constant.INNODB_LOCK_WAIT_TIMEOUT]
        self.conn_cursor.execute("SET innodb_lock_wait_timeout = %s;" % lock_wait_time)
    temp_dir = FileUtil.etl_temp_dir(args, "REDSHIFT")
    self.input_table_file = os.path.join(temp_dir, args[Constant.RED_SCHEMA_OUTPUT_KEY])
    self.input_field_file = os.path.join(temp_dir, args[Constant.RED_FIELD_OUTPUT_KEY])
    self.logger.info("Load Redshift Metadata into {}, db_id {}, wh_exec_id {}"
                     .format(JDBC_URL, self.db_id, self.wh_etl_exec_id))
def __init__(self, hdfs_uri, kerberos=False, kerberos_principal=None, keytab_file=None):
    """
    :param hdfs_uri: hdfs://hadoop-name-node:port
    :param kerberos: optional, if kerberos authentication is needed
    :param kerberos_principal: optional, [email protected]
    :param keytab_file: optional, absolute path to keytab file
    """
    self.logger = LoggerFactory.getLogger(self.__class__.__name__)
    # str() guards against keytab_file being None (it is an optional parameter)
    self.logger.info("keytab_file: " + str(keytab_file))
    hdfs_conf = Configuration()
    if hdfs_uri.startswith('hdfs://'):
        hdfs_conf.set(Hdfs.FS_DEFAULT_NAME_KEY, hdfs_uri)
    elif hdfs_uri > "":
        self.logger.error("%s is an invalid uri for hdfs namenode ipc bind." % hdfs_uri)
    if kerberos:
        # init kerberos and keytab
        if not kerberos_principal or not keytab_file:
            print "Kerberos Principal and Keytab File Name/Path are required!"
        hdfs_conf.set("hadoop.security.authentication", "kerberos")
        hdfs_conf.set("dfs.namenode.kerberos.principal.pattern", "*")
        UserGroupInformation.setConfiguration(hdfs_conf)
        UserGroupInformation.loginUserFromKeytab(kerberos_principal, keytab_file)
    self.fs = Hdfs.get(hdfs_conf)
    requests.packages.urllib3.disable_warnings()
    self.logger.info("Initiated SchemaUrlHelper")
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    username = args[Constant.WH_DB_USERNAME_KEY]
    password = args[Constant.WH_DB_PASSWORD_KEY]
    JDBC_DRIVER = args[Constant.WH_DB_DRIVER_KEY]
    JDBC_URL = args[Constant.WH_DB_URL_KEY]
    self.db_id = args[Constant.JOB_REF_ID_KEY]
    self.wh_etl_exec_id = args[Constant.WH_EXEC_ID_KEY]
    self.conn_mysql = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
    self.conn_cursor = self.conn_mysql.cursor()
    if Constant.INNODB_LOCK_WAIT_TIMEOUT in args:
        lock_wait_time = args[Constant.INNODB_LOCK_WAIT_TIMEOUT]
        self.conn_cursor.execute("SET innodb_lock_wait_timeout = %s;" % lock_wait_time)
    temp_dir = FileUtil.etl_temp_dir(args, "ORACLE")
    self.input_table_file = os.path.join(temp_dir, args[Constant.ORA_SCHEMA_OUTPUT_KEY])
    self.input_field_file = os.path.join(temp_dir, args[Constant.ORA_FIELD_OUTPUT_KEY])
    self.input_sample_file = os.path.join(temp_dir, args[Constant.ORA_SAMPLE_OUTPUT_KEY])
    self.collect_sample = False
    if Constant.ORA_LOAD_SAMPLE in args:
        self.collect_sample = FileUtil.parse_bool(args[Constant.ORA_LOAD_SAMPLE], False)
    self.logger.info("Load Oracle Metadata into {}, db_id {}, wh_exec_id {}"
                     .format(JDBC_URL, self.db_id, self.wh_etl_exec_id))
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.app_id = int(args[Constant.JOB_REF_ID_KEY])
    self.wh_exec_id = long(args[Constant.WH_EXEC_ID_KEY])
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor()
    self.aw_con = self.get_connection(args[Constant.AW_DB_URL_KEY],
                                      args[Constant.AW_DB_PORT_KEY],
                                      args[Constant.AW_DB_NAME_KEY],
                                      args[Constant.AW_DB_USERNAME_KEY],
                                      args[Constant.AW_DB_PASSWORD_KEY],
                                      args[Constant.AW_DB_DRIVER_KEY])
    self.aw_cursor = self.aw_con.cursor()
    self.lookback_period = args[Constant.AW_EXEC_ETL_LOOKBACK_KEY]
    self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
    self.metadata_folder = self.app_folder + "/" + str(SchedulerType.APPWORX) + "/" + str(self.app_id)
    self.last_execution_unix_time = None
    self.get_last_execution_unix_time()
    if not os.path.exists(self.metadata_folder):
        try:
            os.makedirs(self.metadata_folder)
        except Exception as e:
            self.logger.error(e)
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    username = args[Constant.WH_DB_USERNAME_KEY]
    password = args[Constant.WH_DB_PASSWORD_KEY]
    JDBC_DRIVER = args[Constant.WH_DB_DRIVER_KEY]
    JDBC_URL = args[Constant.WH_DB_URL_KEY]
    self.input_table_file = args[Constant.ORA_SCHEMA_OUTPUT_KEY]
    self.input_field_file = args[Constant.ORA_FIELD_OUTPUT_KEY]
    self.input_sample_file = args[Constant.ORA_SAMPLE_OUTPUT_KEY]
    self.db_id = args[Constant.DB_ID_KEY]
    self.wh_etl_exec_id = args[Constant.WH_EXEC_ID_KEY]
    self.conn_mysql = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
    self.conn_cursor = self.conn_mysql.cursor()
    if Constant.INNODB_LOCK_WAIT_TIMEOUT in args:
        lock_wait_time = args[Constant.INNODB_LOCK_WAIT_TIMEOUT]
        self.conn_cursor.execute("SET innodb_lock_wait_timeout = %s;" % lock_wait_time)
    self.logger.info("Load Oracle Metadata into {}, db_id {}, wh_exec_id {}"
                     .format(JDBC_URL, self.db_id, self.wh_etl_exec_id))
    self.dict_dataset_table = 'dict_dataset'
    self.field_comments_table = 'field_comments'
    self.dict_field_table = 'dict_field_detail'
    self.dict_field_comment_table = 'dict_dataset_field_comment'
    self.dict_dataset_sample_table = 'dict_dataset_sample'
def __init__(self, server):
    self.logger = LoggerFactory.getLogger("com.xebialabs.snyk-plugin")
    if server in [None, ""]:
        raise Exception("server is undefined")
    self.orgId = server['orgId']
    self.http_request = HttpRequest(server)
def __init__(self, name, strSetPV, strGetPV, strStatusPV, strEnablePV):
    self.logger = LoggerFactory.getLogger("ID_PolarisationClass:%s" % name)
    self.verbose = True
    self.setName(name)
    self.setInputNames([name])
    self.setExtraNames([])
    # self.Units=[strUnit];
    self.setLevel(7)
    # self.setOutputFormat(["%20.12f"]);
    self.enable = ['Beamline', 'Machine Control Room']
    self.positions = ['PosCirc', 'NegCirc', 'Horizontal', 'Vertical', 'LinArb']
    self.chSetPol = CAClient(strSetPV)
    self.chSetPol.configure()
    self.chGetPol = CAClient(strGetPV)
    self.chGetPol.configure()
    self.chStatus = CAClient(strStatusPV)
    self.chStatus.configure()
    self.chEnable = CAClient(strEnablePV)
    self.chEnable.configure()
    self.currentPol = 'Unknown'
    self.demandPol = 'Unknown'
    self.strStatus = 'unknown'
    self.strEnable = self.enable[1]
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    username = args[Constant.WH_DB_USERNAME_KEY]
    password = args[Constant.WH_DB_PASSWORD_KEY]
    JDBC_DRIVER = args[Constant.WH_DB_DRIVER_KEY]
    JDBC_URL = args[Constant.WH_DB_URL_KEY]
    self.database_scm_repo_file = args[Constant.DATABASE_SCM_REPO_OUTPUT_KEY]
    self.app_id = args[Constant.APP_ID_KEY]
    self.wh_etl_exec_id = args[Constant.WH_EXEC_ID_KEY]
    self.conn_mysql = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
    self.conn_cursor = self.conn_mysql.cursor()
    if Constant.INNODB_LOCK_WAIT_TIMEOUT in args:
        lock_wait_time = args[Constant.INNODB_LOCK_WAIT_TIMEOUT]
        self.conn_cursor.execute("SET innodb_lock_wait_timeout = %s;" % lock_wait_time)
    self.logger.info("Load Code Search CSV into {}, app_id {}, wh_exec_id {}"
                     .format(JDBC_URL, self.app_id, self.wh_etl_exec_id))
def __init__(self, name, pgm_grat_pitch, pgm_mirr_pitch, pgmpvroot):
    # motors, maybe also detector to set the delay time
    self.logger = LoggerFactory.getLogger("ContinuousPgmGratingEnergyMoveController:%s" % name)
    self.verbose = False
    self.name = name
    self._pgm_grat_pitch = pgm_grat_pitch
    self._pgm_mirr_pitch = pgm_mirr_pitch
    self._start_event = threading.Event()
    self._pgm_grat_pitch_speed_orig = None
    self._movelog_time = datetime.now()
    self._pgm_runupdown_time = None
    self.pvs = PvManager({'grating_density': 'NLINES',
                          'cff': 'CFF',
                          'grating_offset': 'GRTOFFSET',
                          'plane_mirror_offset': 'MIROFFSET',
                          'pgm_energy': 'ENERGY',
                          'grating_pitch': 'GRT:PITCH',
                          'mirror_pitch': 'MIR:PITCH',
                          'energy_calibration_gradient': 'MX',
                          'energy_calibration_reference': 'REFERENCE'}, pgmpvroot)
    if installation.isLive():
        self.pvs.configure()
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.app_id = int(args[Constant.APP_ID_KEY])
    self.wh_exec_id = long(args[Constant.WH_EXEC_ID_KEY])
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor()
    self.aw_con = self.get_connection(args[Constant.AW_DB_URL_KEY],
                                      args[Constant.AW_DB_PORT_KEY],
                                      args[Constant.AW_DB_NAME_KEY],
                                      args[Constant.AW_DB_USERNAME_KEY],
                                      args[Constant.AW_DB_PASSWORD_KEY],
                                      args[Constant.AW_DB_DRIVER_KEY])
    self.aw_cursor = self.aw_con.cursor()
    self.lookback_period = args[Constant.AW_EXEC_ETL_LOOKBACK_KEY]
    self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
    self.metadata_folder = self.app_folder + "/" + str(SchedulerType.APPWORX) + "/" + str(self.app_id)
    self.last_execution_unix_time = None
    self.get_last_execution_unix_time()
    if not os.path.exists(self.metadata_folder):
        try:
            os.makedirs(self.metadata_folder)
        except Exception as e:
            self.logger.error(e)
def __init__(self, controller, demand_position):
    #self.start_event = threading.Event()
    self.start_event = controller._start_event
    self._controller, self._demand_position = controller, demand_position
    self.logger = LoggerFactory.getLogger(
        "ContinuousPgmGratingIDGapEnergyMoveController:%s:DelayableCallable[%r]"
        % (controller.name, demand_position))
    if self._controller.verbose:
        self.logger.info('__init__(%r, %r)...' % (controller.name, demand_position))
def __init__(self, hdfs_uri, kerberos=False, kerberos_principal=None, keytab_file=None):
    """
    :param hdfs_uri: hdfs://hadoop-name-node:port
    :param kerberos: optional, if kerberos authentication is needed
    :param kerberos_principal: optional, [email protected]
    :param keytab_file: optional, absolute path to keytab file
    """
    self.logger = LoggerFactory.getLogger(self.__class__.__name__)
    # str() guards against keytab_file being None (it is an optional parameter)
    self.logger.info("keytab_file: " + str(keytab_file))
    hdfs_conf = Configuration()
    if hdfs_uri.startswith('hdfs://'):
        hdfs_conf.set(Hdfs.FS_DEFAULT_NAME_KEY, hdfs_uri)
    elif hdfs_uri > "":
        self.logger.error("%s is an invalid uri for hdfs namenode ipc bind." % hdfs_uri)
    if kerberos:
        # init kerberos and keytab
        if not kerberos_principal or not keytab_file:
            print "Kerberos Principal and Keytab File Name/Path are required!"
        hdfs_conf.set("hadoop.security.authentication", "kerberos")
        hdfs_conf.set("dfs.namenode.kerberos.principal.pattern", "*")
        UserGroupInformation.setConfiguration(hdfs_conf)
        UserGroupInformation.loginUserFromKeytab(kerberos_principal, keytab_file)
    self.fs = Hdfs.get(hdfs_conf)
    requests.packages.urllib3.disable_warnings()
def setLogLevel(level):
    """setLogLevel(level) changes the log level to level, where level
    is a string (ERROR, WARN, INFO, DEBUG, and TRACE)
    """
    logger = LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME)
    levels = {"ERROR": Level.ERROR,
              "WARN": Level.WARN,
              "INFO": Level.INFO,
              "DEBUG": Level.DEBUG,
              "TRACE": Level.TRACE}
    l = levels[level.upper()]
    logger.setLevel(l)
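# Minimal usage sketch for the setLogLevel helper above (assumes the same
# Logger, Level and LoggerFactory imports as the surrounding script): raise the
# root logger to DEBUG while troubleshooting, then drop it back to INFO.
setLogLevel("DEBUG")
# ... run the code being diagnosed ...
setLogLevel("INFO")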
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    username = args[Constant.WH_DB_USERNAME_KEY]
    password = args[Constant.WH_DB_PASSWORD_KEY]
    JDBC_DRIVER = args[Constant.WH_DB_DRIVER_KEY]
    JDBC_URL = args[Constant.WH_DB_URL_KEY]
    self.mp_gitli_project_file = args[Constant.GIT_PROJECT_OUTPUT_KEY]
    self.product_repo_file = args[Constant.PRODUCT_REPO_OUTPUT_KEY]
    self.product_repo_owner_file = args[Constant.PRODUCT_REPO_OWNER_OUTPUT_KEY]
    self.app_id = args[Constant.APP_ID_KEY]
    self.wh_etl_exec_id = args[Constant.WH_EXEC_ID_KEY]
    self.conn_mysql = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
    self.conn_cursor = self.conn_mysql.cursor()
    if Constant.INNODB_LOCK_WAIT_TIMEOUT in args:
        lock_wait_time = args[Constant.INNODB_LOCK_WAIT_TIMEOUT]
        self.conn_cursor.execute("SET innodb_lock_wait_timeout = %s;" % lock_wait_time)
    self.logger.info("Load Multiproduct Metadata into {}, app_id {}, wh_exec_id {}"
                     .format(JDBC_URL, self.app_id, self.wh_etl_exec_id))
def setLogLevel(level, loggerName='ROOT', temporary=True):
    """Set the log level for the given logger.

    Args:
        level: Logging level to set. Valid levels are 'TRACE', 'DEBUG', 'INFO',
               'WARN', 'ERROR', and 'OFF'.

    Optional Args:
        loggerName: Name of a specific logger. Default value is 'ROOT'.

        temporary: Whether or not the logging level should be saved between
                   McIDAS-V sessions. Be aware that if set to False, loggerName
                   must be 'ROOT'. Default value is True.

    Raises:
        ValueError: if temporary is False and loggerName is not 'ROOT'.
    """
    if not temporary:
        if loggerName != 'ROOT':
            raise ValueError(_BAD_LOGGERNAME % (loggerName))
        from edu.wisc.ssec.mcidasv.startupmanager.options import OptionMaster
        optMaster = OptionMaster.getInstance()
        optMaster.getLoggerLevelOption("LOG_LEVEL").setValue(level)
        optMaster.writeStartup()
    context = LoggerFactory.getILoggerFactory()
    logger = context.exists(loggerName)
    if not logger:
        logger = context.getLogger(loggerName)
    currentLevel = logger.getLevel()
    if not currentLevel:
        currentLevel = logger.getEffectiveLevel()
    convertedLevel = currentLevel.toLevel(level, currentLevel.INFO)
    logger.setLevel(convertedLevel)
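# Usage sketch for the McIDAS-V setLogLevel wrapper above; the package-style
# logger name below is only an illustration.
setLogLevel('DEBUG', loggerName='edu.wisc.ssec.mcidasv', temporary=True)
# Persisting a new default level (temporary=False) writes the startup options
# and is only allowed for the ROOT logger:
# setLogLevel('INFO', temporary=False)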
def __init__(self, name, mca_root_pv, channelAdvanceInternalNotExternal=False):
    self.logger = LoggerFactory.getLogger("McsWaveformChannelController:%s" % name)
    self.verbose = False
    self.name = name
    self.mca_root_pv = mca_root_pv
    self.pv_stop = CAClient(mca_root_pv + 'StopAll')
    self.pv_dwell = CAClient(mca_root_pv + 'Dwell')
    self.pv_channeladvance = CAClient(mca_root_pv + 'ChannelAdvance')
    self.pv_presetReal = CAClient(mca_root_pv + 'PresetReal')
    self.pv_erasestart = CAClient(mca_root_pv + 'EraseStart')
    self.channelAdvanceInternalNotExternal = channelAdvanceInternalNotExternal
    self.channelAdvanceInternal = 0
    self.channelAdvanceExternal = 1
    self.configure()
    self.exposure_time = 1
    self.exposure_time_offset = .0
    self.number_of_positions = 0
    self.started = False
    self.hardware_trigger_provider = None
    self.stream = None
def __init__(self, name):
    self.setName(name)
    self.setInputNames([])
    self.setExtraNames([])
    self.setOutputFormat([])
    self.setLevel(7)
    self.logger = LoggerFactory.getLogger(name)
def getLogAppenders(loggerName="console"):
    loggerMap = []
    myLogger = LoggerFactory.getLogger("logmanager")
    loggerContext = LoggerFactory.getILoggerFactory()
    myLogger.error("===================")
    appenderMap = {}
    for logger in loggerContext.getLoggerList():
        appenderList = logger.iteratorForAppenders()
        while appenderList.hasNext():
            appender = appenderList.next()
            myLogger.error("Logger %s" % appender.getName())
            if appender.getName() not in appenderMap.keys():
                # remember the appender so each one is only reported once
                appenderMap[appender.getName()] = appender
                loggerMap.append({"name": appender.getName(), "appender": "NA"})
                myLogger.error("Appender %s: %s" % (appender.getName(), "NA"))
    myLogger.error("===================")
    return loggerMap
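# Usage sketch for getLogAppenders above: list the distinct appender names
# currently attached to any logger in the logback context.
appenders = getLogAppenders()
for entry in appenders:
    print entry["name"]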
def __init__(self, name='max',
             labelList=('maxx', 'maxy', 'maxval', 'sum'),
             keyxlabel='maxx',
             keyylabel='maxy',
             formatString='Maximum value found to be at %f,%f (maxx,maxy) was %f (maxval). Sum was %f (sum)'):
    self.logger = LoggerFactory.getLogger("SumMaxPositionAndValue:%s" % name)
    TwodDataSetProcessor.__init__(self, name, labelList, keyxlabel, keyylabel, formatString)
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor()
    self.app_id = int(args[Constant.JOB_REF_ID_KEY])
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor()
    self.app_id = int(args[Constant.APP_ID_KEY])
def cronTrigger(cronExpression, triggerName=None):
    logger = LoggerFactory.getLogger(logger_name + ".CronTrigger")
    triggerName = normalize_name(triggerName)
    return TriggerBuilder.create()\
        .withId(triggerName)\
        .withLabel(triggerName)\
        .withTypeUID("timer.GenericCronTrigger")\
        .withConfiguration(Configuration({"cronExpression": cronExpression}))\
        .build()
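# Usage sketch for the cronTrigger factory above (openHAB-style automation):
# build a trigger that fires every day at 06:30; the trigger name is illustrative.
morning_trigger = cronTrigger("0 30 6 * * ?", triggerName="MorningWakeup")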
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.elasticsearch_index_url = args[Constant.WH_ELASTICSEARCH_URL_KEY]
    self.elasticsearch_port = args[Constant.WH_ELASTICSEARCH_PORT_KEY]
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor()
def createLogAppender(name, file):
    lc = LoggerFactory.getILoggerFactory()
    ple = PatternLayoutEncoder()
    ple.setPattern("%date %level [%thread] %logger{10} [%file:%line] %msg%n")
    ple.setContext(lc)
    ple.start()
    fileAppender = FileAppender()
    fileAppender.setFile(file)
    fileAppender.setEncoder(ple)
    fileAppender.setContext(lc)
    fileAppender.start()
    logger = LoggerFactory.getLogger(name)
    logger.addAppender(fileAppender)
    #logger.setLevel(logLevels.DEBUG)
    # set to true if root should log too
    logger.setAdditive(True)
    return logger
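# Usage sketch for createLogAppender above: route one script's messages to its
# own file. The logger name and file path are illustrative placeholders.
script_logger = createLogAppender("my.script", "/tmp/my_script.log")
script_logger.info("file appender attached")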
def __init__(self, name, thp):
    # motors, maybe also detector to set the delay time
    ContinuousPgmEnergyMoveController.__init__(self, name, thp)
    self.logger = LoggerFactory.getLogger("ContinuousThpMoveController:%s" % name)
    # TODO: Ideally, these should both be renamed ContinuousScannableMoveController,
    # since it appears that neither have any non generic requirements.
def setupSunExposureRule(exposureConfig, items):
    logger = LoggerFactory.getLogger(logger_name + ".setupSunExposureRule")
    logger.info("creating rule")
    exposure = {}
    for shutter in exposureConfig:
        exposure[shutter] = SunExposure(exposureConfig[shutter])
    globalRules.append(
        SunExposureRule(exposure, items['azimuth'], items['elevation'],
                        items['weather_sunny'], items['shutter_automation']))
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor()
    self.wh_exec_id = long(args[Constant.WH_EXEC_ID_KEY])
    self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
def __init__(self, path, event_kinds=[ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY],
             watch_subdirectories=False):
    AbstractWatchService.__init__(self, path)
    self.logger = LoggerFactory.getLogger(logger_name + ".FileWatcher")
    self.event_kinds = event_kinds
    self.watch_subdirectories = watch_subdirectories
    self.logger.debug("new fileWatcher for " + str(path) + " created.")
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.elasticsearch_index_url = args[Constant.WH_ELASTICSEARCH_URL_KEY]
    self.elasticsearch_port = args[Constant.WH_ELASTICSEARCH_PORT_KEY]
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor(1)
def __init__(self, args, scheduler_type):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.app_id = int(args[Constant.APP_ID_KEY])
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor()
    self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
    self.metadata_folder = self.app_folder + "/" + str(scheduler_type) + "/" + str(self.app_id)
def __init__(self):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    # NOTE: `args` is assumed to be a module-level dict supplied by the calling
    # ETL job, not a constructor parameter.
    username = args[Constant.HIVE_METASTORE_USERNAME]
    password = args[Constant.HIVE_METASTORE_PASSWORD]
    jdbc_driver = args[Constant.HIVE_METASTORE_JDBC_DRIVER]
    jdbc_url = args[Constant.HIVE_METASTORE_JDBC_URL]
    self.conn_hms = zxJDBC.connect(jdbc_url, username, password, jdbc_driver)
    self.curs = self.conn_hms.cursor()
    dependency_instance_file = args[Constant.HIVE_DEPENDENCY_CSV_FILE_KEY]
    self.instance_writer = FileWriter(dependency_instance_file)
def __init__(self, path, iFileLoader=None, fileLoadTimout=None, printNfsTimes=False,
             wait_for_exposure_callable=None):
    self.logger = LoggerFactory.getLogger("LazyDataSetProvider:%s" % path)
    self.path = path
    self.iFileLoader = iFileLoader
    self.fileLoadTimout = fileLoadTimout
    self.printNfsTimes = printNfsTimes
    self.wait_for_exposure_callable = wait_for_exposure_callable
    self.configureLock = threading.Lock()
    self.dataset = None
def __init__(self, deployed, steps):
    self.logger = LoggerFactory.getLogger("cassandra")
    self.deployed = deployed
    self.__deployed = deployed._delegate
    self.script_pattern = re.compile(self.deployed.scriptRecognitionRegex)
    self.rollback_pattern = re.compile(self.deployed.rollbackScriptRecognitionRegex)
    self.artifact_folder = deployed.getFile().path
    self.steps = steps
    self.logger.info("deployed_helper Init done")
def __init__(self, name, pgm_grat_pitch, pgm_mirr_pitch, pgmpvroot, energy,
             idupvroot, iddpvroot, move_pgm=True, move_id=True):
    # motors, maybe also detector to set the delay time
    self.logger = LoggerFactory.getLogger("ContinuousPgmGratingIDGapEnergyMoveController:%s" % name)
    self.verbose = False
    self.setName(name)
    self._start_event = threading.Event()
    self._movelog_time = datetime.now()
    # PGM
    self._pgm_grat_pitch = pgm_grat_pitch
    self._pgm_mirr_pitch = pgm_mirr_pitch
    self._pgm_grat_pitch_speed_orig = None
    self._pgm_runupdown_time = None
    self._move_pgm = move_pgm
    self._move_id = move_id
    self.pvs = PvManager({'grating_density': 'NLINES',
                          'cff': 'CFF',
                          'grating_offset': 'GRTOFFSET',
                          'plane_mirror_offset': 'MIROFFSET',
                          'pgm_energy': 'ENERGY',
                          'grating_pitch': 'GRT:PITCH',
                          'mirror_pitch': 'MIR:PITCH',
                          'energy_calibration_gradient': 'MX',
                          'energy_calibration_reference': 'REFERENCE'}, pgmpvroot)
    if installation.isLive():
        self.pvs.configure()
    # ID
    self.energy = energy
    self.idd = energy.idd
    self.idu = energy.idu
    self._id_gap_speed_orig = None
    self._id_runupdown_time = None
    self.idupvs = PvManager({'vel': 'BLGSETVEL', 'acc': 'IDGSETACC'}, idupvroot)
    if installation.isLive():
        self.idupvs.configure()
    self.iddpvs = PvManager({'vel': 'BLGSETVEL', 'acc': 'IDGSETACC'}, iddpvroot)
    if installation.isLive():
        self.iddpvs.configure()
    self.grating_pitch_positions = []
    self.mirror_pitch_positions = []
    self.pgm_energy_positions = []
    self._start_time = None
    self.idspeedfactor = 1.0
    self.pgmspeedfactor = 1.0
    self.idstartdelaytime = 0.0
    self._move_start = 0.0
    self._move_end = 1.0
    self._move_step = 0.1
    self._triggerPeriod = 0.0
    self.idcontrols = None
    self.continuousMovingStarted = False
def getLogFile():
    # TODO(jon): this will likely have to change as the complexity of
    # logback.xml increases. :(
    # should return the "default" logging context
    context = LoggerFactory.getILoggerFactory()
    assert context.getName() == 'default', _CONTEXT_ASSERT_MSG % context.getName()
    logger = context.getLogger(Logger.ROOT_LOGGER_NAME)
    # for now I'll assume that there's only ONE appender per logger
    appender = [x for x in logger.iteratorForAppenders()].pop()
    assert isinstance(appender, FileAppender), _APPENDER_ASSERT_MSG % type(appender).getCanonicalName()
    return appender.getFile()
def __init__(self, hdfs_uri, kerberos=False, kerberos_principal=None, keytab_file=None):
    """
    :param hdfs_uri: hdfs://hadoop-name-node:port
    :param kerberos: optional, if kerberos authentication is needed
    :param kerberos_principal: optional, [email protected]
    :param keytab_file: optional, user.keytab or ~/.kerberos/user.keytab
    """
    self.logger = LoggerFactory.getLogger(self.__class__.__name__)
    hdfs_conf = Configuration()
    if hdfs_uri.startswith('hdfs://'):
        hdfs_conf.set(Hdfs.FS_DEFAULT_NAME_KEY, hdfs_uri)
    elif hdfs_uri > "":
        self.logger.error("%s is an invalid uri for hdfs namenode ipc bind." % hdfs_uri)
    if kerberos:
        # init kerberos and keytab
        if not kerberos_principal or not keytab_file:
            print "Kerberos Principal and Keytab File Name/Path are required!"
        keytab_path = keytab_file
        if keytab_file.startswith('/'):
            if os.path.exists(keytab_file):
                keytab_path = keytab_file
                print "Using keytab at %s" % keytab_path
        else:
            # try relative path in a few well-known locations, skipping
            # locations whose environment variable is not set
            all_locations = [loc for loc in
                             [os.getcwd(),
                              expanduser("~") + "/.ssh",
                              expanduser("~") + "/.kerberos",
                              expanduser("~") + "/.wherehows",
                              os.getenv("APP_HOME"),
                              os.getenv("WH_HOME")]
                             if loc]
            for loc in all_locations:
                if os.path.exists(loc + '/' + keytab_file):
                    keytab_path = loc + '/' + keytab_file
                    print "Using keytab at %s" % keytab_path
                    break
        hdfs_conf.set("hadoop.security.authentication", "kerberos")
        hdfs_conf.set("dfs.namenode.kerberos.principal.pattern", "*")
        UserGroupInformation.setConfiguration(hdfs_conf)
        UserGroupInformation.loginUserFromKeytab(kerberos_principal, keytab_path)
    self.fs = Hdfs.get(hdfs_conf)
    requests.packages.urllib3.disable_warnings()
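# Instantiation sketch, assuming this __init__ belongs to the SchemaUrlHelper
# class referenced elsewhere in this section; the URI, principal, and keytab
# name below are placeholders, not values from any real cluster.
def _build_schema_url_helper():
    return SchemaUrlHelper("hdfs://namenode.example.com:8020",
                           kerberos=True,
                           kerberos_principal="etl@EXAMPLE.COM",
                           keytab_file="etl.keytab")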
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor()
    self.app_id = int(args[Constant.JOB_REF_ID_KEY])
    self.group_app_id = int(args[Constant.LDAP_GROUP_APP_ID_KEY])
    self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
    self.metadata_folder = self.app_folder + "/" + str(self.app_id)
    self.ceo_user_id = args[Constant.LDAP_CEO_USER_ID_KEY]
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.app_id = int(args[Constant.JOB_REF_ID_KEY])
    self.wh_exec_id = long(args[Constant.WH_EXEC_ID_KEY])
    self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.wh_cursor = self.wh_con.cursor()
    self.look_back_days = args[Constant.AW_LINEAGE_ETL_LOOKBACK_KEY]
    self.last_execution_unix_time = None
    self.get_last_execution_unix_time()
def __init__(self):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    requests.packages.urllib3.disable_warnings()
    self.app_id = int(args[Constant.APP_ID_KEY])
    self.wh_exec_id = long(args[Constant.WH_EXEC_ID_KEY])
    self.project_writer = FileWriter(args[Constant.GIT_PROJECT_OUTPUT_KEY])
    self.repo_writer = FileWriter(args[Constant.PRODUCT_REPO_OUTPUT_KEY])
    self.repo_owner_writer = FileWriter(args[Constant.PRODUCT_REPO_OWNER_OUTPUT_KEY])
    self.multiproduct = {}
    self.git_repo = {}
    self.product_repo = []
def __init__(self):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    # connection
    username = args[Constant.HIVE_METASTORE_USERNAME]
    password = args[Constant.HIVE_METASTORE_PASSWORD]
    jdbc_driver = args[Constant.HIVE_METASTORE_JDBC_DRIVER]
    jdbc_url = args[Constant.HIVE_METASTORE_JDBC_URL]
    self.conn_hms = zxJDBC.connect(jdbc_url, username, password, jdbc_driver)
    self.curs = self.conn_hms.cursor()
    # variable
    self.dataset_dict = {}
def getLogLevel(loggerName='ROOT'):
    logger = LoggerFactory.getLogger(loggerName)
    if logger.getLevel():
        level = str(logger.getLevel())
    else:
        level = None
    if logger.getEffectiveLevel():
        effectiveLevel = str(logger.getEffectiveLevel())
    else:
        effectiveLevel = None
    return {'level': level, 'effectiveLevel': effectiveLevel}
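# Usage sketch for getLogLevel above: inspect the root logger's configured and
# effective levels (the configured level is None when it is only inherited).
levels = getLogLevel()
print "configured: %s, effective: %s" % (levels['level'], levels['effectiveLevel'])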
def __init__(self): func = formData.get("func") result = "{}" resultType = "text/plain" if func == "reindex": file = formData.get("file") portalId = formData.get("portalId") portalManager = Services.getPortalManager() if file: print " * Reindexing: formData=%s" % file portalManager.indexObject(file) sessionState.set("reindex/lastResult", "success") result = '{ status: "ok" }' elif portalId: portal = portalManager.get(portalId) print " * Reindexing: Portal=%s" % portal.name portalManager.indexPortal(portal) sessionState.set("reindex/lastResult", "success") result = '{ status: "ok" }' else: sessionState.set("reindex/lastResult", "failed") result = '{ status: "failed" }' elif func == "get-state": result = '{ running: "%s", lastResult: "%s" }' % \ (sessionState.get("reindex/running"), sessionState.get("reindex/lastResult")) elif func == "get-log": context = LoggerFactory.getILoggerFactory() logger = context.getLogger("au.edu.usq.fascinator.IndexClient") it = logger.iteratorForAppenders() appender = logger.getAppender("reindex") layout = HTMLLayout() layout.setContext(context) layout.setPattern("%d%level%msg") layout.setTitle("Index log") layout.start() result = "<table>" count = appender.getLength() if count == -1: result += "<tr><td>Failed</td></tr>" elif count == 0: result += "<tr><td>No logging events</td></tr>" else: for i in range(0, count): event = appender.get(i) result += layout.doLayout(event) result += "</table>" resultType = "text/html" writer = response.getPrintWriter(resultType) writer.println(result) writer.close()
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    # connection
    self.username = args[Constant.HIVE_METASTORE_USERNAME]
    self.password = args[Constant.HIVE_METASTORE_PASSWORD]
    self.jdbc_driver = args[Constant.HIVE_METASTORE_JDBC_DRIVER]
    self.jdbc_url = args[Constant.HIVE_METASTORE_JDBC_URL]
    self.connection_interval = int(args[Constant.HIVE_METASTORE_RECONNECT_TIME])
    self.logger.info("DB re-connection interval: %d" % self.connection_interval)
    self.db_whitelist = args[Constant.HIVE_DATABASE_WHITELIST_KEY] if Constant.HIVE_DATABASE_WHITELIST_KEY in args else "''"
    self.db_blacklist = args[Constant.HIVE_DATABASE_BLACKLIST_KEY] if Constant.HIVE_DATABASE_BLACKLIST_KEY in args else "''"
    self.logger.info("DB whitelist: " + self.db_whitelist)
    self.logger.info("DB blacklist: " + self.db_blacklist)
    self.conn_hms = None
    self.connect_time = None
    self.db_connect(True)

    hdfs_namenode_ipc_uri = args.get(Constant.HDFS_NAMENODE_IPC_URI_KEY, None)
    kerberos_principal = args.get(Constant.KERBEROS_PRINCIPAL_KEY, None)
    keytab_file = args.get(Constant.KERBEROS_KEYTAB_FILE_KEY, None)
    kerberos_auth = False
    if Constant.KERBEROS_AUTH_KEY in args:
        kerberos_auth = FileUtil.parse_bool(args[Constant.KERBEROS_AUTH_KEY], False)

    self.table_whitelist_enabled = False
    if Constant.HIVE_TABLE_WHITELIST_ENABLED in args:
        self.table_whitelist_enabled = FileUtil.parse_bool(args[Constant.HIVE_TABLE_WHITELIST_ENABLED], False)
    self.table_blacklist_enabled = False
    if Constant.HIVE_TABLE_BLACKLIST_ENABLED in args:
        self.table_blacklist_enabled = FileUtil.parse_bool(args[Constant.HIVE_TABLE_BLACKLIST_ENABLED], False)

    self.schema_url_helper = SchemaUrlHelper.SchemaUrlHelper(hdfs_namenode_ipc_uri, kerberos_auth,
                                                             kerberos_principal, keytab_file)

    # global variables
    self.databases = None
    self.db_dict = {}  # name : index
    self.table_dict = {}  # fullname : index
    self.dataset_dict = {}  # name : index
    self.instance_dict = {}  # name : index
    self.serde_param_columns = []

    # counting statistics
    self.external_url = 0
    self.hdfs_count = 0
    self.schema_registry_count = 0
def __init__(self):
    print " * backup.py: formData=%s" % formData
    result = "{}"
    resultType = "text/plain"
    portalManager = Services.getPortalManager()
    func = formData.get("func")
    if func == "backup-view":
        print " * backup.py: backup portal %s" % portalId
        portal = portalManager.get(portalId)
        if portal:
            portalManager.backup(portal)
            sessionState.set("backup/lastResult", "success")
            result = '{ status: "ok" }'
        else:
            sessionState.set("backup/lastResult", "failed")
            result = '{ status: "failed" }'
    elif func == "get-state":
        result = '{ running: "%s", lastResult: "%s" }' % (
            sessionState.get("backup/running"),
            sessionState.get("backup/lastResult"),
        )
    elif func == "get-log":
        context = LoggerFactory.getILoggerFactory()
        logger = context.getLogger("au.edu.usq.fascinator.BackupClient")
        it = logger.iteratorForAppenders()
        appender = logger.getAppender("backup")
        layout = HTMLLayout()
        layout.setContext(context)
        layout.setPattern("%d%level%msg")
        layout.setTitle("Backup log")
        layout.start()
        result = "<table>"
        count = appender.getLength()
        if count == -1:
            result += "<tr><td>Failed</td></tr>"
        elif count == 0:
            result += "<tr><td>No logging events</td></tr>"
        else:
            for i in range(0, count):
                event = appender.get(i)
                result += layout.doLayout(event)
        result += "</table>"
        resultType = "text/html"
    writer = response.getPrintWriter(resultType)
    writer.println(result)
    writer.close()
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.args = args
    self.app_id = int(args[Constant.APP_ID_KEY])
    self.group_app_id = int(args[Constant.LDAP_GROUP_APP_ID_KEY])
    self.wh_exec_id = long(args[Constant.WH_EXEC_ID_KEY])
    self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
    self.metadata_folder = self.app_folder + "/" + str(self.app_id)
    if not os.path.exists(self.metadata_folder):
        try:
            os.makedirs(self.metadata_folder)
        except Exception as e:
            self.logger.error(e)
    self.ldap_user = set()
    self.group_map = dict()
    self.group_flatten_map = dict()
def __init__(self, name, rootNamespace=None, scannablesToRead=[], readFromNexus=False):
    """Create a MetadataCollector Scannable, for use with SRSDataWriters
    """
    self.name = name
    self.inputNames = []
    self.extraNames = []
    self.outputFormat = []
    self.logger = LoggerFactory.getLogger("metadata")
    if scannablesToRead and readFromNexus:
        self.logger.warn("%s: When readFromNexus=True the specified scannablesToRead are ignored!" % name)
    self.readFromNexus = readFromNexus
    self.rootNamespaceDict = rootNamespace
    self.scannables_to_read = scannablesToRead
    self.verbose = False
    self.quiet = False
    self.prepend_keys_with_scannable_names = True
def __init__(self, file_content=None, log_file_name=None):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.text = None
    if log_file_name is not None:
        self.log_file_name = log_file_name
        file_ext = os.path.splitext(self.log_file_name)[1][1:]
        self.logger.info(file_ext)
        self.logger.info(self.log_file_name)
        if file_ext == 'gz':
            try:
                self.text = gzip.GzipFile(mode='r', filename=self.log_file_name).read()
            except:
                self.logger.error('exception')
                self.logger.error(str(sys.exc_info()[0]))
        else:
            self.text = open(self.log_file_name, 'r').read()
    else:
        self.text = file_content
def setLogLevel(level, loggerName='ROOT', temporary=True):
    if not temporary:
        if loggerName != 'ROOT':
            raise ValueError(_BAD_LOGGERNAME % (loggerName))
        from edu.wisc.ssec.mcidasv.startupmanager.options import OptionMaster
        optMaster = OptionMaster.getInstance()
        optMaster.getLoggerLevelOption("LOG_LEVEL").setValue(level)
        optMaster.writeStartup()
    context = LoggerFactory.getILoggerFactory()
    logger = context.exists(loggerName)
    if not logger:
        logger = context.getLogger(loggerName)
    currentLevel = logger.getLevel()
    if not currentLevel:
        currentLevel = logger.getEffectiveLevel()
    convertedLevel = currentLevel.toLevel(level, currentLevel.INFO)
    logger.setLevel(convertedLevel)
def __init__(self, hdfs_uri, kerberos=False, kerberos_principal=None, keytab_file=None):
    """
    :param hdfs_uri: hdfs://hadoop-name-node:port
    :param kerberos: optional, if kerberos authentication is needed
    :param kerberos_principal: optional, [email protected]
    :param keytab_file: optional, user.keytab or ~/.kerberos/user.keytab
    """
    self.logger = LoggerFactory.getLogger(self.__class__.__name__)
    hdfs_conf = Configuration()
    if hdfs_uri.startswith('hdfs://'):
        hdfs_conf.set(Hdfs.FS_DEFAULT_NAME_KEY, hdfs_uri)
    elif hdfs_uri > "":
        self.logger.error("%s is an invalid uri for hdfs namenode ipc bind." % hdfs_uri)
    if kerberos:
        # init kerberos and keytab
        if not kerberos_principal or not keytab_file:
            print "Kerberos Principal and Keytab File Name/Path are required!"
        keytab_path = keytab_file
        if keytab_file.startswith('/'):
            if os.path.exists(keytab_file):
                keytab_path = keytab_file
                print "Using keytab at %s" % keytab_path
        else:
            # try relative path in a few well-known locations, skipping
            # locations whose environment variable is not set
            all_locations = [loc for loc in
                             [os.getcwd(),
                              expanduser("~") + "/.ssh",
                              expanduser("~") + "/.kerberos",
                              expanduser("~") + "/.wherehows",
                              os.getenv("APP_HOME"),
                              os.getenv("WH_HOME")]
                             if loc]
            for loc in all_locations:
                if os.path.exists(loc + '/' + keytab_file):
                    keytab_path = loc + '/' + keytab_file
                    print "Using keytab at %s" % keytab_path
                    break
        hdfs_conf.set("hadoop.security.authentication", "kerberos")
        hdfs_conf.set("dfs.namenode.kerberos.principal.pattern", "*")
        UserGroupInformation.setConfiguration(hdfs_conf)
        UserGroupInformation.loginUserFromKeytab(kerberos_principal, keytab_path)
    self.fs = Hdfs.get(hdfs_conf)
    requests.packages.urllib3.disable_warnings()
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.app_id = int(args[Constant.JOB_REF_ID_KEY])
    self.wh_exec_id = long(args[Constant.WH_EXEC_ID_KEY])
    self.oz_con = zxJDBC.connect(args[Constant.OZ_DB_URL_KEY],
                                 args[Constant.OZ_DB_USERNAME_KEY],
                                 args[Constant.OZ_DB_PASSWORD_KEY],
                                 args[Constant.OZ_DB_DRIVER_KEY])
    self.oz_cursor = self.oz_con.cursor()
    self.lookback_period = args[Constant.OZ_EXEC_ETL_LOOKBACK_MINS_KEY]
    self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
    self.metadata_folder = self.app_folder + "/" + str(SchedulerType.OOZIE) + "/" + str(self.app_id)
    self.oz_version = 4.0
    if not os.path.exists(self.metadata_folder):
        try:
            os.makedirs(self.metadata_folder)
        except Exception as e:
            self.logger.error(e)
    self.get_oozie_version()
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    username = args[Constant.WH_DB_USERNAME_KEY]
    password = args[Constant.WH_DB_PASSWORD_KEY]
    JDBC_DRIVER = args[Constant.WH_DB_DRIVER_KEY]
    JDBC_URL = args[Constant.WH_DB_URL_KEY]
    self.database_scm_repo_file = args[Constant.DATABASE_SCM_REPO_OUTPUT_KEY]
    self.app_id = args[Constant.APP_ID_KEY]
    self.wh_etl_exec_id = args[Constant.WH_EXEC_ID_KEY]
    self.conn_mysql = zxJDBC.connect(JDBC_URL, username, password, JDBC_DRIVER)
    self.conn_cursor = self.conn_mysql.cursor()
    if Constant.INNODB_LOCK_WAIT_TIMEOUT in args:
        lock_wait_time = args[Constant.INNODB_LOCK_WAIT_TIMEOUT]
        self.conn_cursor.execute("SET innodb_lock_wait_timeout = %s;" % lock_wait_time)
    self.logger.info("Load Code Search CSV into {}, app_id {}, wh_exec_id {}"
                     .format(JDBC_URL, self.app_id, self.wh_etl_exec_id))
def __init__(self, args):
    self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
    self.app_id = int(args[Constant.JOB_REF_ID_KEY])
    self.wh_exec_id = long(args[Constant.WH_EXEC_ID_KEY])
    self.aw_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
                                 args[Constant.WH_DB_USERNAME_KEY],
                                 args[Constant.WH_DB_PASSWORD_KEY],
                                 args[Constant.WH_DB_DRIVER_KEY])
    self.aw_cursor = self.aw_con.cursor()
    self.remote_hadoop_script_dir = args[Constant.AW_REMOTE_HADOOP_SCRIPT_DIR]
    self.local_script_path = args[Constant.AW_LOCAL_SCRIPT_PATH]
    self.remote_script_path = args[Constant.AW_REMOTE_SCRIPT_PATH]
    self.aw_archive_dir = args[Constant.AW_ARCHIVE_DIR]
    # self.aw_log_url = args[Constant.AW_LOG_URL]
    self.bteq_source_target_override = args[Constant.AW_BTEQ_SOURCE_TARGET_OVERRIDE]
    self.metric_override = args[Constant.AW_METRIC_OVERRIDE]
    self.skip_already_parsed = args[Constant.AW_SKIP_ALREADY_PARSED]
    self.look_back_days = args[Constant.AW_LINEAGE_ETL_LOOKBACK_KEY]
    self.last_execution_unix_time = None
    self.get_last_execution_unix_time()