def __init__(self, adapter, job_status_update_callback, job_delete_callback, job_accounting_update_callback):
    """Set up the job monitor: adapter, caller-supplied callbacks, and polling config."""
    self.__monitor = False
    self.__thread_event = gevent.event.Event()
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)

    # Collaborators supplied by the caller.
    self.adapter = adapter
    self.job_status_update_callback = job_status_update_callback
    self.job_delete_callback = job_delete_callback
    self.job_accounting_update_callback = job_accounting_update_callback

    # Configuration
    config = ConfigManager.get_instance()
    self.max_job_status_retrieval_count = int(config.get_config_option(
        'JobMonitor', 'max_job_status_retrieval_count',
        JobMonitor.MAX_JOB_STATUS_RETRIEVAL_ERROR_COUNT))
    self.logger.debug('Using max job status retrieval count: %s' % self.max_job_status_retrieval_count)
    self.monitor_poll_period_in_seconds = float(config.get_config_option(
        'JobMonitor', 'monitor_poll_period_in_seconds',
        JobMonitor.MONITOR_POLL_PERIOD_IN_SECONDS))
    self.logger.debug('Using monitor poll period: %s [seconds]' % self.monitor_poll_period_in_seconds)
def __init__(self, channel_validate_callback, channel_delete_callback):
    """Store the channel callbacks and prepare the monitor's synchronization state."""
    self.__monitor = False
    self.__thread_event = gevent.event.Event()
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    # Callbacks invoked while monitoring channels.
    self.channel_validate_callback = channel_validate_callback
    self.channel_delete_callback = channel_delete_callback
def __init__(self, k8s_registry_path = ""):
    """Load the executor-runner job YAML template and create k8s API clients.

    :param k8s_registry_path: optional docker registry prefix for the
        executor runner image; when empty the template's image is kept.
    """
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    self.logger.info("K8s registry path: %s" % k8s_registry_path)
    self.configure()
    self.channel_name = None
    template_path = os.path.join(os.path.dirname(__file__),
                                 K8SAdapter.URB_EXECUTOR_RUNNER + ".yaml")
    with open(template_path) as fj:
        # FIX: yaml.load without an explicit Loader is deprecated and can
        # construct arbitrary objects; the template is plain data, so
        # safe_load is the correct call.
        self.job = yaml.safe_load(fj)
    container = self.job['spec']['template']['spec']['containers'][0]
    if len(k8s_registry_path) != 0:
        container['image'] = k8s_registry_path + "/" + K8SAdapter.URB_EXECUTOR_RUNNER
    # URB_MASTER environment variable has to be set in urb-master.yaml
    urb_master = os.environ.get('URB_MASTER')
    if not urb_master:
        self.logger.error("URB_MASTER is not set in urb-master.yaml")
    # NOTE(review): when URB_MASTER is unset a None value is still appended,
    # matching the original behavior — likely worth skipping instead.
    container['env'].append({'name': 'URB_MASTER', 'value': urb_master})
    self.logger.debug("Loaded job yaml template: %s" % self.job)
    # uuid.time_low size is 8, plus 2 dashes ('-')
    self.job_name_template_size = len(self.job['metadata']['name']) + K8SAdapter.UUID_SIZE + 2
    if self.job_name_template_size >= K8SAdapter.JOB_NAME_MAX_SIZE:
        self.logger.error("Job name template %s is too long, should be < %s bytes" %
                          (self.job_name_template_size, K8SAdapter.JOB_NAME_MAX_SIZE))
    self.image = container['image']
    self.core_v1 = client.CoreV1Api()
    self.batch_v1 = client.BatchV1Api()
def __init__(self):
    """Initialize the singleton factory; subsequent calls are no-ops."""
    if AdapterManager.__instance is not None:
        return  # already initialized once
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    self.adapter_dict = {}
def __init__(self, name, message_broker):
    """Bind this channel to *name* on the given message broker."""
    self.name = name
    self.message_broker = message_broker
    # Read/write callbacks are set later; no listener is active yet.
    self.read_callback = None
    self.write_callback = None
    self.__listen = False
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
def __init__(self):
    """Track active frameworks plus a bounded history of finished ones."""
    ObjectTracker.__init__(self)
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    self.framework_id_dict = {}
    # Bounded deque: the oldest finished framework ids fall off automatically.
    self.finished_framework_id_deque = deque(
        [], FrameworkTracker.MAX_NUMBER_OF_FINISHED_FRAMEWORKS)
    self.finished_framework_dict = {}
def __init__(self):
    """Initialize the DB manager singleton; repeat calls return immediately."""
    if DBManager.__instance is not None:
        return  # already initialized once
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    # Actual client is attached later.
    self.db_client = None
def __init__(self, db_uri='mongodb://localhost:27017/', db_name=DB_NAME, expire=TTL_MONTHS):
    """Open a Mongo connection and select *db_name*.

    :raises DBError: when the client cannot be created.
    """
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    try:
        # NOTE(review): use_greenlets was removed in pymongo 3.x — confirm the
        # pinned pymongo version still accepts it.
        self.client = MongoClient(db_uri, use_greenlets=True)
        self.db = self.client[db_name]
        self.expire = expire
    except Exception as ex:  # FIX: 'except E, ex' is Python-2-only syntax
        self.logger.warn('Cannot connect to Mongo DB: %s' % ex)
        raise DBError(exception=ex)
def __init__(self, channel, initial_retry_interval, max_retry_count):
    """Create a retry manager for *channel*, backed by a companion '.retry' channel."""
    self.__manage = False
    self.__thread_event = gevent.event.Event()
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    self.channel = channel
    # Messages wait on this companion channel between retry attempts.
    factory = ChannelFactory.get_instance()
    self.retry_channel = factory.create_channel(channel.name + '.retry')
    self.initial_retry_interval = initial_retry_interval
    self.max_retry_count = max_retry_count
def run():
    """Entry point: gevent-patch sockets, then run the executor until interrupted."""
    # Only sockets are monkey-patched; a full monkey.patch_all() was
    # deliberately avoided here.
    from gevent import monkey; monkey.patch_socket()
    logger = LogManager.get_instance().get_logger(__name__)
    logger.debug("run begins")
    try:
        executor = ExecutorRunner()
        executor.run()
    except KeyboardInterrupt as ex:  # FIX: 'except E, ex' is Python-2-only syntax
        logger.info('KeyboardInterrupt: %s' % ex)
def __init__(self, elected_callback=None, demoted_callback=None):
    """Prepare master-election state and optional election callbacks."""
    self.__run = False
    self.__master_broker = None
    self.__set_server_id()
    self.__thread_event = gevent.event.Event()
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    # Invoked when this instance gains / loses mastership (either may be None).
    self.elected_callback = elected_callback
    self.demoted_callback = demoted_callback
def __init__(self, db_uri='mongodb://localhost:27017/', db_name=DB_NAME, expire=TTL_MONTHS):
    """Connect to Mongo, verify the connection, and record TTL settings.

    :raises DBError: when the server cannot be reached.
    """
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    try:
        # for pymongo >= 3.0 constructor doesn't throw anymore
        self.client = MongoClient(db_uri, serverSelectionTimeoutMS = 5000)
        # check connection with ismaster command
        self.client.admin.command('ismaster')
        self.logger.info("Connected to Mongo DB: %s" % db_uri)
        self.db = self.client[db_name]
        self.expire = expire
        self.active = True
        self.enable_sleep = URBMongoClient.INITIAL_ENABLE_SLEEP
    except Exception as ex:  # FIX: 'except E, ex' is Python-2-only syntax
        self.logger.warn('Cannot connect to Mongo DB: %s' % ex)
        raise DBError(exception=ex)
def __init__(self, defaults={}, skip_cmd_line_config=False):
    """Set up configuration, command-line options, logging and signal handlers.

    NOTE(review): the mutable default for *defaults* is kept for interface
    compatibility; it is only read here (passed to set_config_defaults),
    never mutated by this method.
    """
    self.server = None
    self.shutdown = False
    self.shutdown_event = gevent.event.Event()
    # Config manager
    ConfigManager.get_instance().set_config_defaults(defaults)
    # Options
    self.option_parser = OptionParser()
    self.options = {}
    self.args = []
    if not skip_cmd_line_config:
        self.option_parser.add_option(
            '-d', action="store_true", dest='daemon_flag',
            help="Run service as a daemon")
        self.option_parser.add_option(
            '-p', '--pidfile', dest='pid_file', default=None,
            help="Store PID in the specified file")
        self.option_parser.add_option(
            '-s', '--silent', action="store_true", dest='silent',
            help="No logging to stdout")
        self.parse_options()
    if self.get_daemon_flag() or self.get_silent():
        # Disable screen logging.
        ConfigManager.get_instance().set_console_log_level('notset')
    # Logger
    self.logger = LogManager.get_instance().get_logger('URBService')
    # Configuration
    self.configure()
    # Signal handlers
    if not skip_cmd_line_config:
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGUSR1, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)
def __init__(self, db_client):
    """Wrap *db_client* and log the current TTL of the 'events' collection.

    Failures while reading index information are logged, never raised
    (best-effort inspection only).
    """
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    self.db_client = db_client
    curr_ttl = -1
    try:
        index_info = self.db_client.get_index_information('events')
        self.logger.trace('index_info=%s' % index_info)
        if 'ttl_index' in index_info:
            # direct dict membership instead of redundant .keys() call
            if 'key' in index_info['ttl_index']:
                if 'timestamp' in index_info['ttl_index']['key'][0]:
                    curr_ttl = index_info['ttl_index']['expireAfterSeconds']
                    self.logger.debug('Current Mongo TTL: %s ' % curr_ttl)
        if curr_ttl == -1:
            self.logger.debug(
                "No 'expireAfterSeconds' in index information for 'events' collection")
    except Exception as ex:  # FIX: 'except E, ex' is Python-2-only syntax
        self.logger.warn('Cannot get index info: %s' % ex)
def __init__(self):
    """Initialize the channel factory singleton from the configured broker spec."""
    if ChannelFactory.__instance is not None:
        return  # already initialized once
    cm = ConfigManager.get_instance()
    message_broker = cm.get_config_option('ChannelFactory', 'message_broker')
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    self.logger.debug('Using message broker: %s' % message_broker)
    if message_broker is None:
        raise ConfigurationError(
            'Message broker parameter missing from config file: %s' % cm.get_config_file())
    # Spec looks like "module.Constructor(args)"; split at the first dot.
    dot_pos = message_broker.find('.')
    self.message_broker_module = message_broker[:dot_pos]
    self.message_broker_constructor = message_broker[dot_pos + 1:]
    self.message_broker_class = self.message_broker_constructor.split('(')[0]
    self.message_broker = self.__get_message_broker()
def __init__(self, service_monitor_channel):
    """Keep a reference to the service monitor channel and create a logger."""
    self.service_monitor_channel = service_monitor_channel
    self.logger = LogManager.get_instance().get_logger(
        self.__class__.__name__)
def __init__(self):
    """Create a class-named logger, reset the channel name, and configure."""
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    self.channel_name = None  # set later (presumably by configure/callers — confirm)
    self.configure()
def __init__(self):
    """Initialize the tracker: logger, re-entrant lock, empty object registry."""
    log_manager = LogManager.get_instance()
    self.logger = log_manager.get_logger(self.__class__.__name__)
    self.logger.trace("__init__: self=%s" % self)
    # Re-entrant lock guarding object_dict.
    self.lock = threading.RLock()
    self.object_dict = {}
def __init__(self, channel_name=None):
    """Create this handler's channel and register for incoming messages."""
    self.name = self.__class__.__name__
    self.logger = LogManager.get_instance().get_logger(self.name)
    factory = ChannelFactory.get_instance()
    self.channel = factory.create_channel(channel_name)
    # Incoming reads on the channel are dispatched to self.handle.
    self.channel.register_read_callback(self.handle)
def configure(self):
    """Read ExecutorRunner config, set up channels, id objects and heartbeat info.

    Side effects: creates and chdirs into the mesos work directory, opens
    channels via ChannelFactory, and reads URB_FRAMEWORK_ID / JOB_ID /
    SGE_TASK_ID from the environment (KeyError if URB_FRAMEWORK_ID is unset).
    """
    cm = ConfigManager.get_instance()
    self.service_monitor_endpoint = cm.get_config_option('ExecutorRunner', 'service_monitor_endpoint')
    mesos_master_endpoint = cm.get_config_option('ExecutorRunner', 'mesos_master_endpoint')
    self.mesos_work_dir = cm.get_config_option('ExecutorRunner', 'mesos_work_dir') % {'tmp': os.environ.get('TMP', '')}
    # Create our working directory before we start logging
    if not os.path.exists(self.mesos_work_dir):
        os.makedirs(self.mesos_work_dir)
    os.chdir(self.mesos_work_dir)
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
    self.logger.debug('Config file: %s' % cm.get_config_file())
    cf = ChannelFactory.get_instance()
    self.service_monitor_channel = cf.create_channel(self.service_monitor_endpoint)
    self.mesos_channel = cf.create_channel(mesos_master_endpoint)
    self.channel_name = cf.get_unique_channel_name()
    self.logger.debug('Got my channel name: %s' % self.channel_name)
    self.host = cf.get_message_broker_connection_host()
    self.port = cf.get_message_broker_connection_port()
    # The service will send messages to our notify channel
    channel_id = MessagingUtility.get_endpoint_id(self.channel_name)
    self.notify_channel_name = MessagingUtility.get_notify_channel_name(None, channel_id)
    self.handler_list = [ExecutorHandler(self.notify_channel_name, self)]
    # Get various id objects
    self.framework_id = {'value': os.environ['URB_FRAMEWORK_ID']}
    if 'JOB_ID' in os.environ:
        self.job_id = os.environ['JOB_ID']
        self.task_id = os.environ.get('SGE_TASK_ID', '1')
    else:
        self.job_id = uuid.uuid1().hex
        # FIX: original format string had no %s placeholder, so the '%'
        # operation raised TypeError instead of logging the generated id.
        self.logger.error('Environment variable JOB_ID is not defined, autogenerating job id: %s' % self.job_id)
        self.task_id = "1"
    # Both branches computed the identical slave id; hoisted out of the if/else.
    slave_id = NamingUtility.create_slave_id(self.job_id, self.task_id, self.notify_channel_name)
    self.slave_id = {"value": slave_id}
    self.logger.debug('slave id: %s' % self.slave_id)
    self.slave_info = {'hostname': self.host, 'port': self.port, 'id': self.slave_id}
    self.dummy_framework_info = {'name': 'default', 'user': '******'}
    self.heartbeat_channel_info = {
        'channel_id': self.notify_channel_name,
        'framework_id': self.framework_id['value'],
        'slave_id': self.slave_id['value'],
        'endpoint_type': 'executor_runner',
    }
def __init__(self, db_client):
    """Store the database client handle and create a class-named logger."""
    self.db_client = db_client
    self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import reflection
import mesos_pb2
import executor_pb2
from urb.log.log_manager import LogManager
from gevent import monkey
monkey.patch_all()

# Flask application serving the /redirect endpoint below.
# NOTE(review): debug mode is enabled unconditionally — confirm this module
# is not deployed as-is in production.
app = Flask(__name__)
app.debug = True

logger = LogManager.get_instance().get_logger(__name__)


def request_debug(msg, r):
    # Dump every interesting part of the incoming request at debug level.
    logger.debug("%s: request=%s" % (msg, r))
    logger.debug("is_json: %s" % r.is_json)
    logger.debug("request.headers=%s" % r.headers)
    logger.debug("request.environ=%s" % r.environ)
    logger.debug("request.data=%s" % r.data)


@app.route('/redirect', methods=['GET', 'POST'])
def redirect():
    # Log the request, then acknowledge with an empty 200 response.
    request_debug("/redirect", request)
    resp = Response(status=200)
    return resp
def __init__(self, port=5051):
    """Remember the listen port (default 5051) and create a logger."""
    self.name = self.__class__.__name__
    self.port = port
    self.logger = LogManager.get_instance().get_logger(self.name)
    self.logger.debug("__init__")