def __init__(self, shard, contest_id=None):
    """Build the resource-monitoring service.

    If contest_id is not None, we assume the user wants the
    autorestart feature.
    """
    logger.initialize(ServiceCoord("ResourceService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id

    # Resource samples collected so far.
    self._local_store = []
    # Floating point epoch, used for precise measurement of percents.
    self._last_saved_time = time.time()
    # Starting point for cpu times.
    self._prev_cpu_times = self._get_cpu_times()
    # Sorted list of ServiceCoord running in the same machine.
    self._local_services = self._find_local_services()

    # For each local service: True when we will restart it (autorestart
    # requested), None otherwise.
    autorestart = self.contest_id is not None
    self._will_restart = dict(
        (coord, True if autorestart else None)
        for coord in self._local_services)
    # Process associated to each ServiceCoord (None until found).
    self._procs = dict((coord, None) for coord in self._local_services)
    # Previous cpu time for each service.
    self._services_prev_cpu_times = dict(
        (coord, (0.0, 0.0)) for coord in self._local_services)

    # Start finding processes and their cputimes.
    self._store_resources(store=False)
    self.add_timeout(self._store_resources, None, 5)
    if autorestart:
        self._launched_processes = set()
        self.add_timeout(self._restart_services, None, 5,
                         immediately=True)
def __init__(self, shard):
    """Set up the worker: logging, base service, file cache, job flag."""
    logger.initialize(ServiceCoord("Worker", shard))
    Service.__init__(self, shard, custom_logger=logger)
    # Cache for files exchanged with the other services.
    self.file_cacher = FileCacher(self)
    # When True, the result of the job currently running is discarded.
    self._ignore_job = False
    # Serializes job execution (one job at a time per worker).
    self.work_lock = gevent.coros.RLock()
def __init__(self, shard, contest_id):
    """Wire up the evaluation service: queue, worker pool, timers."""
    logger.initialize(ServiceCoord("EvaluationService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id

    self.queue = JobQueue()
    self.pool = WorkerPool(self)
    self.scoring_service = self.connect_to(
        ServiceCoord("ScoringService", 0))

    # Register every Worker shard with the pool.
    for worker_shard in xrange(get_service_shards("Worker")):
        self.pool.add_worker(ServiceCoord("Worker", worker_shard))

    # Periodic housekeeping: (callback, period, run immediately?).
    periodic = [
        (self.dispatch_jobs,
         EvaluationService.CHECK_DISPATCH_TIME, True),
        (self.check_workers_timeout,
         EvaluationService.WORKER_TIMEOUT_CHECK_TIME, False),
        (self.check_workers_connection,
         EvaluationService.WORKER_CONNECTION_CHECK_TIME, False),
        (self.search_jobs_not_done,
         EvaluationService.JOBS_NOT_DONE_CHECK_TIME, True),
    ]
    for callback, period, run_now in periodic:
        self.add_timeout(callback, None, period, immediately=run_now)
def __init__(self, shard, contest):
    """Start the contestant-facing web server for the given contest."""
    logger.initialize(ServiceCoord("ContestWebServer", shard))
    self.contest = contest

    # This is a dictionary (indexed by username) of pending
    # notification. Things like "Yay, your submission went
    # through.", not things like "Your question has been replied",
    # that are handled by the db. Each username points to a list
    # of tuples (timestamp, subject, text).
    self.notifications = {}

    here = os.path.dirname(__file__)
    parameters = {
        "login_url": "/",
        "template_path": os.path.join(here, "templates", "contest"),
        "static_path": os.path.join(here, "static"),
        "cookie_secret": base64.b64encode(config.secret_key),
        "debug": config.tornado_debug,
        "is_proxy_used": config.is_proxy_used,
    }
    WebService.__init__(
        self,
        config.contest_listen_port[shard],
        _cws_handlers,
        parameters,
        shard=shard,
        listen_address=config.contest_listen_address[shard],
    )

    self.file_cacher = FileCacher(self)
    self.evaluation_service = self.connect_to(
        ServiceCoord("EvaluationService", 0))
    self.scoring_service = self.connect_to(
        ServiceCoord("ScoringService", 0))
def __init__(self, shard):
    """Set up the worker service and its file cache."""
    logger.initialize(ServiceCoord("Worker", shard))
    Service.__init__(self, shard, custom_logger=logger)
    # Cache for files exchanged with the other services.
    self.file_cacher = FileCacher(self)
    # When True, the outcome of the running job is thrown away.
    self.ignore_job = False
    # Serializes job execution (one job at a time per worker).
    self.work_lock = threading.Lock()
def __init__(self, shard, contest_id=None):
    """If contest_id is not None, we assume the user wants the
    autorestart feature.
    """
    logger.initialize(ServiceCoord("ResourceService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id

    # NOTE(review): described elsewhere as "a dictionary indexed by
    # time in int(epoch)", but it is initialized as a list — confirm
    # which one later code expects.
    self._local_store = []
    # Floating point epoch used for precise measurement of percents.
    self._last_saved_time = time.time()
    # Starting point for cpu times.
    self._prev_cpu_times = self._get_cpu_times()
    # Sorted list of ServiceCoord running in the same machine.
    self._local_services = self._find_local_services()
    # For each local service: True when the autorestart feature is on
    # (i.e. a contest_id was given), None otherwise.
    self._will_restart = dict(
        (service, None if self.contest_id is None else True)
        for service in self._local_services
    )
    # Process found to be associated to each ServiceCoord (None until
    # discovered).
    self._procs = dict((service, None) for service in self._local_services)
    # Previous cpu time for each service.
    self._services_prev_cpu_times = dict((service, (0.0, 0.0))
                                         for service in self._local_services)
    # Start finding processes and their cputimes.
    self._store_resources(store=False)
    self.add_timeout(self._store_resources, None, 5)
    if self.contest_id is not None:
        self._launched_processes = set([])
        self.add_timeout(self._restart_services, None, 5, immediately=True)
def __init__(self, shard):
    """Start the administrative web server and connect to peer services."""
    logger.initialize(ServiceCoord("AdminWebServer", shard))

    # A list of pending notifications.
    self.notifications = []

    here = os.path.dirname(__file__)
    parameters = {
        "login_url": "/",
        "template_path": os.path.join(here, "templates", "admin"),
        "static_path": os.path.join(here, "static"),
        "cookie_secret": base64.b64encode(config.secret_key),
        "debug": config.tornado_debug,
    }
    WebService.__init__(self,
                        config.admin_listen_port,
                        _aws_handlers,
                        parameters,
                        shard=shard,
                        custom_logger=logger,
                        listen_address=config.admin_listen_address)

    self.file_cacher = FileCacher(self)
    self.evaluation_service = self.connect_to(
        ServiceCoord("EvaluationService", 0))
    self.scoring_service = self.connect_to(
        ServiceCoord("ScoringService", 0))
    # One connection per ResourceService shard.
    self.resource_services = [
        self.connect_to(ServiceCoord("ResourceService", i))
        for i in xrange(get_service_shards("ResourceService"))]
    self.logservice = self.connect_to(ServiceCoord("LogService", 0))
def __init__(self, shard):
    """Set up the worker: base service, file cache and per-job state."""
    logger.initialize(ServiceCoord("Worker", shard))
    Service.__init__(self, shard, custom_logger=logger)
    # Cache for files exchanged with the other services.
    self.file_cacher = FileCacher(self)
    # Serializes job execution (one job at a time per worker).
    self.work_lock = threading.Lock()
    # Task type and database session of the job in progress, if any.
    self.task_type = None
    self.session = None
def __init__(self, shard, contest_id):
    """Start the scoring service for the given contest."""
    logger.initialize(ServiceCoord("ScoringService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id

    # ScoreType objects deciding the score of each task's submissions.
    self.scorers = {}
    self._initialize_scorers()

    # Ids of submissions left without a score (e.g. SS was switched off
    # for a while, or the connection with ES broke); non-empty exactly
    # while a timeout for "score_old_submission" is alive.
    self.submission_ids_to_score = set()
    self.submission_ids_to_token = set()
    self.scoring_old_submission = False

    # Submissions already analyzed at startup (for scoring and for
    # tokens), so we can load only the score-less ones without
    # invalidating every score.
    self.submission_ids_scored = set()
    self.submission_ids_tokened = set()

    # Ranking web servers we need to send data to, as tuples
    # (protocol, "host:port", authorization).
    self.rankings = []
    for idx in xrange(len(config.rankings_address)):
        address = config.rankings_address[idx]
        auth = get_authorization(config.rankings_username[idx],
                                 config.rankings_password[idx])
        self.rankings.append((address[0],  # HTTP / HTTPS
                              "%s:%d" % tuple(address[1:]),
                              auth))

    # Every ranking starts out needing initialization.
    self.initialize_queue = set(self.rankings)
    self.submission_queue = dict()
    self.subchange_queue = dict()
    self.operation_queue_lock = threading.Lock()

    # Background daemon thread draining the operation queues.
    self.log_bridge = LogBridge()
    dispatcher = threading.Thread(target=self.dispath_operations_thread,
                                  args=(self.log_bridge,))
    dispatcher.daemon = True
    dispatcher.start()

    self.add_timeout(self.search_jobs_not_done, None,
                     ScoringService.JOBS_NOT_DONE_CHECK_TIME,
                     immediately=True)
    self.add_timeout(self.forward_logs, None,
                     ScoringService.FORWARD_LOG_TIME,
                     immediately=True)
def __init__(self, shard, contest_id):
    logger.initialize(ServiceCoord("ScoringService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id
    # ScoreType objects deciding the score of each task's submissions.
    self.scorers = {}
    self._initialize_scorers()
    # If for some reason (SS switched off for a while, or broken
    # connection with ES), submissions have been left without
    # score, this is the set where you want to put their ids. Note
    # that sets != {} if and only if there is an alive timeout for
    # the method "score_old_submission".
    self.submission_ids_to_score = set([])
    self.submission_ids_to_token = set([])
    self.scoring_old_submission = False
    # We need to load every submission at start, but we don't want
    # to invalidate every score so that we can simply load the
    # score-less submissions. So we keep a set of submissions that
    # we analyzed (for scoring and for tokens).
    self.submission_ids_scored = set()
    self.submission_ids_tokened = set()
    # Initialize ranking web servers we need to send data to, as
    # (protocol, "host:port", authorization) tuples.
    self.rankings = []
    for i in xrange(len(config.rankings_address)):
        address = config.rankings_address[i]
        username = config.rankings_username[i]
        password = config.rankings_password[i]
        self.rankings.append((address[0],  # HTTP / HTTPS
                              "%s:%d" % tuple(address[1:]),
                              get_authorization(username, password)))
    self.initialize_queue = set()
    self.submission_queue = dict()
    self.subchange_queue = dict()
    self.operation_queue_lock = threading.Lock()
    # Every ranking starts out needing initialization.
    for ranking in self.rankings:
        self.initialize_queue.add(ranking)
    # Background daemon thread draining the operation queues.
    # NOTE(review): "dispath" looks like a typo for "dispatch", but the
    # name must match the method defined elsewhere in this class.
    self.log_bridge = LogBridge()
    thread = threading.Thread(target=self.dispath_operations_thread,
                              args=(self.log_bridge,))
    thread.daemon = True
    thread.start()
    self.add_timeout(self.search_jobs_not_done, None,
                     ScoringService.JOBS_NOT_DONE_CHECK_TIME,
                     immediately=True)
    self.add_timeout(self.forward_logs, None,
                     ScoringService.FORWARD_LOG_TIME,
                     immediately=True)
def __init__(self, shard):
    """Prepare the fixtures used to exercise FileCacher."""
    logger.initialize(ServiceCoord("TestFileCacher", shard))
    TestService.__init__(self, shard, custom_logger=logger)

    # Assume we store the cache in "./cache/fs-cache-TestFileCacher-0/"
    self.cache_base_path = os.path.join(config.cache_dir,
                                        "fs-cache-TestFileCacher-0")
    # Per-test state, filled in by the individual test steps.
    self.cache_path = None
    self.content = None
    self.fake_content = None
    self.digest = None
    self.file_obj = None

    self.file_cacher = FileCacher(self)
def __init__(self, shard, contest_id):
    logger.initialize(ServiceCoord("ScoringService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id

    # Initialize scorers, the ScoreType objects holding all
    # submissions for a given task and deciding scores.
    self.scorers = {}
    with SessionGen(commit=False) as session:
        contest = session.query(Contest).\
            filter_by(id=contest_id).first()
        logger.info("Loaded contest %s" % contest.name)
        contest.create_empty_ranking_view(timestamp=contest.start)
        for task in contest.tasks:
            self.scorers[task.id] = get_score_type(task=task)
        # Explicit commit since the session was opened with commit=False.
        session.commit()

    # If for some reason (SS switched off for a while, or broken
    # connection with ES), submissions have been left without
    # score, this is the list where you want to put their
    # ids. Note that list != [] if and only if there is an alive
    # timeout for the method "score_old_submission".
    self.submission_ids_to_score = []
    self.submission_ids_to_token = []

    # We need to load every submission at start, but we don't want
    # to invalidate every score so that we can simply load the
    # score-less submissions. So we keep a set of submissions that
    # we analyzed (for scoring and for tokens).
    self.submission_ids_scored = set()
    self.submission_ids_tokened = set()

    # Initialize ranking web servers we need to send data to, as
    # ("host:port", authorization) pairs.
    self.rankings = []
    for i in xrange(len(config.rankings_address)):
        address = config.rankings_address[i]
        username = config.rankings_username[i]
        password = config.rankings_password[i]
        auth = get_authorization(username, password)
        self.rankings.append(("%s:%d" % tuple(address), auth))

    # Seed the queue with one initialization operation per ranking.
    self.operation_queue = []
    for ranking in self.rankings:
        self.operation_queue.append((self.initialize, [ranking]))

    self.add_timeout(self.dispatch_operations, None,
                     ScoringService.CHECK_DISPATCH_TIME,
                     immediately=True)
    self.add_timeout(self.search_jobs_not_done, None,
                     ScoringService.JOBS_NOT_DONE_CHECK_TIME,
                     immediately=True)
def __init__(self, shard, contest_id):
    """Set up scoring state and the periodic dispatch/check tasks."""
    logger.initialize(ServiceCoord("ScoringService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id

    # ScoreType objects deciding the score of each task's submissions.
    self.scorers = {}
    self._initialize_scorers()

    # Ids of submissions left without a score (e.g. SS was switched off
    # for a while, or the connection with ES broke); non-empty exactly
    # while a timeout for "score_old_submission" is alive.
    self.submission_ids_to_score = []
    self.submission_ids_to_token = []

    # Submissions already analyzed at startup (for scoring and for
    # tokens), so we can load only the score-less ones without
    # invalidating every score.
    self.submission_ids_scored = set()
    self.submission_ids_tokened = set()

    # Ranking web servers we need to send data to, as
    # ("host:port", authorization) pairs.
    self.rankings = []
    for idx in xrange(len(config.rankings_address)):
        address = config.rankings_address[idx]
        auth = get_authorization(config.rankings_username[idx],
                                 config.rankings_password[idx])
        self.rankings.append(("%s:%d" % tuple(address), auth))

    # Seed the queue with one initialization operation per ranking.
    self.operation_queue = [(self.initialize, [ranking])
                            for ranking in self.rankings]

    self.add_timeout(self.dispatch_operations, None,
                     ScoringService.CHECK_DISPATCH_TIME,
                     immediately=True)
    self.add_timeout(self.search_jobs_not_done, None,
                     ScoringService.JOBS_NOT_DONE_CHECK_TIME,
                     immediately=True)
def __init__(self, shard):
    """Open this run's log file and refresh the "last.log" symlink."""
    logger.initialize(ServiceCoord("LogService", shard))
    Service.__init__(self, shard, custom_logger=logger)

    log_dir = os.path.join(config.log_dir, "cms")
    # Both directories must exist before the log file can be opened
    # (short-circuit: the inner dir is only attempted if the outer
    # one was created).
    if not (mkdir(config.log_dir) and mkdir(log_dir)):
        logger.error("Cannot create necessary directories.")
        self.exit()
        return

    log_filename = "%d.log" % int(time.time())
    self._log_file = codecs.open(os.path.join(log_dir, log_filename),
                                 "w", "utf-8")
    # Drop a stale "last.log" if present, then point it at the file
    # we just opened.
    try:
        os.remove(os.path.join(log_dir, "last.log"))
    except OSError:
        pass
    os.symlink(log_filename, os.path.join(log_dir, "last.log"))

    # Recent messages kept in memory for quick retrieval.
    self._last_messages = []
def __init__(self, shard, contest_id):
    """Start the scoring service and spawn one proxy per ranking."""
    logger.initialize(ServiceCoord("ScoringService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id

    # If for some reason (SS switched off for a while, or broken
    # connection with ES), submissions have been left without score,
    # these sets hold what still needs processing; non-empty exactly
    # while a timeout for "score_old_submission" is alive.
    #
    # submission_results_to_score and submission_results_scored
    # contain pairs of (submission_id, dataset_id).
    #
    # submissions_to_token and submissions_tokened contain scalar
    # values of submission_id.
    self.submission_results_to_score = set()
    self.submissions_to_token = set()
    self.scoring_old_submission = False

    # Submissions already analyzed at startup (for scoring and for
    # tokens), so we can load only the score-less ones without
    # invalidating every score.
    self.submission_results_scored = set()
    self.submissions_tokened = set()

    # Create and spawn one greenlet-backed proxy per configured ranking.
    self.rankings = []
    for ranking_config in config.rankings:
        proxy = RankingProxy(ranking_config)
        self.rankings.append(proxy)
        gevent.spawn(proxy.run)
    self.rankings_initialize()

    self.add_timeout(self.search_jobs_not_done, None,
                     ScoringService.JOBS_NOT_DONE_CHECK_TIME,
                     immediately=True)
def __init__(self, shard):
    """Connect the checker to every configured core service.

    shard -- shard number of this Checker instance.
    """
    logger.initialize(ServiceCoord("Checker", shard))
    Service.__init__(self, shard, custom_logger=logger)
    # "async" became a reserved keyword in Python 3.7, so the original
    # attribute access (config. async .core_services) is a syntax error
    # on modern interpreters; getattr spells the same lookup portably.
    for service in getattr(config, "async").core_services:
        self.connect_to(service)
def __init__(self, shard):
    """Connect the checker to every configured core service.

    shard -- shard number of this Checker instance.
    """
    logger.initialize(ServiceCoord("Checker", shard))
    Service.__init__(self, shard, custom_logger=logger)
    # "async" became a reserved keyword in Python 3.7, so the original
    # attribute access (config.async.core_services) is a syntax error
    # on modern interpreters; getattr spells the same lookup portably.
    for service in getattr(config, "async").core_services:
        self.connect_to(service)