Code example #1
File: resource.py Project: AltanAlpay/aurora
 def __init__(self,
              task_id,
              task_monitor,
              process_collector=ProcessTreeCollector,
              disk_collector=DiskCollector,
              process_collection_interval=Amount(20, Time.SECONDS),
              disk_collection_interval=Amount(1, Time.MINUTES),
              history_time=Amount(1, Time.HOURS)):
   """
     task_monitor: TaskMonitor object specifying the task whose resources should be monitored
     sandbox: Directory for which to monitor disk utilisation
   """
   self._task_monitor = task_monitor  # exposes PIDs, sandbox
   self._task_id = task_id
   log.debug('Initialising resource collection for task %s' % self._task_id)
   self._process_collectors = dict()  # ProcessStatus => ProcessTreeCollector
   self._process_collector_factory = process_collector
   self._disk_collector_class = disk_collector
   self._disk_collector = None
   self._process_collection_interval = process_collection_interval.as_(Time.SECONDS)
   self._disk_collection_interval = disk_collection_interval.as_(Time.SECONDS)
   min_collection_interval = min(self._process_collection_interval, self._disk_collection_interval)
   history_length = int(history_time.as_(Time.SECONDS) / min_collection_interval)
   if history_length > self.MAX_HISTORY:
     raise ValueError("Requested history length too large")
   log.debug("Initialising ResourceHistory of length %s" % history_length)
   self._history = ResourceHistory(history_length)
   self._kill_signal = threading.Event()
   ExceptionalThread.__init__(self, name='%s[%s]' % (self.__class__.__name__, task_id))
   self.daemon = True
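Every constructor in these examples ends the same way: initialize state, delegate to ExceptionalThread.__init__ (optionally with a thread name), and set daemon = True so the thread never blocks interpreter shutdown, with a threading.Event serving as the stop/kill signal checked from the run loop. Below is a minimal, self-contained sketch of that pattern using the standard library's threading.Thread as a stand-in for ExceptionalThread; the class name, run loop, and task id are illustrative, not part of the Aurora code above.

import threading
import time

class PollingMonitor(threading.Thread):  # stand-in for ExceptionalThread
  def __init__(self, task_id, interval=1.0):
    self._task_id = task_id
    self._interval = interval
    self._kill_signal = threading.Event()
    threading.Thread.__init__(self, name='%s[%s]' % (self.__class__.__name__, task_id))
    self.daemon = True  # do not keep the interpreter alive on shutdown

  def run(self):
    # Event.wait() doubles as an interruptible sleep: it returns True as soon as kill() is called.
    while not self._kill_signal.wait(self._interval):
      pass  # collect one sample per interval here

  def kill(self):
    self._kill_signal.set()

monitor = PollingMonitor('task-1234')
monitor.start()
time.sleep(3)
monitor.kill()
monitor.join()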
Code example #2
 def __init__(self,
              checkpoint_root,
              verbose=True,
              task_killer=TaskKiller,
              executor_detector=ExecutorDetector,
              task_garbage_collector=TaskGarbageCollector,
              clock=time):
     ExecutorBase.__init__(self)
     ExceptionalThread.__init__(self)
     self.daemon = True
     self._stop_event = threading.Event()
     # mapping of task_id => (TaskInfo, AdjustRetainedTasks), in the order in
     # which they were received via a launchTask.
     self._gc_task_queue = OrderedDict()
     # cache the ExecutorDriver provided by the slave, so we can use it out
     # of band from slave-initiated callbacks.  This should be supplied by
     # ExecutorBase.registered() when the executor first registers with the
     # slave.
     self._driver = None
     self._slave_id = None  # cache the slave ID provided by the slave
     self._task_id = None  # the task_id currently being executed by the ThermosGCExecutor, if any
     self._start_time = None  # the start time of a task currently being executed, if any
     self._detector = executor_detector()
     self._collector = task_garbage_collector(root=checkpoint_root)
     self._clock = clock
     self._task_killer = task_killer
     self._checkpoint_root = checkpoint_root
     self._dropped_tasks = AtomicGauge('dropped_tasks')
     self.metrics.register(self._dropped_tasks)
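The `_gc_task_queue` comment above describes an ordered task_id => payload mapping that is filled by launchTask and drained oldest-first. A small stand-alone illustration of that FIFO behaviour with OrderedDict follows; the task ids and payload strings are placeholders, not real TaskInfo or AdjustRetainedTasks objects.

from collections import OrderedDict

gc_task_queue = OrderedDict()
gc_task_queue['task-1'] = ('<TaskInfo 1>', '<AdjustRetainedTasks 1>')
gc_task_queue['task-2'] = ('<TaskInfo 2>', '<AdjustRetainedTasks 2>')

# popitem(last=False) removes the oldest entry, preserving launchTask arrival order.
while gc_task_queue:
  task_id, (task_info, retained_tasks) = gc_task_queue.popitem(last=False)
  print('garbage collecting', task_id)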
Code example #3
File: resource.py Project: apache/aurora
  def __init__(
      self,
      task_id,
      task_monitor,
      disk_collector_provider=DiskCollectorProvider(),
      process_collection_interval=PROCESS_COLLECTION_INTERVAL,
      disk_collection_interval=DiskCollectorSettings.DISK_COLLECTION_INTERVAL,
      history_time=HISTORY_TIME,
      history_provider=HistoryProvider()):

    """
      task_monitor: TaskMonitor object specifying the task whose resources should be monitored
      sandbox: Directory for which to monitor disk utilisation
    """
    self._task_monitor = task_monitor  # exposes PIDs, sandbox
    self._task_id = task_id
    log.debug('Initialising resource collection for task %s', self._task_id)
    self._process_collectors = dict()  # ProcessStatus => ProcessTreeCollector

    self._disk_collector_provider = disk_collector_provider
    self._disk_collector = None
    self._process_collection_interval = process_collection_interval.as_(Time.SECONDS)
    self._disk_collection_interval = disk_collection_interval.as_(Time.SECONDS)
    min_collection_interval = min(self._process_collection_interval, self._disk_collection_interval)
    self._history = history_provider.provides(history_time, min_collection_interval)
    self._kill_signal = threading.Event()
    ExceptionalThread.__init__(self, name='%s[%s]' % (self.__class__.__name__, task_id))
    self.daemon = True
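Compared with example #1, this version moves the DiskCollector choice and the ResourceHistory sizing behind injectable provider objects. A rough, hypothetical sketch of what such a history provider could look like is shown below; the real HistoryProvider lives in Aurora's resource.py and takes Amount values rather than plain seconds, and a bounded deque stands in for ResourceHistory here.

import collections

class SketchHistoryProvider(object):  # illustrative only, not the Aurora implementation
  MAX_HISTORY = 10000  # assumed cap, mirroring MAX_HISTORY in example #1

  def provides(self, history_time_secs, min_collection_interval_secs):
    history_length = int(history_time_secs / min_collection_interval_secs)
    if history_length > self.MAX_HISTORY:
      raise ValueError('Requested history length too large')
    return collections.deque(maxlen=history_length)

history = SketchHistoryProvider().provides(3600, 20)  # 180 slots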
Code example #4
 def __init__(self,
              checkpoint_root,
              verbose=True,
              task_killer=TaskKiller,
              executor_detector=ExecutorDetector,
              task_garbage_collector=TaskGarbageCollector,
              clock=time):
   ExecutorBase.__init__(self)
   ExceptionalThread.__init__(self)
   self.daemon = True
   self._stop_event = threading.Event()
   # mapping of task_id => (TaskInfo, AdjustRetainedTasks), in the order in
   # which they were received via a launchTask.
   self._gc_task_queue = OrderedDict()
   # cache the ExecutorDriver provided by the slave, so we can use it out
   # of band from slave-initiated callbacks.  This should be supplied by
   # ExecutorBase.registered() when the executor first registers with the
   # slave.
   self._driver = None
   self._slave_id = None  # cache the slave ID provided by the slave
   self._task_id = None  # the task_id currently being executed by the ThermosGCExecutor, if any
   self._start_time = None  # the start time of a task currently being executed, if any
   self._detector = executor_detector()
   self._collector = task_garbage_collector(root=checkpoint_root)
   self._clock = clock
   self._task_killer = task_killer
   self._checkpoint_root = checkpoint_root
   self._dropped_tasks = AtomicGauge('dropped_tasks')
   self.metrics.register(self._dropped_tasks)
Code example #5
File: resource.py Project: Flaque/aurora-1
 def __init__(self,
              task_id,
              task_monitor,
              disk_collector=DiskCollector,
              process_collection_interval=PROCESS_COLLECTION_INTERVAL,
              disk_collection_interval=DISK_COLLECTION_INTERVAL,
              history_time=HISTORY_TIME,
              history_provider=HistoryProvider()):
     """
   task_monitor: TaskMonitor object specifying the task whose resources should be monitored
   sandbox: Directory for which to monitor disk utilisation
 """
     self._task_monitor = task_monitor  # exposes PIDs, sandbox
     self._task_id = task_id
     log.debug('Initialising resource collection for task %s' %
               self._task_id)
      self._process_collectors = dict()  # ProcessStatus => ProcessTreeCollector
     self._disk_collector_class = disk_collector
     self._disk_collector = None
     self._process_collection_interval = process_collection_interval.as_(
         Time.SECONDS)
     self._disk_collection_interval = disk_collection_interval.as_(
         Time.SECONDS)
     min_collection_interval = min(self._process_collection_interval,
                                   self._disk_collection_interval)
     self._history = history_provider.provides(history_time,
                                               min_collection_interval)
     self._kill_signal = threading.Event()
     ExceptionalThread.__init__(self,
                                name='%s[%s]' %
                                (self.__class__.__name__, task_id))
     self.daemon = True
Code example #6
 def __init__(self,
              task_id,
              task_monitor,
              process_collector=ProcessTreeCollector,
              disk_collector=DiskCollector,
              process_collection_interval=Amount(20, Time.SECONDS),
              disk_collection_interval=Amount(1, Time.MINUTES),
              history_time=Amount(1, Time.HOURS)):
   """
     task_monitor: TaskMonitor object specifying the task whose resources should be monitored
     sandbox: Directory for which to monitor disk utilisation
   """
   self._task_monitor = task_monitor  # exposes PIDs, sandbox
   self._task_id = task_id
   log.debug('Initialising resource collection for task %s' % self._task_id)
   self._process_collectors = dict()  # ProcessStatus => ProcessTreeCollector
   self._process_collector_factory = process_collector
   self._disk_collector_class = disk_collector
   self._disk_collector = None
   self._process_collection_interval = process_collection_interval.as_(Time.SECONDS)
   self._disk_collection_interval = disk_collection_interval.as_(Time.SECONDS)
   min_collection_interval = min(self._process_collection_interval, self._disk_collection_interval)
   history_length = int(history_time.as_(Time.SECONDS) / min_collection_interval)
   if history_length > self.MAX_HISTORY:
     raise ValueError("Requested history length too large")
   log.debug("Initialising ResourceHistory of length %s" % history_length)
   self._history = ResourceHistory(history_length)
   self._kill_signal = threading.Event()
   ExceptionalThread.__init__(self)
   self.daemon = True
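With the default arguments above (20-second process interval, 1-minute disk interval, 1 hour of history), history_length works out to 3600 / 20 = 180 samples, so the ValueError only fires when a caller requests far more history than MAX_HISTORY allows. The same arithmetic, spelled out with plain seconds:

# Defaults from the signature above, converted to seconds.
process_collection_interval = 20   # Amount(20, Time.SECONDS)
disk_collection_interval = 60      # Amount(1, Time.MINUTES)
history_time = 3600                # Amount(1, Time.HOURS)

min_collection_interval = min(process_collection_interval, disk_collection_interval)
history_length = int(history_time / min_collection_interval)
print(history_length)  # 180 entries kept in ResourceHistory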
Code example #7
 def __init__(self, clock=time):
   self._clock = clock
   self._self = psutil.Process(os.getpid())
   self._orphan = False
   self.metrics.register(LambdaGauge('orphan', lambda: int(self._orphan)))
   self._metrics = dict((metric, MutatorGauge(metric, 0)) for metric in self.MUTATOR_METRICS)
   for metric in self._metrics.values():
     self.metrics.register(metric)
   ExceptionalThread.__init__(self)
   self.daemon = True
Code example #8
 def __init__(self, root, resource_monitor_class=TaskResourceMonitor):
   self._pathspec = TaskPath(root=root)
   self._detector = TaskDetector(root)
   if not issubclass(resource_monitor_class, ResourceMonitorBase):
     raise ValueError("resource monitor class must implement ResourceMonitorBase!")
   self._resource_monitor = resource_monitor_class
   self._active_tasks = {}    # task_id => ActiveObservedTask
   self._finished_tasks = {}  # task_id => FinishedObservedTask
   self._stop_event = threading.Event()
   ExceptionalThread.__init__(self)
   Lockable.__init__(self)
   self.daemon = True
Code example #9
 def __init__(self, root, resource_monitor_class=TaskResourceMonitor):
   self._pathspec = TaskPath(root=root)
   self._detector = TaskDetector(root)
   if not issubclass(resource_monitor_class, ResourceMonitorBase):
     raise ValueError("resource monitor class must implement ResourceMonitorBase!")
   self._resource_monitor = resource_monitor_class
   self._active_tasks = {}    # task_id => ActiveObservedTask
   self._finished_tasks = {}  # task_id => FinishedObservedTask
   self._stop_event = threading.Event()
   ExceptionalThread.__init__(self)
   Lockable.__init__(self)
   self.daemon = True
Code example #10
 def __init__(self, clock=time):
     self._clock = clock
     self._self = psutil.Process(os.getpid())
     if hasattr(self._self, "getcwd"):
         self._version = self.get_release_from_binary(os.path.join(self._self.getcwd(), self._self.cmdline[1]))
     else:
         self._version = "UNKNOWN"
     self.metrics.register(NamedGauge("version", self._version))
     self._orphan = False
     self.metrics.register(LambdaGauge("orphan", lambda: int(self._orphan)))
     self._metrics = dict((metric, MutatorGauge(metric, 0)) for metric in self.MUTATOR_METRICS)
     for metric in self._metrics.values():
         self.metrics.register(metric)
     ExceptionalThread.__init__(self)
     self.daemon = True
Code example #11
 def __init__(self, clock=time):
   self._clock = clock
   self._self = psutil.Process(os.getpid())
   try:
     self._version = self.get_release_from_binary(
       os.path.join(self._self.cwd(), self._self.cmdline()[1]))
   except (IndexError, psutil.Error):
     self._version = 'UNKNOWN'
   self.metrics.register(NamedGauge('version', self._version))
   self._orphan = False
   self.metrics.register(LambdaGauge('orphan', lambda: int(self._orphan)))
   self._metrics = dict((metric, MutatorGauge(metric, 0)) for metric in self.MUTATOR_METRICS)
   for metric in self._metrics.values():
     self.metrics.register(metric)
   ExceptionalThread.__init__(self)
   self.daemon = True
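Examples #11 and #12 use the method-style psutil API introduced in psutil 2.0 (cwd() and cmdline() as calls), whereas example #10 guards for the older attribute-style API (getcwd(), cmdline as a property). The IndexError guard matters because cmdline() can return fewer than two entries. A stand-alone check of the same calls, assuming psutil is installed; the fallback string mirrors the code above.

import os
import psutil

proc = psutil.Process(os.getpid())
try:
  # cmdline() may be short, and cwd() may be denied, hence the broad guard.
  binary_path = os.path.join(proc.cwd(), proc.cmdline()[1])
except (IndexError, psutil.Error):
  binary_path = 'UNKNOWN'
print(binary_path)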
Code example #12
 def __init__(self, clock=time):
   self._clock = clock
   self._self = psutil.Process(os.getpid())
   try:
     self._version = self.get_release_from_binary(
       os.path.join(self._self.cwd(), self._self.cmdline()[1]))
   except (IndexError, psutil.Error):
     self._version = 'UNKNOWN'
   self.metrics.register(NamedGauge('version', self._version))
   self._orphan = False
   self.metrics.register(LambdaGauge('orphan', lambda: int(self._orphan)))
   self._metrics = dict((metric, MutatorGauge(metric, 0)) for metric in self.MUTATOR_METRICS)
   for metric in self._metrics.values():
     self.metrics.register(metric)
   ExceptionalThread.__init__(self)
   self.daemon = True
Code example #13
 def __init__(self,
              path_detector,
              resource_monitor_class=TaskResourceMonitor,
              interval=POLLING_INTERVAL):
   self._detector = ObserverTaskDetector(
       path_detector,
       self.__on_active,
       self.__on_finished,
       self.__on_removed)
   if not issubclass(resource_monitor_class, ResourceMonitorBase):
     raise ValueError("resource monitor class must implement ResourceMonitorBase!")
   self._resource_monitor_class = resource_monitor_class
   self._interval = interval
   self._active_tasks = {}    # task_id => ActiveObservedTask
   self._finished_tasks = {}  # task_id => FinishedObservedTask
   self._stop_event = threading.Event()
   ExceptionalThread.__init__(self)
   Lockable.__init__(self)
   self.daemon = True
Code example #14
File: task_observer.py Project: ssalevan/aurora
 def __init__(self,
              path_detector,
              interval=POLLING_INTERVAL,
              task_process_collection_interval=TaskResourceMonitor.PROCESS_COLLECTION_INTERVAL,
              task_disk_collection_interval=TaskResourceMonitor.DISK_COLLECTION_INTERVAL):
   self._detector = ObserverTaskDetector(
       path_detector,
       self.__on_active,
       self.__on_finished,
       self.__on_removed)
   self._interval = interval
   self._task_process_collection_interval = task_process_collection_interval
   self._task_disk_collection_interval = task_disk_collection_interval
   self._active_tasks = {}    # task_id => ActiveObservedTask
   self._finished_tasks = {}  # task_id => FinishedObservedTask
   self._stop_event = threading.Event()
   ExceptionalThread.__init__(self)
   Lockable.__init__(self)
   self.daemon = True
Code example #15
    def __init__(self,
                 path_detector,
                 interval=POLLING_INTERVAL,
                 task_process_collection_interval=TaskResourceMonitor.PROCESS_COLLECTION_INTERVAL,
                 enable_mesos_disk_collector=False,
                 disk_collector_settings=DiskCollectorSettings()):

        self._detector = ObserverTaskDetector(path_detector, self.__on_active,
                                              self.__on_finished,
                                              self.__on_removed)
        self._interval = interval
        self._task_process_collection_interval = task_process_collection_interval
        self._enable_mesos_disk_collector = enable_mesos_disk_collector
        self._disk_collector_settings = disk_collector_settings
        self._active_tasks = {}  # task_id => ActiveObservedTask
        self._finished_tasks = {}  # task_id => FinishedObservedTask
        self._stop_event = threading.Event()
        ExceptionalThread.__init__(self)
        Lockable.__init__(self)
        self.daemon = True
Code example #16
 def __init__(self, period, clock):
     self._stop = threading.Event()
     self._period = period
     self._clock = clock
     ExceptionalThread.__init__(self)
     self.daemon = True
Code example #17
File: sampler.py Project: BabyDuncan/commons
 def __init__(self, period, clock):
   self._stop = threading.Event()
   self._period = period
   self._clock = clock
   ExceptionalThread.__init__(self)
   self.daemon = True
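The two sampler constructors take an injectable clock alongside the period, which keeps the periodic thread testable: production code passes the time module, while tests can pass a fake clock whose sleep() returns immediately. A minimal sketch of how such a sampler might loop and how a fake clock plugs in; the run loop and FakeClock below are illustrative, not the actual twitter.commons implementation.

import threading
import time

class PeriodicSampler(threading.Thread):  # stand-in for ExceptionalThread
  def __init__(self, period, clock=time):
    self._stop = threading.Event()
    self._period = period
    self._clock = clock
    threading.Thread.__init__(self)
    self.daemon = True

  def run(self):
    while not self._stop.is_set():
      self._clock.sleep(self._period)
      # take one sample per period here

  def stop(self):
    self._stop.set()

class FakeClock(object):
  def __init__(self):
    self.slept = 0.0

  def sleep(self, seconds):
    self.slept += seconds

# In a test, the fake clock records requested sleeps without blocking.
sampler = PeriodicSampler(period=5, clock=FakeClock())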