Example #1
class GenericStream(SafeThread):

    def __init__(self):
        super().__init__()

        self.format = paInt16
        self.channels = 1
        self.bitrate = 12000
        # assert not self.bitrate % 50, "Bitrate must be divisible by 50"
        self.chunk = self.bitrate // 25

        self.pyaudio = PyAudio()
        self.compress = False

        self._active = Event()
        self._active.set()

    @property
    def chunk_size(self):
        return (self.chunk * self.pyaudio.get_sample_size(self.format)
                * self.channels)

    @property
    def active(self):
        return self._active.is_set()

    @active.setter
    def active(self, value):
        if value:
            self._active.set()
        else:
            self._active.clear()
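
The `_active` event above acts as a pause/resume switch: `wait()` blocks while the switch is cleared and passes straight through while it is set. A minimal standalone sketch of the same pattern (standard library only; the loop body is a placeholder, not PyAudio code):

from threading import Event, Thread
import time

pause_switch = Event()
pause_switch.set()  # start in the "running" state

def stream_loop():
    for _ in range(10):
        pause_switch.wait()  # blocks whenever the switch is cleared
        time.sleep(0.05)     # placeholder for reading one audio chunk

t = Thread(target=stream_loop)
t.start()
pause_switch.clear()  # pause
time.sleep(0.2)
pause_switch.set()    # resume
t.join()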
Example #2
class StoppableQThread(QtCore.QThread):
    """ Base class for QThreads which require the ability
    to be stopped by a thread-safe method call
    """

    def __init__(self, parent=None):
        self._should_stop = Event()
        self._should_stop.clear()
        super(StoppableQThread, self).__init__(parent)

    def join(self, timeout=0):
        """ Joins the current thread and forces it to stop after
        the timeout if necessary

        :param timeout: Timeout duration in seconds
        """
        self._should_stop.wait(timeout)
        if not self.should_stop():
            self.stop()
        super(StoppableQThread, self).wait()

    def stop(self):
        self._should_stop.set()

    def should_stop(self):
        return self._should_stop.is_set()

    def __repr__(self):
        return "<%s(should_stop=%s)>" % (self.__class__.__name__, self.should_stop())
Example #3
class Queue(list):

    def __init__(self):
        super(Queue, self).__init__()
        self._lock = Lock()
        self._fill = Event()

    def put(self, obj):
        with self._lock:
            self.append(obj)
            self._fill.set()

    def get(self, block=True):
        with self._lock:
            if len(self) == 0:
                self._fill.clear()
        if not self._fill.is_set():
            if block:
                self._fill.wait()
            else:
                return None
        with self._lock:
            return self.pop(0)

    def delete(self, index):
        if 0 <= index < len(self):
            with self._lock:
                del self[index]

    def remove(self, element):
        if element in self:
            with self._lock:
                del self[self.index(element)]
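
A short producer/consumer exercise of the class above (assuming `Event` and `Lock` are imported from `threading` where the class is defined):

from threading import Thread

q = Queue()
results = []

def producer():
    for i in range(3):
        q.put(i)

def consumer():
    for _ in range(3):
        results.append(q.get())  # blocks until an item arrives

c = Thread(target=consumer)
p = Thread(target=producer)
c.start()
p.start()
p.join()
c.join()
print(results)  # [0, 1, 2]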
Example #4
class Worker(object):
    def __init__(self, jobQueue, secondsPerJob):
        self.busyEvent = Event()
        self.stopEvent = Event()

        def workerFn():
            while True:
                if self.stopEvent.is_set():
                    return
                try:
                    jobQueue.get(timeout=1.0)
                except Empty:
                    continue
                self.busyEvent.set()
                time.sleep(secondsPerJob)
                self.busyEvent.clear()

        self.startTime = time.time()
        self.worker = Thread(target=workerFn)
        self.worker.start()

    def stop(self):
        self.stopEvent.set()
        self.worker.join()
        return time.time() - self.startTime
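
Driving the worker above (a sketch; the job queue and `Empty` come from the standard `queue` module, and `Event`, `Thread`, and `time` are assumed imported where the class is defined):

import time
from queue import Queue

jobs = Queue()
w = Worker(jobs, secondsPerJob=0.1)
jobs.put("job-1")
time.sleep(0.3)   # let the worker pick the job up
print(w.stop())   # roughly the worker's lifetime in seconds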
Example #5
class Dotter(object):
    def __init__(self, delay=100, symbol='.'):
        self.event = Event()
        self.delay = delay
        self.symbol = symbol
        self.status = False

    def __loop(self):
        while not self.event.is_set():
            stdout.write(self.symbol)
            stdout.flush()
            sleep(self.delay / 1000)

    def start(self):
        if not self.status:
            self.event.clear()
            Thread(target=self.__loop).start()
            self.status = True

    def stop(self, newLine=True):
        if self.status:
            self.event.set()
            if newLine:
                stdout.write('\n')
            self.status = False

    def set(self, delay=None, symbol=None):
        if delay is not None:
            self.delay = delay
        if symbol is not None:
            self.symbol = symbol
        if self.status:
            self.stop(False)
            self.start()
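
Typical use of Dotter (assuming `Event` and `Thread` from `threading`, `sleep` from `time`, and `stdout` from `sys` are imported):

dotter = Dotter(delay=50, symbol='.')
dotter.start()
sleep(0.5)     # dots are printed in the background while we "work"
dotter.stop()  # prints a newline and stops the loop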
Example #6
class InterruptableEvent(object):
    """Event class for for Python v2.7 which behaves more like Python v3.2
    threading.Event and allows signals to interrupt waits"""

    def __init__(self):
        self.__event = Event()

    def is_set(self):
        return self.__event.is_set()

    def set(self):
        self.__event.set()

    def clear(self):
        self.__event.clear()

    def wait(self, timeout=None):
        # infinite
        if timeout is None:
            # event with timeout is interruptable
            while not self.__event.wait(60):
                pass
            return True

        # finite
        else:
            # underlying Event will perform timeout argument validation
            return self.__event.wait(timeout)
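
The 60-second chunks are the point of the wrapper: on Python 2.7, an Event.wait() with no timeout blocks uninterruptibly in C and ignores signals, so Ctrl-C is only delivered between timed waits. Waking once a minute lets the interpreter raise KeyboardInterrupt. A quick check of the wrapper's return values (also runs on Python 3):

evt = InterruptableEvent()
evt.set()
assert evt.wait() is True        # returns immediately once set
evt.clear()
assert evt.wait(0.01) is False   # a finite wait can time out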
Example #7
    def test_term_thread(self):
        """ctx.term should not crash active threads (#139)"""
        ctx = self.Context()
        evt = Event()
        evt.clear()

        def block():
            s = ctx.socket(zmq.REP)
            s.bind_to_random_port('tcp://127.0.0.1')
            evt.set()
            try:
                s.recv()
            except zmq.ZMQError as e:
                self.assertEqual(e.errno, zmq.ETERM)
                return
            finally:
                s.close()
            self.fail("recv should have been interrupted with ETERM")
        t = Thread(target=block)
        t.start()
        
        evt.wait(1)
        self.assertTrue(evt.is_set(), "sync event never fired")
        time.sleep(0.01)
        ctx.term()
        t.join(timeout=1)
        self.assertFalse(t.is_alive(), "term should have interrupted s.recv()")
Example #8
class Worker(Thread):

    def __init__(self):
        Thread.__init__(self)
        self.trigger = Event()
        self.uri = None
        self.sonos = None
        self.daemon = True
        self.lock = Lock()

    def start_url(self, a_group, uri):
        with self.lock:
            self.sonos = a_group
            self.uri = uri
        self.trigger.set()

    def run(self):
        while True:
            self.trigger.wait()
            if self.sonos is None:
                # take this as a sign to shut down gracefully
                break
            with self.lock:
                uri = self.uri
                self.uri = None
                self.trigger.clear()
            result = self.sonos.play_uri(uri)
            print(result)
Example #9
class PeriodicTimer(object):

    def __init__(self, frequency=60, *args, **kwargs):
        self.is_stopped = Event()
        self.is_stopped.clear()

        self.interval = frequency
        self._timer = Timer(self.frequency, self._check_for_event, ())
        self._timer.daemon = True

    @property
    def interval(self):
        return self.frequency

    @interval.setter
    def interval(self, frequency):
        self.frequency = frequency
        self.stop()
        try:
            if self._timer:
                self._timer.cancel()
                del self._timer
        except AttributeError:
            pass
        self._timer = Timer(self.frequency, self._check_for_event, ())
        return self.frequency
Example #10
class NonSubscribeListener(object):
    def __init__(self):
        self.result = None
        self.status = None
        self.done_event = Event()

    def callback(self, result, status):
        self.result = result
        self.status = status
        self.done_event.set()

    def pn_await(self, timeout=5):
        """ Returns False if a timeout happened, otherwise True"""
        return self.done_event.wait(timeout)

    def await_result(self, timeout=5):
        self.pn_await(timeout)
        return self.result

    def await_result_and_reset(self, timeout=5):
        self.pn_await(timeout)
        cp = copy.copy(self.result)
        self.reset()
        return cp

    def reset(self):
        self.result = None
        self.status = None
        self.done_event.clear()
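
Exercising the listener above without PubNub (a sketch; `Event` from `threading` and the `copy` module are assumed imported, and the callback is fired from a plain thread):

from threading import Thread

listener = NonSubscribeListener()
Thread(target=listener.callback, args=({"msg": "hi"}, "OK")).start()
print(listener.await_result(timeout=1))            # {'msg': 'hi'}
print(listener.await_result_and_reset(timeout=1))  # same result; listener state is cleared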
Example #11
class TimerWithResume(object):
    def __init__(self, status_subject, refresh_interval):
        self.status_subject = status_subject
        self.abort = Event()
        self.refresh_interval = refresh_interval

    def perform(self):
        while not self.abort.is_set():
            self.status_subject.build_status()
            self.abort.wait(self.refresh_interval)

    def stop(self):
        self.abort.set()

    def start(self):
        self.thread = Thread(target=self.perform)
        self.thread.daemon = True
        self.thread.start()

    def resume(self):
        self.thread.join()
        self.abort.clear()
        self.start()

    def set_refresh_interval(self, new_interval):
        self.refresh_interval = new_interval
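
A minimal stand-in for status_subject makes the timer above runnable (a sketch; `Event` and `Thread` from `threading` are assumed imported). Note that resume() joins the old thread before clearing the abort flag, so it only makes sense after stop():

import time

class StatusStub:
    def build_status(self):
        print("refreshing status")

timer = TimerWithResume(StatusStub(), refresh_interval=0.2)
timer.start()
time.sleep(0.5)  # a few refreshes happen in the background
timer.stop()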
Example #12
class MotorThread(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.daemon = True
        self.lock = Event()
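        # NOTE: despite the name, this Event signals "new command"; it is not a mutual-exclusion lock.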
        self.set(0, 0, 0)

    def set(self, m1, m2, m3):
        self.m1, self.m2, self.m3 = m1, m2, m3
        self.lock.set()

    def run(self):
        while True:
            # Reset all direction pins to avoid damaging H-bridges
            board.digital_write(MOTOR_1_B, 0)
            board.digital_write(MOTOR_1_A, 0)
            board.digital_write(MOTOR_2_B, 0)
            board.digital_write(MOTOR_2_A, 0)
            board.digital_write(MOTOR_3_B, 0)
            board.digital_write(MOTOR_3_A, 0)

            # Set duty cycle
            board.analog_write(MOTOR_1_PWM, int(abs(self.m1) + 25) if self.m1 else 0)
            board.analog_write(MOTOR_2_PWM, int(abs(self.m2) + 25) if self.m2 else 0)
            board.analog_write(MOTOR_3_PWM, int(abs(self.m3) + 25) if self.m3 else 0)

            # Set directions
            board.digital_write(MOTOR_1_A, self.m1 < 0)
            board.digital_write(MOTOR_1_B, self.m1 > 0)
            board.digital_write(MOTOR_2_A, self.m2 < 0)
            board.digital_write(MOTOR_2_B, self.m2 > 0)
            board.digital_write(MOTOR_3_A, self.m3 < 0)
            board.digital_write(MOTOR_3_B, self.m3 > 0)
            self.lock.wait()
            self.lock.clear()
Example #13
    def wait_status(self, status=win32service.SERVICE_RUNNING, timeout=SERVICE_WAIT_TIMEOUT):
        abort = Event()
        abort.clear()

        def die():
            abort.set()

        timer = Timer(timeout, die)
        timer.start()

        current = None
        while True:
            if abort.is_set():
                # If timeout is hit we abort.
                log.warning("Timeout hit waiting service for status %s, current status %s",
                            status, current['CurrentState'] if current else None)
                return

            current = win32service.QueryServiceStatusEx(self.service)

            if current['CurrentState'] == status:
                timer.cancel()
                return

            time.sleep(1)
Example #14
class Heartbeater(object):
    interval = 5

    def __init__(self, api, jobstep_id, interval=None):
        self.api = api
        self.jobstep_id = jobstep_id
        self.cv = Condition()
        self.finished = Event()

        if interval is not None:
            self.interval = interval

    def wait(self):
        with self.cv:
            self.finished.clear()
            while not self.finished.is_set():
                data = self.api.get_jobstep(self.jobstep_id)
                if data['status']['id'] == 'finished':
                    self.finished.set()
                    break

                self.cv.wait(self.interval)

    def close(self):
        with self.cv:
            self.finished.set()
            self.cv.notify_all()
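
A stub API makes the heartbeat loop above runnable (a sketch; `Condition` and `Event` from `threading` are assumed imported, and the stub only mirrors the shape of the `get_jobstep` call used here):

class StubAPI:
    def __init__(self):
        self.calls = 0

    def get_jobstep(self, jobstep_id):
        self.calls += 1
        status = 'finished' if self.calls >= 3 else 'in_progress'
        return {'status': {'id': status}}

hb = Heartbeater(StubAPI(), jobstep_id=1, interval=0.05)
hb.wait()  # polls until the stub reports 'finished', sleeping via cv.wait between calls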
Example #15
class HandleDataRequest(QObject):

    # Ensure data is obtained in the GUI thread

    call = pyqtSignal(object, object)

    def __init__(self):
        QObject.__init__(self)
        self.called = Event()
        self.call.connect(self.run_func, Qt.QueuedConnection)

    def run_func(self, func, data):
        try:
            self.result, self.tb = func(data), None
        except Exception:
            import traceback
            self.result, self.tb = None, traceback.format_exc()
        finally:
            self.called.set()

    def __call__(self, request):
        func = data_funcs[request.type]
        if is_gui_thread():
            try:
                return func(request.data), None
            except Exception:
                import traceback
                return None, traceback.format_exc()
        self.called.clear()
        self.call.emit(func, request.data)
        self.called.wait()
        try:
            return self.result, self.tb
        finally:
            del self.result, self.tb
Example #16
class Controller(object):
    def __init__(self, map):
        self.map = map
        self.snakes = set()
        self.ticked = Event()

    def add_snake(self, snake):
        self.snakes.add(snake)

    def tick(self):
        for s in self.snakes:
            s.move(self.map)

        deads, snake_map = self.map.draw_and_check_snakes(self.snakes)
        self.snakes = self.snakes - deads

        for s in deads:
            s.dead = True

        self.ticked.set()
        self.ticked.clear()

    def to_json_dict(self):
        return {
            'map': self.map.to_json_dict(),
            'snakes': {s.id: [ctps(b) for b in s.body] for s in self.snakes}
        }
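
The set()/clear() pair in tick() is a pulse: in CPython, set() notifies every thread currently blocked in ticked.wait(), and those waiters return True even if clear() runs before they resume, while a thread that calls wait() after the clear() blocks until the next tick. A tiny illustration:

import time
from threading import Event, Thread

ticked = Event()

def waiter(name):
    ticked.wait()
    print(name, "saw the tick")

for n in ("a", "b"):
    Thread(target=waiter, args=(n,)).start()
time.sleep(0.1)  # let both threads block first
ticked.set()
ticked.clear()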
Example #17
class Autoscroll(Thread):
    """A Thread subclass that implements autoscroll timer."""
    def __init__(self, widget):
        """Initialize the class."""
        super().__init__()
        self.daemon = True
        self.widget = widget
        self.event = Event()

    def _can_scroll(self):
        """Return True if we can autoscroll."""
        return (lyvi.player.length and lyvi.player.state == 'play' and lyvi.ui.view == 'lyrics'
                and not lyvi.ui.hidden and self.widget.pos not in ('All', 'Bot'))

    def run(self):
        """Start the timer."""
        while True:
            if self._can_scroll():
                time = ceil(lyvi.player.length / (self.widget.total_lines - self.widget.size[1]))
                reset = False
                for _ in range(time):
                    if self.event.wait(1):
                        reset = True
                        self.event.clear()
                        break
                if not reset and self._can_scroll():
                    self.widget.keypress(self.widget.size, 'down')
            else:
                sleep(1)

    def reset(self):
        """Reset the timer."""
        self.event.set()
Example #18
    def test_manual_execute_with_ids_ignored_while_in_execute(self):
        waiter = Event()

        long_execute_waiter = Event()

        # noinspection PyUnusedLocal
        def execute(*args, **kwargs):
            waiter.set()
            long_execute_waiter.wait(1)
            self.assertTrue(long_execute_waiter.is_set())

        execute_mock = Mock(side_effect=execute)
        self.trackers_manager.execute = execute_mock

        self.create_runner(interval=1)
        self.engine_runner.execute(None)
        waiter.wait(0.3)
        waiter.clear()
        ids = [1, 2, 3]
        self.engine_runner.execute(ids)
        long_execute_waiter.set()
        waiter.wait(0.3)
        self.assertTrue(waiter.is_set())

        self.stop_runner()

        execute_mock.assert_called_once_with(ANY, None)
Example #19
    def test_manual_execute_shouldnt_reset_timeout_for_whole_execute(self):
        executed = Event()

        # noinspection PyUnusedLocal
        def execute(*args, **kwargs):
            executed.set()

        execute_mock = Mock(side_effect=execute)
        self.trackers_manager.execute = execute_mock

        with DBSession() as db:
            db.add(ExecuteSettings(interval=1, last_execute=None))

        self.create_runner()

        sleep(0.5)

        # start manual
        self.engine_runner.execute([1, 2, 3])
        executed.wait(0.3)
        executed.clear()

        sleep(0.5)
        executed.wait(0.3)

        self.assertTrue(executed.is_set())
        self.stop_runner()

        self.assertEqual(2, execute_mock.call_count)
Example #20
    def test_manual_execute_with_ids_ignored_while_in_execute(self):
        waiter = Event()

        long_execute_waiter = Event()

        # noinspection PyUnusedLocal
        def execute(*args, **kwargs):
            waiter.set()
            long_execute_waiter.wait(1)
            self.assertTrue(long_execute_waiter.is_set())

        execute_mock = Mock(side_effect=execute)
        self.trackers_manager.execute = execute_mock
        clients_manager = ClientsManager({})
        engine_runner = EngineRunner(Logger(), self.trackers_manager, clients_manager, interval=1)
        engine_runner.execute(None)
        waiter.wait(0.3)
        waiter.clear()
        ids = [1, 2, 3]
        engine_runner.execute(ids)
        long_execute_waiter.set()
        waiter.wait(0.3)
        self.assertTrue(waiter.is_set())
        engine_runner.stop()
        engine_runner.join(1)
        self.assertFalse(engine_runner.is_alive())

        execute_mock.assert_called_once_with(ANY, None)
Example #21
class MinuteTimer(Thread):
    
    def __init__(self):
        Thread.__init__(self)
        
        self.__event = Event()
        self.running = True
        self.minute_trigger = MinuteTrigger(self.__event)
        
    def stop(self):
        self.minute_trigger.stop()
        self.running = False
        self.__event.set()  # wake the blocked wait() so the loop can observe running == False
        
    def run(self):
        
        self.minute_trigger.start()
        
        while self.running:
            try:
                self.task()
            except Exception as e:
                traceback.print_exc(file=sys.stderr)
                
            self.__event.clear()
            self.__event.wait()
        
        
    def task(self):
        raise Exception("Please override this")
Example #22
class CallbackExecutor(Thread):

    def __init__(self):
        super(CallbackExecutor, self).__init__()
        self.daemon = True
        self._lock = Lock()
        self._cmd = None
        self._process = None
        self._callback_event = Event()
        self.start()

    def call(self, cmd):
        with self._lock:
            if self._process and self._process.poll() is None:
                self._process.kill()
                logger.warning('Killed the old callback process because it was still running: %s', self._cmd)
        self._cmd = cmd
        self._callback_event.set()

    def run(self):
        while True:
            self._callback_event.wait()
            self._callback_event.clear()
            with self._lock:
                try:
                    self._process = subprocess.Popen(self._cmd, close_fds=True)
                except Exception:
                    logger.exception('Failed to execute %s',  self._cmd)
                    continue
            self._process.wait()
Example #23
class Dispatcher(Thread):
    cueue = None
    instance = None
    cond = None
    nextTrigger = None

    def __init__(self):
        Thread.__init__(self)
        Dispatcher.instance = self
        Dispatcher.cueue = []
        self.cond = Condition(Lock())
        self.evt = Event()
        self.daemon = True
    
    def run(self):
        while True:
            logger.debug("sleeping for %s" % self.nextTrigger)
            self.evt.wait(self.nextTrigger)
            n = now()
            tasks = [ b for b in Dispatcher.cueue if b.triggerTime <= n ]
            logger.debug("Doing %i callbacks" % len(tasks))
            for t in tasks:
                t.internal_callback()
            if len(Dispatcher.cueue) > 0:
                self.nextTrigger = min([b.triggerTime for b in Dispatcher.cueue]) - now()
            else:
                self.nextTrigger = None
            self.evt.clear()


    def __callLater(self, triggerTime, callback, *a, **kw):
        t = Task(triggerTime, callback, *a, **kw)
        Dispatcher.cueue.append(t)
        if not self.nextTrigger or triggerTime < self.nextTrigger:
            logger.debug("notifying condition")
            # we need to trigger before the next event
            self.evt.set()
            self.nextTrigger = triggerTime
        return t

    @classmethod
    def getPendings(klass):
        return klass.cueue
    
    @classmethod
    def callLater(klass__, triggerTime, callback, *a, **kw):
        return klass__.instance.__callLater(triggerTime, callback, *a, **kw)

    @classmethod
    def cancelCallLater(klass, task):
        if task in klass.cueue:
            task.cancel()
            return True
        return False

    @classmethod
    def cancelAll(klass):
        for task in klass.cueue:
            task.cancel()
        klass.cueue = []
Example #24
class myObj:
    def __init__(self, name, counter):

        self._stopper = Event()
        self.name = name
        self.counter = counter
        self.t = None
        self.count = 0

    def start(self):
        self.t = Thread(name=self.name, target=self._doWork, args=(self.name,))
        self.t.start()

    def _doWork(self, threadName):
        self._stopper.clear()

        while not self._stopper.is_set():
            print("%s: %s" % (current_thread().getName(), time.ctime(time.time())))
            self.count += 1
            self._stopper.wait(10)

        print("Done")
        print("Count: %d" % self.count)

    def stopit(self):
        self._stopper.set()
        self.t.join()

    def stopped(self):
        return self._stopper.is_set()
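
Driving the class above (assuming `Event`, `Thread`, and `current_thread` from `threading` plus `time` are imported):

obj = myObj("worker-1", counter=0)
obj.start()
time.sleep(1)
obj.stopit()          # sets the event, so wait(10) returns early and the loop exits
print(obj.stopped())  # True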
Example #25
class ExThread(Thread):
    def __init__(self, *args, **kw):
        Thread.__init__(self, *args, **kw)
        self._willKill = False
        self._event = Event()
        self._event.set()

    def localtrace(self, why):
        if self._willKill and why == "line":
            raise SystemError()
        elif not self._event.is_set():
            self._event.wait()
        return self.localtrace

    def kill(self):
        self._willKill = True
        self.localtrace(why="line")

    def pause(self):
        self._event.clear()

    def ahead(self):
        self._event.set()

    def run(self):
        while True:
            click_at_pos(500, 320)
            time.sleep(0.1)
Example #26
class event(object):
    """ Class encapsulating an event object to control
    sequential access to a resource """

    def __init__(self, *args):
        self.evt = Event()
        self.evt.set()
    
    def __call__(self, f):
        def eventfunc(*args, **kwargs):
            try:
                print('Waiting on event =>', currentThread())
                self.evt.wait()
                # First thread will clear the event and
                # make others wait, once it is done with the
                # job, it sets the event which wakes up
                # another thread, which does the same thing...
                # This provides sequential access to a
                # resource...
                self.evt.clear()
                print('Cleared event =>', currentThread())
                return f(*args, **kwargs)
            finally:
                # Wake up another thread...
                self.evt.set()
                print('Set event =>', currentThread())

        return eventfunc
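
Applying the decorator above serializes calls across threads (assuming `Event` and `currentThread` from `threading` are imported; `Thread` is used here only for the demo):

from threading import Thread

@event()
def critical_section(n):
    print('working on', n)

threads = [Thread(target=critical_section, args=(i,)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()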
Example #27
class Sound(hass.Hass):

  def initialize(self):
    
    # Create Queue
    self.queue = Queue(maxsize=0)

    # Create worker thread
    t = Thread(target=self.worker)
    t.daemon = True
    t.start()
    
    self.event = Event()
    
  def worker(self):
    active = True
    while active:
      try:
        # Get data from queue
        data = self.queue.get()
        if data["type"] == "terminate":
          active = False
        else:
          # Save current volume
          volume = self.get_state(self.args["player"], attribute="volume_level")
          # Set to the desired volume
          self.call_service("media_player/volume_set", entity_id = self.args["player"], volume_level = data["volume"])
          if data["type"] == "tts":
            # Call TTS service
            self.call_service("tts/amazon_polly_say", entity_id = self.args["player"], message = data["text"])
          if data["type"] == "play":
            netpath = 'http://{}:{}/local/{}/{}'.format(self.args["ip"], self.args["port"], self.args["base"], data["path"])
            self.call_service("media_player/play_media", entity_id = self.args["player"], media_content_id = netpath, media_content_type = data["content"])

          # Sleep to allow message to complete before restoring volume
          time.sleep(int(data["length"]))
          # Restore volume
          self.call_service("media_player/volume_set", entity_id = self.args["player"], volume_level = volume)
          # Set state locally as well to avoid race condition
          self.set_state(self.args["player"], attributes = {"volume_level": volume})
      except Exception:
        self.log("Error")
        self.log(sys.exc_info())

      # Rinse and repeat
      self.queue.task_done()
      
    self.log("Worker thread exiting")
    self.event.set()
       
  def tts(self, text, volume, length):
    self.queue.put({"type": "tts", "text": text, "volume": volume, "length": length})
    
  def play(self, path, content, volume, length):
    self.queue.put({"type": "play", "path": path, "content": content, "volume": volume, "length": length})

  def terminate(self):
    self.event.clear()
    self.queue.put({"type": "terminate"})
    self.event.wait()
Example #28
def do_download():
    # get activity registry
    from jarabe.model.bundleregistry import get_registry
    registry = get_registry()  # requires a dbus-registered main loop
    install_event = Event()
    # progress bar bookkeeping.
    counts = [0, self.activity_list.updates_selected(), 0]

    def p(n, extra, icon):
        if n is None:
            progress_cb(n, extra, icon)
        else:
            progress_cb((n + (counts[0] / counts[1])) / 2, extra, icon)
        counts[2] = n  # last fraction.

    def q(n, row):
        p(n, _('Downloading %s...') % row[model.DESCRIPTION_BIG],
          row[model.ACTIVITY_ICON])

    for row, f in self.activity_list.download_selected_updates(q):
        if f is None:
            continue  # cancelled or network error.
        try:
            p(counts[2], _('Examining %s...') % row[model.DESCRIPTION_BIG],
              row[model.ACTIVITY_ICON])
            b = actutils.BundleHelper(f)
            p(counts[2], _('Installing %s...') % b.get_name(),
              _svg2pixbuf(b.get_icon_data()))
            install_event.clear()
            GLib.idle_add(self.install_cb, registry, b, install_event)
            install_event.wait()
        except Exception:
            logging.exception("Failed to install bundle")
            # XXX: use alert to indicate install failure.
        if os.path.exists(f):
            os.unlink(f)
        counts[0] += 1
    # refresh when we're done.
    GObject.idle_add(self.refresh_cb, None, None, False)
Example #29
class EventLoop(object):


    def __init__(self):
        self.registry = {}
        self.threads = {}
        self.queue = queue.Queue()
        self.run_flag = Event()
        self.run_flag.set()


    def register(self, name, thread_obj=None, event=None):
        """
        If called with two parameters, they should be event to listen to and
        the EventConsumer class that will listen to it.
        If called with one parameter, it should be an EventEmitter class.
        """
        if thread_obj is not None:
            self.threads[name] = thread_obj
        if event is not None:
            if event not in self.registry:
                self.registry[event] = []
            if self.threads[name].add_event not in self.registry[event]:
                self.registry[event].append(self.threads[name].add_event)


    def unregister(self, event, name):
        ev_thr = self.threads[name].add_event
        print('unregister', event, name, ev_thr, self.registry[event])
        if ev_thr in self.registry[event]:
            self.registry[event].remove(ev_thr)
            print(self.registry[event])


    def add_event(self, event, value):
        self.queue.put((event, value))


    def run(self):
        for name in self.threads:
            self.threads[name].start()
        while True:
            try:
                event, value = self.queue.get(True, 20)
                if event == 'unregister':
                    self.unregister(*value)
                if event not in self.registry:
                    print("Invalid event encountered %s with values %r!" %
                          (event, value))
                for listener in self.registry[event]:
                    # print('call', listener, event, value)
                    listener(event, value)
            except KeyboardInterrupt:
                self.run_flag.clear()
                for thread in self.threads:
                    print(thread)
                    self.threads[thread].join()
                return
            except queue.Empty:
                pass
Example #30
class MarkerThread(Thread):
    def __init__(self, da, zl, marker, coord, conf, pixDim):
        Thread.__init__(self)
        self.da = da
        self.update = Event()
        self.update.set()
        self.__stop = Event()
        self.zl = zl
        self.marker = marker
        self.coord = coord
        self.conf = conf
        self.pixDim = pixDim
        self.img = self.marker.get_marker_pixbuf(zl)

    def run(self):
        while not self.__stop.is_set():
            self.update.wait()
            self.update.clear()
            self.draw_markers()

    def stop(self):
        self.__stop.set()
        self.update.set()

    def draw_markers(self):
        for string in self.marker.positions.keys():
            if self.update.is_set() or self.__stop.is_set():
                break
            mpos = self.marker.positions[string]
            if (self.zl <= mpos[2]) and (mpos[0], mpos[1]) != (self.coord[0], self.coord[1]):
                gtk.threads_enter()
                try:
                    self.da.draw_marker(self.conf, mpos, self.zl, self.img, self.pixDim, string)
                finally:
                    gtk.threads_leave()
Example #31
class TelegramRegistrator(TelegramClient):
    # Current TelegramClient version
    __version__ = '0.10.1'

    # region Initialization

    def __init__(self, session, api_id=None, api_hash=None, proxy=None):
        """Initializes the Telegram client with the specified API ID and Hash.

           Session can either be a `str` object (the filename for the loaded/saved .session)
           or it can be a `Session` instance (in which case list_sessions() would probably not work).
           If you don't want any file to be saved, pass `None`

           In the latter case, you are free to override the `Session` class to provide different
           .save() and .load() implementations to suit your needs."""

        # if api_id is None or api_hash is None:
        #     raise PermissionError(
        #         'Your API ID or Hash are invalid. Please read "Requirements" on README.rst')

        super().__init__(session, api_id, api_hash, proxy)
        self.api_id = api_id
        self.api_hash = api_hash

        # Determine what session object we have
        # TODO JsonSession until migration is complete (by v1.0)
        if isinstance(session, str) or session is None:
            self.session = JsonSession.try_load_or_create_new(session)
        elif isinstance(session, Session):
            self.session = session
        else:
            raise ValueError(
                'The given session must either be a string or a Session instance.')

        self.transport = None
        self.proxy = proxy  # Will be used when a TcpTransport is created

        self.login_success = False

        # Safety across multiple threads (for the updates thread)
        self._lock = RLock()
        self._logger = app_logger

        # Methods to be called when an update is received
        self._update_handlers = []
        self._updates_thread_running = Event()
        self._updates_thread_receiving = Event()

        # Cache "exported" senders 'dc_id: MtProtoSender' and
        # their corresponding sessions not to recreate them all
        # the time since it's a (somewhat expensive) process.
        self._cached_senders = {}
        self._cached_sessions = {}

        # These will be set later
        self._updates_thread = None
        self.dc_options = None
        self.sender = None
        self.phone_code_hashes = {}

    def connect(self, reconnect=False,
                device_model=None, system_version=None,
                app_version=None, lang_code=None):
        """Connects to the Telegram servers, executing authentication if
           required. Note that authenticating to the Telegram servers is
           not the same as authenticating the desired user itself, which
           may require a call (or several) to 'sign_in' for the first time.

           Default values for the optional parameters if left as None are:
             device_model   = platform.node()
             system_version = platform.system()
             app_version    = TelegramClient.__version__
             lang_code      = 'en'
        """
        if self.transport is None:
            self.transport = TcpTransport(self.session.server_address,
                                          self.session.port, proxy=self.proxy)

        try:
            if not self.session.auth_key or (reconnect and self.sender is not None):
                self.session.auth_key, self.session.time_offset = \
                    authenticator.do_authentication(self.transport)

                self.session.save()

            self.sender = MtProtoSender(self.transport, self.session)
            self.sender.connect()

            # Set the default parameters if left unspecified
            if not device_model:
                device_model = platform.node()
            if not system_version:
                system_version = platform.system()
            if not app_version:
                app_version = self.__version__
            if not lang_code:
                lang_code = 'en'

            # Now it's time to send an InitConnectionRequest
            # This must always be invoked with the layer we'll be using
            query = InitConnectionRequest(
                api_id=self.api_id,
                device_model=device_model,
                system_version=system_version,
                app_version=app_version,
                lang_code=lang_code,
                query=GetConfigRequest())

            result = self.invoke(
                InvokeWithLayerRequest(
                    layer=layer, query=query))

            # We're only interested in the DC options,
            # although many other options are available!
            self.dc_options = result.dc_options

            self.login_success = True
            return True
        except (RPCError, ConnectionError) as error:
            # Probably errors from the previous session, ignore them
            self._logger.warning('Could not stabilise initial connection: {}'
                                 .format(error))
            return False

    def check_phone(self, phone_number):
        result = self.invoke(
                CheckPhoneRequest(phone_number=phone_number))
        return result

    def sign_up(self, phone_number, code, first_name, last_name=''):
        """Signs up to Telegram. Make sure you sent a code request first!"""
        result = self.invoke(
            SignUpRequest(
                phone_number=phone_number,
                phone_code_hash=self.phone_code_hashes[phone_number],
                phone_code=code,
                first_name=first_name,
                last_name=last_name))

        self.session.user = result.user
        self.session.save()
        return result

    def invoke(self, request, timeout=timedelta(seconds=5), throw_invalid_dc=False):
        """Invokes a MTProtoRequest (sends and receives it) and returns its result.
           An optional timeout can be given to cancel the operation after the time delta.
           Timeout can be set to None for no timeout.

           If throw_invalid_dc is True, these errors won't be caught (useful to
           avoid infinite recursion). This should not be set to True manually."""
        if not issubclass(type(request), MTProtoRequest):
            raise ValueError('You can only invoke MtProtoRequests')

        if not self.sender:
            raise ValueError('You must be connected to invoke requests!')

        if self._updates_thread_receiving.is_set():
            self.sender.cancel_receive()

        try:
            self._lock.acquire()
            updates = []
            self.sender.send(request)
            self.sender.receive(request, timeout, updates=updates)
            for update in updates:
                for handler in self._update_handlers:
                    handler(update)

            return request.result

        except InvalidDCError as error:
            if throw_invalid_dc:
                raise
            self._reconnect_to_dc(error.new_dc)
            return self.invoke(request,
                               timeout=timeout, throw_invalid_dc=True)

        except ConnectionResetError:
            self._logger.info('Server disconnected us. Reconnecting and '
                              'resending request...')
            self.reconnect()
            return self.invoke(request, timeout=timeout,
                               throw_invalid_dc=throw_invalid_dc)

        except FloodWaitError:
            self.disconnect()
            raise

        finally:
            self._lock.release()

    def reconnect(self):
        """Disconnects and connects again (effectively reconnecting)"""
        self.disconnect()
        self.connect()

    def disconnect(self):
        """Disconnects from the Telegram server and stops all the spawned threads"""
        self._set_updates_thread(running=False)
        if self.sender:
            self.sender.disconnect()
            self.sender = None
        if self.transport:
            self.transport.close()
            self.transport = None

        # Also disconnect all the cached senders
        for sender in self._cached_senders.values():
            sender.disconnect()

        self._cached_senders.clear()
        self._cached_sessions.clear()

    def _set_updates_thread(self, running):
        """Sets the updates thread status (running or not)"""
        if running == self._updates_thread_running.is_set():
            return

        # Different state, update the saved value and behave as required
        self._logger.info('Changing updates thread running status to %s', running)
        if running:
            self._updates_thread_running.set()
            if not self._updates_thread:
                self._updates_thread = Thread(
                    name='UpdatesThread', daemon=True,
                    target=self._updates_thread_method)

            self._updates_thread.start()
        else:
            self._updates_thread_running.clear()
            if self._updates_thread_receiving.is_set():
                self.sender.cancel_receive()

    def _updates_thread_method(self):
        """This method will run until specified and listen for incoming updates"""

        # Set a reasonable timeout when checking for updates
        timeout = timedelta(minutes=1)

        while self._updates_thread_running.is_set():
            # Always sleep a bit before each iteration to relax the CPU,
            # since it's possible to early 'continue' the loop to reach
            # the next iteration, but we should still sleep.
            sleep(0.1)

            with self._lock:
                self._logger.debug('Updates thread acquired the lock')
                try:
                    self._updates_thread_receiving.set()
                    self._logger.debug('Trying to receive updates from the updates thread')
                    result = self.sender.receive_update(timeout=timeout)
                    self._logger.info('Received update from the updates thread')
                    for handler in self._update_handlers:
                        handler(result)

                except ConnectionResetError:
                    self._logger.info('Server disconnected us. Reconnecting...')
                    self.reconnect()

                except TimeoutError:
                    self._logger.debug('Receiving updates timed out')

                except ReadCancelledError:
                    self._logger.info('Receiving updates cancelled')

                except OSError:
                    self._logger.warning('OSError on updates thread, %s logging out',
                                         'was' if self.sender.logging_out else 'was not')

                    if self.sender.logging_out:
                        # This error is okay when logging out, means we got disconnected
                        # TODO Not sure why this happens because we call disconnect()…
                        self._set_updates_thread(running=False)
                    else:
                        raise

            self._logger.debug('Updates thread released the lock')
            self._updates_thread_receiving.clear()

        # Thread is over, so unset its variable
        self._updates_thread = None

    def _reconnect_to_dc(self, dc_id):
        """Reconnects to the specified DC ID. This is automatically
           called after an InvalidDCError is raised"""
        dc = self._get_dc(dc_id)

        self.transport.close()
        self.transport = None
        self.session.server_address = dc.ip_address
        self.session.port = dc.port
        self.session.save()

        self.connect(reconnect=True)

    def _get_dc(self, dc_id):
        """Gets the Data Center (DC) associated to 'dc_id'"""
        if not self.dc_options:
            raise ConnectionError(
                'Cannot determine the required data center IP address. '
                'Stabilise a successful initial connection first.')

        return next(dc for dc in self.dc_options if dc.id == dc_id)

    def send_code_request(self, phone_number):
        """Sends a code request to the specified phone number"""
        result = self.invoke(SendCodeRequest(phone_number, self.api_id, self.api_hash))
        self.phone_code_hashes[phone_number] = result.phone_code_hash
        return result

    def run(self):
        # Listen for updates
        self.add_update_handler(self.update_handler)

        # Enter a while loop to chat as long as the user wants
        while True:
            # Retrieve the top dialogs
            dialog_count = 10

            # Entities represent the user, chat or channel
            # corresponding to the dialog on the same index
            dialogs, entities = self.get_dialogs(dialog_count)

            i = None
            while i is None:
                print_title('Dialogs window')

                # Display them so the user can choose
                for i, entity in enumerate(entities, start=1):
                    sprint('{}. {}'.format(i, get_display_name(entity)))

                # Let the user decide who they want to talk to
                print()
                print('> Who do you want to send messages to?')
                print('> Available commands:')
                print('  !q: Quits the dialogs window and exits.')
                print('  !l: Logs out, terminating this session.')
                print()
                i = input('Enter dialog ID or a command: ')
                if i == '!q':
                    return
                if i == '!l':
                    self.log_out()
                    return

                try:
                    i = int(i if i else 0) - 1
                    # Ensure it is inside the bounds, otherwise retry
                    if not 0 <= i < dialog_count:
                        i = None
                except ValueError:
                    i = None

            # Retrieve the selected user (or chat, or channel)
            entity = entities[i]

            # Show some information
            print_title('Chat with "{}"'.format(get_display_name(entity)))
            print('Available commands:')
            print('  !q: Quits the current chat.')
            print('  !Q: Quits the current chat and exits.')
            print('  !h: prints the latest messages (message History).')
            print('  !up <path>: Uploads and sends the Photo from path.')
            print('  !uf <path>: Uploads and sends the File from path.')
            print('  !dm <msg-id>: Downloads the given message Media (if any).')
            print('  !dp: Downloads the current dialog Profile picture.')
            print()

            # And start a while loop to chat
            while True:
                msg = input('Enter a message: ')
                # Quit
                if msg == '!q':
                    break
                elif msg == '!Q':
                    return

                # History
                elif msg == '!h':
                    # First retrieve the messages and some information
                    total_count, messages, senders = self.get_message_history(
                        entity, limit=10)

                    # Iterate over all (in reverse order so the latest appear
                    # the last in the console) and print them with format:
                    # "[hh:mm] Sender: Message"
                    for msg, sender in zip(
                            reversed(messages), reversed(senders)):
                        # Get the name of the sender if any
                        if sender:
                            name = getattr(sender, 'first_name', None)
                            if not name:
                                name = getattr(sender, 'title', None)
                                if not name:
                                    name = '???'
                        else:
                            name = '???'

                        # Format the message content
                        if getattr(msg, 'media', None):
                            self.found_media.add(msg)
                            # The media may or may not have a caption
                            caption = getattr(msg.media, 'caption', '')
                            content = '<{}> {}'.format(
                                type(msg.media).__name__, caption)

                        elif hasattr(msg, 'message'):
                            content = msg.message
                        elif hasattr(msg, 'action'):
                            content = str(msg.action)
                        else:
                            # Unknown message, simply print its class name
                            content = type(msg).__name__

                        # And print it to the user
                        sprint('[{}:{}] (ID={}) {}: {}'.format(
                            msg.date.hour, msg.date.minute, msg.id, name,
                            content))

                # Send photo
                elif msg.startswith('!up '):
                    # Slice the message to get the path
                    self.send_photo(path=msg[len('!up '):], entity=entity)

                # Send file (document)
                elif msg.startswith('!uf '):
                    # Slice the message to get the path
                    self.send_document(path=msg[len('!uf '):], entity=entity)

                # Download media
                elif msg.startswith('!dm '):
                    # Slice the message to get message ID
                    self.download_media(msg[len('!dm '):])

                # Download profile photo
                elif msg == '!dp':
                    output = str('usermedia/propic_{}'.format(entity.id))
                    print('Downloading profile picture...')
                    success = self.download_profile_photo(entity.photo, output)
                    if success:
                        print('Profile picture downloaded to {}'.format(
                            output))
                    else:
                        print('No profile picture found for this user.')

                # Send chat message (if any)
                elif msg:
                    self.send_message(
                        entity, msg, no_web_page=True)

    @staticmethod
    def update_handler(update_object):
        if type(update_object) is UpdateShortMessage:
            if update_object.out:
                sprint('You sent {} to user #{}'.format(
                    update_object.message, update_object.user_id))
            else:
                sprint('[User #{} sent {}]'.format(
                    update_object.user_id, update_object.message))

        elif type(update_object) is UpdateShortChatMessage:
            if update_object.out:
                sprint('You sent {} to chat #{}'.format(
                    update_object.message, update_object.chat_id))
            else:
                sprint('[Chat #{}, user #{} sent {}]'.format(
                    update_object.chat_id, update_object.from_id,
                    update_object.message))

    def is_user_authorized(self):
        """Has the user been authorized yet
           (code request sent and confirmed)?"""
        return self.session and self.get_me() is not None
Example #32
class RealsenseGait(SetpointsGait):
    """
    The RealsenseGait class is used for creating gaits based on the parameters given
    by the realsense reader. From these parameters the subgaits to interpolate are
    interpolated after a realsense call during the start of the gait. It is based on the
    setpoints gait, and it uses the interpolation over 1 or 2 dimensions with 2 or 4
    subgaits respectively.
    """

    SERVICE_TIMEOUT = Duration(seconds=2.0)
    INITIAL_START_DELAY_TIME = Duration(seconds=10.0)
    CAMERA_NAME_MAP = {
        "front": GetGaitParameters.Request.CAMERA_FRONT,
        "back": GetGaitParameters.Request.CAMERA_BACK,
    }
    REALSENSE_CATEGORY_MAP = {
        "stairs_up": GetGaitParameters.Request.STAIRS_UP,
        "stairs_down": GetGaitParameters.Request.STAIRS_DOWN,
        "ramp_up": GetGaitParameters.Request.RAMP_UP,
        "ramp_down": GetGaitParameters.Request.RAMP_DOWN,
        "sit": GetGaitParameters.Request.SIT,
        "curb_up": GetGaitParameters.Request.CURB_UP,
        "curb_down": GetGaitParameters.Request.CURB_DOWN,
    }

    def __init__(
        self,
        gait_name: str,
        subgaits: dict,
        graph: SubgaitGraph,
        gait_selection: GaitSelection,
        realsense_category: str,
        camera_to_use: str,
        subgaits_to_interpolate: dict,
        dimensions: InterpolationDimensions,
        process_service: Client,
        starting_position: EdgePosition,
        final_position: EdgePosition,
        parameters: List[float],
        dependent_on: List[str],
        responsible_for: List[str],
    ):
        super(RealsenseGait, self).__init__(gait_name, subgaits, graph)
        self._gait_selection = gait_selection
        self.logger = Logger(self._gait_selection, __class__.__name__)
        self.parameters = parameters
        self.dimensions = dimensions
        self.realsense_category = self.realsense_category_from_string(
            realsense_category)
        self.camera_to_use = self.camera_msg_from_string(camera_to_use)
        self.subgaits_to_interpolate = subgaits_to_interpolate
        # Set up the service and event for asynchronous realsense parameter calls
        self._get_gait_parameters_service = process_service
        self.realsense_service_event = Event()
        self.realsense_service_result = None
        self._starting_position = starting_position
        self._final_position = final_position
        self._dependent_on = dependent_on
        self._responsible_for = responsible_for

    @property
    def dependent_on(self):
        return self._dependent_on

    @property
    def responsible_for(self):
        return self._responsible_for

    @property
    def subsequent_subgaits_can_be_scheduled_early(self) -> bool:
        """
        Whether subsequent subgaits can be scheduled early. This is possible for the
        realsense gait, since the service call that determines the gait parameters
        is made during the start of the gait, not between subgaits.
        """
        return True

    @property
    def first_subgait_can_be_scheduled_early(self) -> bool:
        """
        Whether the first subgait can be started with a delay, this is possible for
        the realsense gait.
        """
        return True

    @property
    def starting_position(self) -> EdgePosition:
        return self._starting_position

    @property
    def final_position(self) -> EdgePosition:
        return self._final_position

    @classmethod
    def from_yaml(
        cls,
        gait_selection: GaitSelection,
        robot: urdf.Robot,
        gait_name: str,
        gait_config: dict,
        gait_graph: dict,
        gait_directory: str,
        process_service: Client,
    ):
        """
        Construct a realsense gait from the gait_config from the realsense_gaits.yaml.

        :param gait_selection: The GaitSelection node that will be used for making
            the service calls to the realsense reader.
        :param robot: The urdf robot that can be used to verify the limits of the
            subgaits.
        :param gait_name: The name of the gait.
        :param gait_config: The yaml node with the needed configurations.
        :param gait_graph: The graph from the .gait file with the subgait transitions.
        :param gait_directory: The gait_directory that is being used.
        :param process_service: The service from which to get the gait parameters
        :return: The constructed RealsenseGait
        """
        graph = SubgaitGraph(gait_graph)
        subgaits_to_interpolate = {}
        try:
            dimensions = InterpolationDimensions.from_integer(
                gait_config["dimensions"])
            dependent_on = gait_config.get("dependent_on", [])
            responsible_for = gait_config.get("responsible_for", [])

            parameters = [0.0 for _ in range(amount_of_parameters(dimensions))]

            realsense_category = gait_config["realsense_category"]
            camera_to_use = gait_config["camera_to_use"]
            subgait_version_map = gait_config["subgaits"]
            # Create subgaits to interpolate with
            for subgait_name in subgait_version_map:
                subgaits_to_interpolate[subgait_name] = [
                    Subgait.from_name_and_version(robot, gait_directory,
                                                  gait_name, subgait_name,
                                                  version)
                    for version in subgait_version_map[subgait_name]
                ]
                if len(subgaits_to_interpolate[subgait_name]
                       ) != amount_of_subgaits(dimensions):
                    raise WrongRealSenseConfigurationError(
                        f"The amount of subgaits in the realsense version map "
                        f"({len(subgaits_to_interpolate[subgait_name])}) doesn't match "
                        f"the amount of dimensions for subgait {subgait_name}")

            subgaits = {}
            for subgait_name in subgait_version_map:
                if subgait_name not in ("start", "end"):
                    subgaits[subgait_name] = Subgait.interpolate_n_subgaits(
                        dimensions=dimensions,
                        subgaits=subgaits_to_interpolate[subgait_name],
                        parameters=parameters,
                        use_foot_position=True,
                    )

            starting_position = cls.parse_edge_position(
                gait_config["starting_position"],
                subgaits[graph.start_subgaits()[0]].starting_position,
            )
            final_position = cls.parse_edge_position(
                gait_config["final_position"],
                subgaits[graph.end_subgaits()[0]].final_position,
            )

        except KeyError as e:
            raise WrongRealSenseConfigurationError(
                f"There was a missing key to create the realsense gait in gait "
                f"{gait_name}: {e}") from e
        except ValueError as e:
            raise WrongRealSenseConfigurationError(
                f"There was a wrong value in the config for the realsense gait"
                f" {gait_name}: {e}") from e
        return cls(
            gait_name=gait_name,
            subgaits=subgaits,
            graph=graph,
            gait_selection=gait_selection,
            realsense_category=realsense_category,
            camera_to_use=camera_to_use,
            subgaits_to_interpolate=subgaits_to_interpolate,
            dimensions=dimensions,
            process_service=process_service,
            starting_position=starting_position,
            final_position=final_position,
            parameters=parameters,
            dependent_on=dependent_on,
            responsible_for=responsible_for,
        )
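
    # A hypothetical sketch (all values invented) of the gait_config dict that
    # from_yaml expects; the key names are taken from the lookups above:
    #
    #     gait_config = {
    #         "dimensions": 2,                    # InterpolationDimensions.from_integer
    #         "dependent_on": [],                 # optional, defaults to []
    #         "responsible_for": [],              # optional, defaults to []
    #         "realsense_category": "stairs_up",  # key into REALSENSE_CATEGORY_MAP
    #         "camera_to_use": "front",           # key into CAMERA_NAME_MAP
    #         "starting_position": "static",      # "static" or "dynamic"
    #         "final_position": "static",
    #         "subgaits": {
    #             # one version per interpolation corner of the chosen dimensions
    #             "left_open": ["v1", "v2", "v3", "v4"],
    #             "right_close": ["v1", "v2", "v3", "v4"],
    #         },
    #     }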

    @classmethod
    def parse_edge_position(cls, config_value: str,
                            position_values: Dict[str, float]):
        """
        Parse the edge position based on the string in the realsense_gaits.yaml.
        :param config_value: The value in the yaml file.
        :param position_values: The actual joint positions at the edge of the gait.
        :return: The edge position to use.
        """
        if config_value == "static":
            return StaticEdgePosition(position_values)
        elif config_value == "dynamic":
            return DynamicEdgePosition(position_values)
        else:
            raise WrongRealSenseConfigurationError(
                f"The edge position should be either `static` or `dynamic`, "
                f"but was `{config_value}`")

    @classmethod
    def realsense_category_from_string(cls, gait_name: str) -> int:
        """
        Construct the realsense gait from the string in the realsense_gaits.yaml.

        :param gait_name: The string from the config.
        :return: The integer to send to the realsense reader to define the category.
        """
        if gait_name not in cls.REALSENSE_CATEGORY_MAP:
            raise WrongRealSenseConfigurationError(
                f"Gait name {gait_name} from the config is not known as a possible "
                f"realsense reader gait configuration")
        return cls.REALSENSE_CATEGORY_MAP[gait_name]

    @classmethod
    def camera_msg_from_string(cls, camera_name: str) -> int:
        """
        Construct the camera name msg from the string in the realsense_gaits.yaml.

        :param camera_name: The string from the config.
        :return: The integer to send to the realsense reader to define the camera.
        """
        if camera_name not in cls.CAMERA_NAME_MAP:
            raise WrongRealSenseConfigurationError(
                f"The camera configuration {camera_name} from the realsense_gaits.yaml "
                f"is not one of the known camera names: {cls.CAMERA_NAME_MAP.keys()}"
            )
        return cls.CAMERA_NAME_MAP[camera_name]

    DEFAULT_FIRST_SUBGAIT_DELAY_START_RS_DURATION = Duration(0)

    def start(
        self,
        current_time: Time,
        first_subgait_delay: Optional[
            Duration] = DEFAULT_FIRST_SUBGAIT_DELAY_START_RS_DURATION,
    ) -> GaitUpdate:
        """
        This function is called to start the realsense gait, it does the following.
        1) Make a service call to march_realsense_reader.
        2) Update all subgaits to interpolated subgaits with the given parameters
        (this will later become only some of the subgaits when the update function is
        also used).
        3) Update the gait parameters to prepare for start
        4) Return the first subgait, if correct parameters were found.

        :return: A gait update that tells the state machine what to do. Empty means
        that that state machine should not start a gait.
        """
        self._reset()
        # Delay start until parameterization is done
        self._start_is_delayed = True
        # Start time will be set later; to prevent updates during the service
        # calls from thinking the gait start time has passed, set the start time
        # in the future.
        self._start_time = current_time + self.INITIAL_START_DELAY_TIME
        self._current_time = current_time
        # If a gait is dependent on some other gait, its subgaits are already
        # interpolated from parameters, so we can skip the realsense call
        if not self._dependent_on:
            realsense_update_successful = self.get_realsense_update()
            if not realsense_update_successful:
                return GaitUpdate.empty()

        self._current_subgait = self.subgaits[self.graph.start_subgaits()[0]]
        self._next_subgait = self._current_subgait
        if first_subgait_delay is None:
            first_subgait_delay = self.DEFAULT_FIRST_SUBGAIT_DELAY_START_RS_DURATION
        self._start_time = (self._gait_selection.get_clock().now()
                            + first_subgait_delay)
        self._end_time = self._start_time + self._current_subgait.duration
        return GaitUpdate.should_schedule_early(
            self._command_from_current_subgait())

    def get_realsense_update(self):
        """
        Makes a realsense service call and handles the result

        :return: Whether the call was successful
        """
        service_call_succesful = self.make_realsense_service_call()
        if not service_call_succesful:
            self.logger.warn("No service response received within timeout")
            return False

        gait_parameters_response = self.realsense_service_result
        if gait_parameters_response is None or not gait_parameters_response.success:
            self.logger.warn(
                "No gait parameters were found, gait will not be started, "
                f"{gait_parameters_response}")
            return False

        return self.update_gaits_from_realsense_call(
            gait_parameters_response.gait_parameters)

    def update_gaits_from_realsense_call(
            self, gait_parameters: GaitParameters) -> bool:
        """
        Update the gait parameters based on the message of the current gaits and its
        responsibilities.

        :param gait_parameters: The parameters to update to.
        """
        success = True
        self.set_parameters(gait_parameters)
        success &= self.interpolate_subgaits_from_parameters()
        if self._responsible_for and success:
            for gait_name in self._responsible_for:
                gait = self._gait_selection.gaits[gait_name]
                # Make a recursive call to also handle the dependencies of the
                # dependent gait
                if isinstance(gait, RealsenseGait):
                    gait.update_gaits_from_realsense_call(gait_parameters)
        return success

    def make_realsense_service_call(self) -> bool:
        """
        Make a call to the realsense service, if it is available
        and returns the response.

        :return: Whether the call was successful
        """
        if self._current_subgait is not None:
            subgait_name = self._current_subgait.subgait_name
        else:
            # Assume that the gait is starting and use the first subgait name
            subgait_name = self.graph.start_subgaits()[0]

        request = GetGaitParameters.Request(
            realsense_category=self.realsense_category,
            camera_to_use=self.camera_to_use,
            subgait_name=subgait_name,
        )
        self.realsense_service_event.clear()
        if self._get_gait_parameters_service.wait_for_service(
                timeout_sec=self.SERVICE_TIMEOUT.seconds):
            gait_parameters_response_future = (
                self._get_gait_parameters_service.call_async(request))
            gait_parameters_response_future.add_done_callback(
                self._realsense_response_cb)
        else:
            self.logger.error(
                f"The service took longer than {self.SERVICE_TIMEOUT} to become "
                f"available, is the realsense reader running?")
            return False

        return self.realsense_service_event.wait(
            timeout=self.SERVICE_TIMEOUT.seconds)

    def _realsense_response_cb(self, future: Future):
        """Set capture point result when the capture point service returns."""
        self.realsense_service_result = future.result()
        self.realsense_service_event.set()

    def interpolate_subgaits_from_parameters(self) -> bool:
        """Change all subgaits to one interpolated from the current parameters."""
        self.logger.info(
            f"Interpolating gait {self.gait_name} with parameters:"
            f" {self.parameters}")

        new_subgaits = {}
        for subgait_name in self.subgaits.keys():
            new_subgaits[subgait_name] = Subgait.interpolate_n_subgaits(
                dimensions=self.dimensions,
                subgaits=self.subgaits_to_interpolate[subgait_name],
                parameters=self.parameters,
                use_foot_position=True,
            )
        try:
            self.set_subgaits(new_subgaits, self._gait_selection)
        except NonValidGaitContentError:
            return False

        return True

    def set_parameters(self, gait_parameters: GaitParameters) -> None:
        """
        Set the gait parameters based on the message.

        :param gait_parameters: The parameters to set.
        """
        if self.dimensions == InterpolationDimensions.ONE_DIM:
            self.parameters = [gait_parameters.first_parameter]
        elif self.dimensions == InterpolationDimensions.TWO_DIM:
            self.parameters = [
                gait_parameters.first_parameter,
                gait_parameters.second_parameter,
            ]
        else:
            raise UnknownDimensionsError(self.dimensions)

    def set_edge_positions(self, starting_position: EdgePosition,
                           final_position: EdgePosition):
        """
        Set the new edge positions. Overrides from the setpoints gait, which does not
        store the starting or final position
        :param starting_position: The new starting position
        :param final_position: The new final position
        """
        self._starting_position = starting_position
        self._final_position = final_position
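
A standalone sketch of the Event choreography used by make_realsense_service_call
and _realsense_response_cb above: clear the event, fire the asynchronous call,
store the result in the done callback, then wait with a timeout. All names below
are hypothetical and a plain thread stands in for the ROS2 service:

from threading import Event, Thread
import time

result_event = Event()
result_holder = {}

def fake_async_call(on_done):
    # stand-in for call_async(): do "work" on another thread, then
    # invoke the done callback
    def work():
        time.sleep(0.1)
        on_done({"success": True})
    Thread(target=work).start()

def done_cb(response):
    result_holder["response"] = response  # store the result first...
    result_event.set()                    # ...then wake the waiting thread

result_event.clear()                      # reset before every call
fake_async_call(done_cb)
if result_event.wait(timeout=1.0):        # block, but never past the timeout
    print(result_holder["response"])
else:
    print("no response within timeout")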
Beispiel #33
0
class BlueDot():
    """
    Interacts with a Blue Dot client application, communicating when and where it
    has been pressed, released or held.   

    This class starts an instance of a bluetooth server (btcomm.BluetoothServer) 
    which manages the connection with the Blue Dot client.

    This class is intended for use with the Blue Dot client application.

    The following example will print a message when the Blue Dot is pressed::
    
        from bluedot import BlueDot
        bd = BlueDot()
        bd.wait_for_press()
        print("The blue dot was pressed")

    :param string device:
        The bluetooth device the server should use, the default is ``hci0``, if
        your device only has 1 bluetooth adapter this shouldn't need to be changed.

    :param int port:
        The bluetooth port the server should use, the default is ``1``, and under 
        normal use this should never need to change.

    :param bool auto_start_server:
        If ``True`` (the default), the bluetooth server will be automatically started
        on initialisation, if ``False``, the method ``start`` will need to be called
        before connections will be accepted.
    
    :param bool power_up_device:
        If ``True``, the bluetooth device will be powered up (if required) when the 
        server starts. The default is ``False``. 
        
        Depending on how bluetooth has been powered down, you may need to use rfkill 
        to unblock bluetooth to give permission to bluez to power on bluetooth::

            sudo rfkill unblock bluetooth

    :param bool print_messages:
        If ``True`` (the default), server status messages will be printed stating
        when the server has started and when clients connect / disconnect.

    """
    def __init__(self,
                 device="hci0",
                 port=1,
                 auto_start_server=True,
                 power_up_device=False,
                 print_messages=True):

        self._data_buffer = ""
        self._device = device
        self._port = port
        self._power_up_device = power_up_device
        self._print_messages = print_messages

        self._is_connected_event = Event()
        self._is_pressed_event = Event()
        self._is_released_event = Event()
        self._is_moved_event = Event()
        self._is_swiped_event = Event()
        self._is_double_pressed_event = Event()

        self._when_pressed = None
        self._when_double_pressed = None
        self._when_released = None
        self._when_moved = None
        self._when_swiped = None
        self._when_rotated = None
        self._when_client_connects = None
        self._when_client_disconnects = None

        self._position = None
        self._interaction = None
        self._double_press_time = 0.3
        self._rotation_segments = 8

        self._create_server()

        if auto_start_server:
            self.start()

    @property
    def device(self):
        """
        The bluetooth device the server is using. This defaults to ``hci0``.
        """
        return self._device

    @property
    def port(self):
        """
        The port the server is using. This defaults to ``1``.
            """
        return self._port

    @property
    def server(self):
        """
        The ``btcomm.BluetoothServer`` instance that is being used to communicate
        with clients.
        """
        return self._server

    @property
    def adapter(self):
        """
        The ``btcomm.BluetoothAdapter`` instance that is being used.
        """
        return self._server.adapter

    @property
    def paired_devices(self):
        """
        Returns a list of devices paired with this adapter
        ``((device_mac_address, device_name), (device_mac_address, device_name))``::
        
            bd = BlueDot()
            devices = bd.paired_devices
            for d in devices:
                device_address = d[0]
                device_name = d[1]
        """
        return self._server.adapter.paired_devices

    @property
    def is_connected(self):
        """
        Returns ``True`` if a Blue Dot client is connected.
        """
        return self._is_connected_event.is_set()

    @property
    def is_pressed(self):
        """
        Returns ``True`` if the Blue Dot is pressed (or held).
        """
        return self._is_pressed_event.is_set()

    @property
    def value(self):
        """
        Returns a ``1`` if the Blue Dot is pressed, ``0`` if released.
        """
        return 1 if self.is_pressed else 0

    @property
    def values(self):
        """
        Returns an infinite generator constantly yielding the current value
        """
        while True:
            yield self.value

    @property
    def position(self):
        """
        Returns an instance of ``BlueDotPosition`` representing the 
        current or last position the Blue Dot was pressed, held or 
        released. 
        
        Note - if the Blue Dot is released (and inactive), ``position``
        will return the position where it was released, until it is pressed
        again. If the Blue Dot has never been pressed ``position`` will
        return ``None``.
        """
        return self._position

    @property
    def interaction(self):
        """
        Returns an instance of ``BlueDotInteraction`` representing the 
        current or last interaction with the Blue Dot. 
        
        Note - if the Blue Dot is released (and inactive), ``interaction`` 
        will return the interaction when it was released, until it is 
        pressed again. If the Blue Dot has never been pressed ``interaction`` 
        will return ``None``.
        """
        return self._interaction

    @property
    def when_pressed(self):
        """
        Sets or returns the function which is called when the Blue Dot is pressed. 

        The function should accept 0 or 1 parameters; if the function accepts 1 parameter, an
        instance of ``BlueDotPosition`` will be passed representing where the Blue Dot was pressed.
        
        The following example will print a message to the screen when the button is pressed::
        
            from bluedot import BlueDot
            
            def dot_was_pressed():
                print("The Blue Dot was pressed")
                
            bd = BlueDot()
            bd.when_pressed = dot_was_pressed
            
        This example shows how the position of where the dot was pressed can be obtained::
        
            from bluedot import BlueDot
            
            def dot_was_pressed(pos):
                print("The Blue Dot was pressed at pos x={} y={}".format(pos.x, pos.y))
                
            bd = BlueDot()
            bd.when_pressed = dot_was_pressed

        """
        return self._when_pressed

    @when_pressed.setter
    def when_pressed(self, value):
        self._when_pressed = value

    @property
    def when_double_pressed(self):
        """
        Sets or returns the function which is called when the Blue Dot is double pressed. 

        The function should accept 0 or 1 parameters; if the function accepts 1 parameter, an
        instance of ``BlueDotPosition`` will be passed representing where the Blue Dot was
        pressed the second time.

        Note - the double press event is fired before the 2nd press event, e.g. events would
        appear in the order: pressed, released, double pressed, pressed.
        """
        return self._when_double_pressed

    @when_double_pressed.setter
    def when_double_pressed(self, value):
        self._when_double_pressed = value

    @property
    def double_press_time(self):
        """
        Sets or returns the time threshold in seconds for a double press. Defaults to ``0.3``.
        """
        return self._double_press_time

    @double_press_time.setter
    def double_press_time(self, value):
        self._double_press_time = value

    @property
    def when_released(self):
        """
        Sets or returns the function which is called when the Blue Dot is released. 

        The function should accept 0 or 1 parameters; if the function accepts 1 parameter, an
        instance of ``BlueDotPosition`` will be passed representing where the Blue Dot was held
        when it was released.
        """
        return self._when_released

    @when_released.setter
    def when_released(self, value):
        self._when_released = value

    @property
    def when_moved(self):
        """
        Sets or returns the function which is called when the position at which the Blue Dot
        is pressed is moved.

        The function should accept 0 or 1 parameters; if the function accepts 1 parameter, an
        instance of ``BlueDotPosition`` will be passed representing the new position of where the
        Blue Dot is held.
        """
        return self._when_moved

    @when_moved.setter
    def when_moved(self, value):
        self._when_moved = value

    @property
    def when_swiped(self):
        """
        Sets or returns the function which is called when the Blue Dot is swiped. 

        The function should accept 0 or 1 parameters; if the function accepts 1 parameter, an
        instance of ``BlueDotSwipe`` will be passed representing how the Blue Dot was
        swiped.
        """
        return self._when_swiped

    @when_swiped.setter
    def when_swiped(self, value):
        self._when_swiped = value

    @property
    def rotation_segments(self):
        """
        Sets or returns the number of virtual segments the Blue Dot is split into for rotating.
        Defaults to ``8``.
        """
        return self._rotation_segments

    @rotation_segments.setter
    def rotation_segments(self, value):
        self._rotation_segments = value

    @property
    def when_rotated(self):
        """
        Sets or returns the function which is called when the Blue Dot is rotated (like an
        iPod clock wheel). 

        The function should accept 0 or 1 parameters; if the function accepts 1 parameter, an
        instance of ``BlueDotRotation`` will be passed representing how the Blue Dot was
        rotated.
        """
        return self._when_rotated

    @when_rotated.setter
    def when_rotated(self, value):
        self._when_rotated = value

    @property
    def when_client_connects(self):
        """
        Sets or returns the function which is called when a Blue Dot connects. 
        """
        return self._when_client_connects

    @when_client_connects.setter
    def when_client_connects(self, value):
        self._when_client_connects = value

    @property
    def when_client_disconnects(self):
        """
        Sets or returns the function which is called when a Blue Dot disconnects. 
        """
        return self._when_client_disconnects

    @when_client_disconnects.setter
    def when_client_disconnects(self, value):
        self._when_client_disconnects = value

    @property
    def print_messages(self):
        """
        When set to ``True``, messages relating to the status of the bluetooth server
        will be printed.
        """
        return self._print_messages

    @print_messages.setter
    def print_messages(self, value):
        self._print_messages = value

    @property
    def running(self):
        """
        Returns ``True`` if the server is running.
        """
        return self._server.running

    def start(self):
        """
        Start the BluetoothServer if it is not already running. By default the server is started at
        initialisation.
        """
        self._server.start()
        self._print_message("Server started {}".format(
            self.server.server_address))
        self._print_message("Waiting for connection")

    def _create_server(self):
        self._server = BluetoothServer(
            self._data_received,
            when_client_connects=self._client_connected,
            when_client_disconnects=self._client_disconnected,
            device=self.device,
            port=self.port,
            power_up_device=self._power_up_device,
            auto_start=False)

    def stop(self):
        """
        Stop the bluetooth server.
        """
        self._server.stop()

    def wait_for_connection(self, timeout=None):
        """
        Waits until a Blue Dot client connects. 
        Returns ``True`` if a client connects. 

        :param float timeout:
            Number of seconds to wait for a connection, if ``None`` (the default),
            it will wait indefinitely for a connection from a Blue Dot client.
        """
        return self._is_connected_event.wait(timeout)

    def wait_for_press(self, timeout=None):
        """
        Waits until a Blue Dot is pressed. 
        Returns ``True`` if the Blue Dot was pressed. 

        :param float timeout:
            Number of seconds to wait for a Blue Dot to be pressed, if ``None``
            (the default), it will wait indefinitely.
        """
        return self._is_pressed_event.wait(timeout)

    def wait_for_double_press(self, timeout=None):
        """
        Waits until a Blue Dot is double pressed. 
        Returns ``True`` if the Blue Dot was double pressed. 

        :param float timeout:
            Number of seconds to wait for a Blue Dot to be double pressed, if ``None``
            (the default), it will wait indefinitely.
        """
        return self._is_double_pressed_event.wait(timeout)

    def wait_for_release(self, timeout=None):
        """
        Waits until a Blue Dot is released. 
        Returns ``True`` if the Blue Dot was released. 

        :param float timeout:
            Number of seconds to wait for a Blue Dot to be released, if ``None`` 
            (the default), it will wait indefinitely.
        """
        return self._is_released_event.wait(timeout)

    def wait_for_move(self, timeout=None):
        """
        Waits until the position where the Blue Dot is pressed is moved. 
        Returns ``True`` if the position pressed on the Blue Dot was moved. 

        :param float timeout:
            Number of seconds to wait for the position that the Blue Dot 
            is pressed to move, if ``None`` (the default), it will wait indefinitely.
        """
        return self._is_moved_event.wait(timeout)

    def wait_for_swipe(self, timeout=None):
        """
        Waits until the Blue Dot is swiped. 
        Returns ``True`` if the Blue Dot was swiped. 

        :param float timeout:
            Number of seconds to wait for the Blue Dot to be swiped, if ``None``
            (the default), it will wait indefinitely.
        """
        return self._is_swiped_event.wait(timeout)

    def allow_pairing(self, timeout=60):
        """
        Allow a Bluetooth device to pair with your Raspberry Pi by putting the adapter
        into discoverable and pairable mode.

        :param int timeout:
            The time in seconds the adapter will remain pairable. If set to ``None``
            the device will be discoverable and pairable indefinitely.
        """
        self.server.adapter.allow_pairing(timeout=timeout)

    def _client_connected(self):
        self._is_connected_event.set()
        self._print_message("Client connected {}".format(
            self.server.client_address))
        if self.when_client_connects:
            self.when_client_connects()

    def _client_disconnected(self):
        self._is_connected_event.clear()
        self._print_message("Client disconnected")
        if self.when_client_disconnects:
            self.when_client_disconnects()

    def _data_received(self, data):
        #add the data received to the buffer
        self._data_buffer += data

        #get any full commands ended by \n
        last_command = self._data_buffer.rfind("\n")
        if last_command != -1:
            commands = self._data_buffer[:last_command].split("\n")
            self._process_commands(commands)
            #remove the processed commands from the buffer
            self._data_buffer = self._data_buffer[last_command + 1:]

    def _process_commands(self, commands):
        for command in commands:
            operation, x, y = command.split(",")
            position = BlueDotPosition(x, y)
            #update the current position
            self._position = position

            #dot released
            if operation == "0":
                self._released(position)

            #dot pressed
            elif operation == "1":
                self._pressed(position)

            #dot pressed position moved
            elif operation == "2":
                self._moved(position)

    def _pressed(self, position):
        self._is_pressed_event.set()
        self._is_released_event.clear()
        self._is_moved_event.clear()

        self._double_pressed(position)

        #create new interaction
        self._interaction = BlueDotInteraction(position)

        self._process_callback(self.when_pressed, position)

    def _double_pressed(self, position):
        #was there a previous interaction
        if self._interaction:
            #was it less than the time threshold (double_press_time, 0.3s default)
            if self._interaction.duration < self._double_press_time:
                #was the dot pressed again in less than the threshold
                if (time() - self._interaction.released_position.time
                        < self._double_press_time):
                    self._is_double_pressed_event.set()

                    self._process_callback(self.when_double_pressed, position)

                    self._is_double_pressed_event.clear()

    def _released(self, position):
        self._is_pressed_event.clear()
        self._is_released_event.set()
        self._is_moved_event.clear()

        self._interaction.released(position)

        self._process_callback(self.when_released, position)

        self._process_swipe()

    def _moved(self, position):
        self._is_moved_event.set()

        self._interaction.moved(position)

        self._process_callback(self.when_moved, position)

        if self.when_rotated:
            self._process_rotation()

        self._is_moved_event.clear()

    def _process_callback(self, callback, arg):
        if callback:
            if len(getfullargspec(callback).args) == 0:
                call_back_t = WrapThread(target=callback)
            else:
                call_back_t = WrapThread(target=callback, args=(arg, ))
            call_back_t.start()

    def _process_swipe(self):
        #was the Blue Dot swiped?
        swipe = BlueDotSwipe(self._interaction)
        if swipe.valid:
            self._is_swiped_event.set()
            if self.when_swiped:
                self._process_callback(self.when_swiped, swipe)

            self._is_swiped_event.clear()

    def _process_rotation(self):
        rotation = BlueDotRotation(self._interaction, self._rotation_segments)
        if rotation.valid:
            self._process_callback(self.when_rotated, rotation)

    def _print_message(self, message):
        if self.print_messages:
            print(message)
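
Two standalone sketches of the mechanics above (the helper names are invented).
First, the newline-delimited buffering from _data_received: partial commands
stay in the buffer until their terminating "\n" arrives, and each complete
command is an "operation,x,y" string as parsed by _process_commands:

def feed(buffer, data):
    buffer += data
    last = buffer.rfind("\n")
    if last == -1:
        return buffer, []                    # no complete command yet
    return buffer[last + 1:], buffer[:last].split("\n")

buf = ""
buf, cmds = feed(buf, "1,0.5,")              # partial command: buffered
assert cmds == []
buf, cmds = feed(buf, "0.5\n2,0.1,0.2\n0,")  # completes two commands
assert cmds == ["1,0.5,0.5", "2,0.1,0.2"]
assert buf == "0,"                           # trailing partial kept

Second, the timing test from _double_pressed as a plain function (the 0.3 s
threshold mirrors the double_press_time default; the timestamps are invented):

def is_double_press(prev_duration, released_at, pressed_at, threshold=0.3):
    # the previous press must have been short, and the new press must
    # follow the previous release quickly
    return (prev_duration < threshold
            and pressed_at - released_at < threshold)

assert is_double_press(0.1, released_at=10.0, pressed_at=10.2)
assert not is_double_press(0.5, released_at=10.0, pressed_at=10.2)  # held too long
assert not is_double_press(0.1, released_at=10.0, pressed_at=10.5)  # too slow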
Beispiel #34
0
class Collector(Logger):
    '''Main class here. Given the device and the attribute from which the data
       is to be collected, it manages the events and stores them in a file.
    '''
    def __init__(self,
                 devName,
                 attrName,
                 extraAttr,
                 timecollecting,
                 output,
                 loglevel=Logger.info):
        Logger.__init__(self, loglevel)
        self.trace_stream("Collector.__init__()")
        self._devName = devName
        self._dproxy = PyTango.DeviceProxy(self._devName)
        self._attrName = attrName
        self._extraAttrList = []
        self._extraAttrProxies = []
        self.prepareExtraAttrs(extraAttr)
        self._tcollecting = timecollecting
        self._outputName = output
        self._ctrlC = Event()
        self._ctrlC.clear()
        self._events = []
        self._csv = CSVOutput(self._outputName,
                              "%s/%s" % (self._devName, self._attrName),
                              self._extraAttrList, self.getLogLevel())

    #----# first level
    def start(self):
        '''Start the procedure to collect information from the configured
           source and store it at the configured destination.
        '''
        self.trace_stream("Collector.start()")
        self.subscribe()
        self._csv.open()
        self._csv.writeHeader()
        start_t = time.time()
        while not self._ctrlC.isSet():
            seconds = int(time.time() - start_t)
            if seconds % 10 == 0 and seconds >= 10 and self._checkFileTooBig():
                break
            if seconds % 60 == 0 and seconds >= 60 and \
                                            self._checkTimeCollecting(seconds):
                break
            time.sleep(1)
        self.unsubscribe()
        self._csv.close()

    def stop(self):
        '''Procedure to stop a data collection.
        '''
        self.trace_stream("Collector.stop()")
        self._ctrlC.set()
        self.info_stream("Wait the stop process to finish...")

    #----# second level
    def subscribe(self):
        '''Subscribe to all the events this object needs.
        '''
        self.trace_stream("Collector.subscribe()")
        try:
            eventId = self._dproxy.subscribe_event(
                self._attrName, PyTango.EventType.CHANGE_EVENT,
                self._histogramChangeEvent)
            self._events.append(eventId)
        except Exception as e:
            self.error_stream("cannot subscribe", e)
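
A hypothetical usage sketch of the start/stop pair above, run in the main
thread with Ctrl-C translated into a clean stop (the device and attribute
names are invented; PyTango must be available):

import signal

collector = Collector("sys/tg_test/1", "double_scalar", [], 3600, "out.csv")
signal.signal(signal.SIGINT, lambda sig, frame: collector.stop())
collector.start()  # blocks until stop(), the size check or the time limit
Beispiel #35
0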
class TorrentCleanerThread(Thread):
  """Really naive transmission torrent cleaner"""
  def __init__(self, host="127.0.0.1", port=9091, ratio=1.0, logger=logging.getLogger()):
    super(TorrentCleanerThread, self).__init__()
    self._clean = Event()
    self._quit = Event()
    self.host = host
    self.port = port
    self.ratio = ratio
    self.log = logger
    self.finished = False
    self.client = None
    self.torrents = {}

  def clean(self):
    self._clean.set()

  def stop(self):
    self._quit.set()
    self._clean.set()
    self.finished = True

  def should_quit(self):
    if self._quit.is_set():
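      # exit() raises SystemExit, which ends only this worker thread,
      # not the whole interpreter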
      self.log.info("Cleaner shutting down")
      exit(0)

  def run(self):
    while not self._quit.is_set():
      self.log.info("Waiting for 'clean' event")
      self._clean.wait()
      self.should_quit()

      self.log.info("Running torrent cleaner")
      self.finished = False

      for i in range(1, 5):
        self.should_quit()
        if self.finished:
          break
        self.log.debug("Clean attempt #{}.".format(i))
        try:
          self.log.debug("Acquiring client.")
          self.client = self.client or transmissionrpc.Client(self.host, port=self.port)
          self.log.debug("Getting list of torrents.")
          torrents = self.client.get_torrents()
          t_ids = [t.id for t in torrents]
          for rm in [t for t in self.torrents if t not in t_ids]:
            self.log.info("Removing '{}' from torrent list".format(self.torrents[rm]["name"]))
            self.torrents.pop(rm, None)
          if not torrents:
            self.log.info("No torrents to process!")
            self.finished = True
          else:
            for torrent in torrents:
              self.should_quit()
              if torrent.id not in self.torrents:
                self.torrents[torrent.id] = {"name": torrent.name, "reason": "", "strikes": 0}
              t = self.torrents[torrent.id]
              if torrent_finished(torrent, self.ratio):
                self.log.info("Torrent #{} ('{}') reporting finished.".format(torrent.id, torrent.name))
                t["strikes"] += 1
                t["reason"] = "finished (Done: {}, Ratio: {})".format(torrent.isFinished, torrent.ratio)
              elif torrent_stalled(torrent):
                self.log.info("Torrent #{} ('{}') reporting stalled.".format(torrent.id, torrent.name))
                t["strikes"] += 1
                t["reason"] = "stalled (Stalled: {}, Status: {})".format(torrent.isStalled, torrent.status)
              self.torrents[torrent.id] = t
              if t["strikes"] >= 3:
                self.log.info("Torrent #{} ('{}') being removed because it is {}".format(torrent.id, torrent.name, t["reason"]))
                self.client.remove_torrent(torrent.id, delete_data=True)
                self.torrents.pop(torrent.id, None)
              else:
                self.log.info("Torrent #{} ('{}') is still active (Progress: {}%, Ratio: {})".format(torrent.id, torrent.name, torrent.progress, torrent.ratio))
            else:
              self.finished = True
        except Exception as e:
          self.log.error("ERROR: {}".format(e))
          if i < 4:
            sleep(i * 3)
          continue
      self.log.info("Cleaning run complete!")
      self.client = None
      self._clean.clear()
    self.should_quit()
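
A minimal usage sketch for the thread above (the ratio and the 10-minute
cadence are invented; transmissionrpc must be installed):

import time
import logging

cleaner = TorrentCleanerThread(ratio=1.5, logger=logging.getLogger("cleaner"))
cleaner.start()          # run() blocks on the internal _clean Event
try:
    while True:
        cleaner.clean()  # wake the thread for one cleaning pass
        time.sleep(600)  # trigger a pass every 10 minutes
except KeyboardInterrupt:
    cleaner.stop()       # sets both events so run() can exit
    cleaner.join()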
Beispiel #36
0
class Module(MgrModule):
    COMMANDS = [
        {
            "cmd": "balancer status",
            "desc": "Show balancer status",
            "perm": "r",
        },
        {
            "cmd":
            "balancer mode name=mode,type=CephChoices,strings=none|crush-compat|upmap",
            "desc": "Set balancer mode",
            "perm": "rw",
        },
        {
            "cmd": "balancer on",
            "desc": "Enable automatic balancing",
            "perm": "rw",
        },
        {
            "cmd": "balancer off",
            "desc": "Disable automatic balancing",
            "perm": "rw",
        },
        {
            "cmd": "balancer eval name=plan,type=CephString,req=false",
            "desc":
            "Evaluate data distribution for the current cluster or specific plan",
            "perm": "r",
        },
        {
            "cmd": "balancer eval-verbose name=plan,type=CephString,req=false",
            "desc":
            "Evaluate data distribution for the current cluster or specific plan (verbosely)",
            "perm": "r",
        },
        {
            "cmd":
            "balancer optimize name=plan,type=CephString name=pools,type=CephString,n=N,req=false",
            "desc": "Run optimizer to create a new plan",
            "perm": "rw",
        },
        {
            "cmd": "balancer show name=plan,type=CephString",
            "desc": "Show details of an optimization plan",
            "perm": "r",
        },
        {
            "cmd": "balancer rm name=plan,type=CephString",
            "desc": "Discard an optimization plan",
            "perm": "rw",
        },
        {
            "cmd": "balancer reset",
            "desc": "Discard all optimization plans",
            "perm": "rw",
        },
        {
            "cmd": "balancer dump name=plan,type=CephString",
            "desc": "Show an optimization plan",
            "perm": "r",
        },
        {
            "cmd": "balancer execute name=plan,type=CephString",
            "desc": "Execute an optimization plan",
            "perm": "r",
        },
    ]
    active = False
    run = True
    plans = {}
    mode = ''

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)
        self.event = Event()

    def handle_command(self, command):
        self.log.warn("Handling command: '%s'" % str(command))
        if command['prefix'] == 'balancer status':
            s = {
                'plans': self.plans.keys(),
                'active': self.active,
                'mode': self.get_config('mode', default_mode),
            }
            return (0, json.dumps(s, indent=4), '')
        elif command['prefix'] == 'balancer mode':
            self.set_config('mode', command['mode'])
            return (0, '', '')
        elif command['prefix'] == 'balancer on':
            if not self.active:
                self.set_config('active', '1')
                self.active = True
            self.event.set()
            return (0, '', '')
        elif command['prefix'] == 'balancer off':
            if self.active:
                self.set_config('active', '')
                self.active = False
            self.event.set()
            return (0, '', '')
        elif command['prefix'] in ('balancer eval', 'balancer eval-verbose'):
            verbose = command['prefix'] == 'balancer eval-verbose'
            if 'plan' in command:
                plan = self.plans.get(command['plan'])
                if not plan:
                    return (-errno.ENOENT, '',
                            'plan %s not found' % command['plan'])
                ms = plan.final_state()
            else:
                ms = MappingState(self.get_osdmap(), self.get("pg_dump"),
                                  'current cluster')
            return (0, self.evaluate(ms, verbose=verbose), '')
        elif command['prefix'] == 'balancer optimize':
            pools = []
            if 'pools' in command:
                pools = command['pools']
            osdmap = self.get_osdmap()
            valid_pool_names = [
                p['pool_name'] for p in osdmap.dump().get('pools', [])
            ]
            invalid_pool_names = []
            for p in pools:
                if p not in valid_pool_names:
                    invalid_pool_names.append(p)
            if len(invalid_pool_names):
                return (-errno.EINVAL, '',
                        'pools %s not found' % invalid_pool_names)
            plan = self.plan_create(command['plan'], osdmap, pools)
            self.optimize(plan)
            return (0, '', '')
        elif command['prefix'] == 'balancer rm':
            self.plan_rm(command['plan'])
            return (0, '', '')
        elif command['prefix'] == 'balancer reset':
            self.plans = {}
            return (0, '', '')
        elif command['prefix'] == 'balancer dump':
            plan = self.plans.get(command['plan'])
            if not plan:
                return (-errno.ENOENT, '',
                        'plan %s not found' % command['plan'])
            return (0, plan.dump(), '')
        elif command['prefix'] == 'balancer show':
            plan = self.plans.get(command['plan'])
            if not plan:
                return (-errno.ENOENT, '',
                        'plan %s not found' % command['plan'])
            return (0, plan.show(), '')
        elif command['prefix'] == 'balancer execute':
            plan = self.plans.get(command['plan'])
            if not plan:
                return (-errno.ENOENT, '',
                        'plan %s not found' % command['plan'])
            self.execute(plan)
            self.plan_rm(command['plan'])
            return (0, '', '')
        else:
            return (-errno.EINVAL, '',
                    "Command not found '{0}'".format(command['prefix']))

    def shutdown(self):
        self.log.info('Stopping')
        self.run = False
        self.event.set()

    def time_in_interval(self, tod, begin, end):
        if begin <= end:
            return tod >= begin and tod < end
        else:
            return tod >= begin or tod < end
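
    # Worked examples for time_in_interval above, using the zero-padded HHMM
    # strings that serve() passes in (plain string comparison works because
    # the fields are fixed-width). Here `m` is assumed to be a Module instance:
    #
    #     >>> m.time_in_interval('1200', '0900', '1700')   # plain window
    #     True
    #     >>> m.time_in_interval('1800', '0900', '1700')
    #     False
    #     >>> m.time_in_interval('0100', '2300', '0600')   # crosses midnight
    #     True
    #     >>> m.time_in_interval('1200', '2300', '0600')
    #     False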

    def serve(self):
        self.log.info('Starting')
        while self.run:
            self.active = self.get_config('active', '') != ''
            begin_time = self.get_config('begin_time') or '0000'
            end_time = self.get_config('end_time') or '2400'
            timeofday = time.strftime('%H%M', time.localtime())
            self.log.debug('Waking up [%s, scheduled for %s-%s, now %s]',
                           "active" if self.active else "inactive", begin_time,
                           end_time, timeofday)
            sleep_interval = float(
                self.get_config('sleep_interval', default_sleep_interval))
            if self.active and self.time_in_interval(timeofday, begin_time,
                                                     end_time):
                self.log.debug('Running')
                name = 'auto_%s' % time.strftime(TIME_FORMAT, time.gmtime())
                plan = self.plan_create(name, self.get_osdmap(), [])
                if self.optimize(plan):
                    self.execute(plan)
                self.plan_rm(name)
            self.log.debug('Sleeping for %d', sleep_interval)
            self.event.wait(sleep_interval)
            self.event.clear()
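
    # The event.wait(sleep_interval) above is an interruptible sleep: unlike
    # time.sleep, a set() from another thread (shutdown, or the "balancer
    # on"/"balancer off" handlers above) wakes the loop immediately, and
    # clear() re-arms it for the next iteration. A standalone sketch:
    #
    #     from threading import Event, Thread
    #     import time
    #
    #     event = Event()
    #     Thread(target=lambda: (time.sleep(0.2), event.set())).start()
    #     start = time.time()
    #     event.wait(60)                      # nominally a 60 s sleep...
    #     event.clear()
    #     print(time.time() - start)          # ...but wakes after ~0.2 s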

    def plan_create(self, name, osdmap, pools):
        plan = Plan(
            name,
            MappingState(osdmap, self.get("pg_dump"),
                         'plan %s initial' % name), pools)
        self.plans[name] = plan
        return plan

    def plan_rm(self, name):
        if name in self.plans:
            del self.plans[name]

    def calc_eval(self, ms):
        pe = Eval(ms)
        pool_rule = {}
        pool_info = {}
        for p in ms.osdmap_dump.get('pools', []):
            pe.pool_name[p['pool']] = p['pool_name']
            pe.pool_id[p['pool_name']] = p['pool']
            pool_rule[p['pool_name']] = p['crush_rule']
            pe.pool_roots[p['pool_name']] = []
            pool_info[p['pool_name']] = p
        pools = pe.pool_id.keys()
        if len(pools) == 0:
            return pe
        self.log.debug('pool_name %s' % pe.pool_name)
        self.log.debug('pool_id %s' % pe.pool_id)
        self.log.debug('pools %s' % pools)
        self.log.debug('pool_rule %s' % pool_rule)

        osd_weight = {
            a['osd']: a['weight']
            for a in ms.osdmap_dump.get('osds', [])
        }

        # get expected distributions by root
        actual_by_root = {}
        rootids = ms.crush.find_takes()
        roots = []
        for rootid in rootids:
            root = ms.crush.get_item_name(rootid)
            pe.root_ids[root] = rootid
            roots.append(root)
            ls = ms.osdmap.get_pools_by_take(rootid)
            pe.root_pools[root] = []
            for poolid in ls:
                pe.pool_roots[pe.pool_name[poolid]].append(root)
                pe.root_pools[root].append(pe.pool_name[poolid])
            weight_map = ms.crush.get_take_weight_osd_map(rootid)
            adjusted_map = {
                osd: cw * osd_weight.get(osd, 1.0)
                for osd, cw in weight_map.iteritems()
            }
            sum_w = sum(adjusted_map.values()) or 1.0
            pe.target_by_root[root] = {
                osd: w / sum_w
                for osd, w in adjusted_map.iteritems()
            }
            actual_by_root[root] = {
                'pgs': {},
                'objects': {},
                'bytes': {},
            }
            for osd in pe.target_by_root[root].iterkeys():
                actual_by_root[root]['pgs'][osd] = 0
                actual_by_root[root]['objects'][osd] = 0
                actual_by_root[root]['bytes'][osd] = 0
            pe.total_by_root[root] = {
                'pgs': 0,
                'objects': 0,
                'bytes': 0,
            }
        self.log.debug('pool_roots %s' % pe.pool_roots)
        self.log.debug('root_pools %s' % pe.root_pools)
        self.log.debug('target_by_root %s' % pe.target_by_root)

        # pool and root actual
        for pool, pi in pool_info.iteritems():
            poolid = pi['pool']
            pm = ms.pg_up_by_poolid[poolid]
            pgs = 0
            objects = 0
            bytes = 0
            pgs_by_osd = {}
            objects_by_osd = {}
            bytes_by_osd = {}
            for root in pe.pool_roots[pool]:
                for osd in pe.target_by_root[root].iterkeys():
                    pgs_by_osd[osd] = 0
                    objects_by_osd[osd] = 0
                    bytes_by_osd[osd] = 0
            for pgid, up in pm.iteritems():
                for osd in [int(osd) for osd in up]:
                    if osd == CRUSHMap.ITEM_NONE:
                        continue
                    pgs_by_osd[osd] += 1
                    objects_by_osd[osd] += ms.pg_stat[pgid]['num_objects']
                    bytes_by_osd[osd] += ms.pg_stat[pgid]['num_bytes']
                    # pick a root to associate this pg instance with.
                    # note that this is imprecise if the roots have
                    # overlapping children.
                    # FIXME: divide bytes by k for EC pools.
                    for root in pe.pool_roots[pool]:
                        if osd in pe.target_by_root[root]:
                            actual_by_root[root]['pgs'][osd] += 1
                            actual_by_root[root]['objects'][osd] += ms.pg_stat[
                                pgid]['num_objects']
                            actual_by_root[root]['bytes'][osd] += ms.pg_stat[
                                pgid]['num_bytes']
                            pgs += 1
                            objects += ms.pg_stat[pgid]['num_objects']
                            bytes += ms.pg_stat[pgid]['num_bytes']
                            pe.total_by_root[root]['pgs'] += 1
                            pe.total_by_root[root]['objects'] += ms.pg_stat[
                                pgid]['num_objects']
                            pe.total_by_root[root]['bytes'] += ms.pg_stat[
                                pgid]['num_bytes']
                            break
            pe.count_by_pool[pool] = {
                'pgs': {k: v
                        for k, v in pgs_by_osd.iteritems()},
                'objects': {k: v
                            for k, v in objects_by_osd.iteritems()},
                'bytes': {k: v
                          for k, v in bytes_by_osd.iteritems()},
            }
            pe.actual_by_pool[pool] = {
                'pgs': {
                    k: float(v) / float(max(pgs, 1))
                    for k, v in pgs_by_osd.iteritems()
                },
                'objects': {
                    k: float(v) / float(max(objects, 1))
                    for k, v in objects_by_osd.iteritems()
                },
                'bytes': {
                    k: float(v) / float(max(bytes, 1))
                    for k, v in bytes_by_osd.iteritems()
                },
            }
            pe.total_by_pool[pool] = {
                'pgs': pgs,
                'objects': objects,
                'bytes': bytes,
            }
        for root, m in pe.total_by_root.iteritems():
            pe.count_by_root[root] = {
                'pgs': {
                    k: float(v)
                    for k, v in actual_by_root[root]['pgs'].iteritems()
                },
                'objects': {
                    k: float(v)
                    for k, v in actual_by_root[root]['objects'].iteritems()
                },
                'bytes': {
                    k: float(v)
                    for k, v in actual_by_root[root]['bytes'].iteritems()
                },
            }
            pe.actual_by_root[root] = {
                'pgs': {
                    k: float(v) / float(max(pe.total_by_root[root]['pgs'], 1))
                    for k, v in actual_by_root[root]['pgs'].iteritems()
                },
                'objects': {
                    k:
                    float(v) / float(max(pe.total_by_root[root]['objects'], 1))
                    for k, v in actual_by_root[root]['objects'].iteritems()
                },
                'bytes': {
                    k:
                    float(v) / float(max(pe.total_by_root[root]['bytes'], 1))
                    for k, v in actual_by_root[root]['bytes'].iteritems()
                },
            }
        self.log.debug('actual_by_pool %s' % pe.actual_by_pool)
        self.log.debug('actual_by_root %s' % pe.actual_by_root)

        # average and stddev and score
        pe.stats_by_root = {
            a: pe.calc_stats(b, pe.target_by_root[a], pe.total_by_root[a])
            for a, b in pe.count_by_root.iteritems()
        }

        # the scores are already normalized
        pe.score_by_root = {
            r: {
                'pgs': pe.stats_by_root[r]['pgs']['score'],
                'objects': pe.stats_by_root[r]['objects']['score'],
                'bytes': pe.stats_by_root[r]['bytes']['score'],
            }
            for r in pe.total_by_root.keys()
        }

        # total score is just average of normalized stddevs
        pe.score = 0.0
        for r, vs in pe.score_by_root.iteritems():
            for k, v in vs.iteritems():
                pe.score += v
        pe.score /= 3 * len(roots)
        return pe
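
    # Worked example of the final averaging above (numbers invented): one
    # normalized score per (root, metric) pair, averaged over
    # 3 metrics * len(roots) roots:
    #
    #     score_by_root = {
    #         'default': {'pgs': 0.12, 'objects': 0.06, 'bytes': 0.09},
    #         'ssd':     {'pgs': 0.03, 'objects': 0.03, 'bytes': 0.00},
    #     }
    #     score = sum(v for vs in score_by_root.values() for v in vs.values())
    #     score /= 3 * len(score_by_root)     # 0.33 / 6 = 0.055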

    def evaluate(self, ms, verbose=False):
        pe = self.calc_eval(ms)
        return pe.show(verbose=verbose)

    def optimize(self, plan):
        self.log.info('Optimize plan %s' % plan.name)
        plan.mode = self.get_config('mode', default_mode)
        max_misplaced = float(
            self.get_config('max_misplaced', default_max_misplaced))
        self.log.info('Mode %s, max misplaced %f' % (plan.mode, max_misplaced))

        info = self.get('pg_status')
        unknown = info.get('unknown_pgs_ratio', 0.0)
        degraded = info.get('degraded_ratio', 0.0)
        inactive = info.get('inactive_pgs_ratio', 0.0)
        misplaced = info.get('misplaced_ratio', 0.0)
        self.log.debug('unknown %f degraded %f inactive %f misplaced %g',
                       unknown, degraded, inactive, misplaced)
        if unknown > 0.0:
            self.log.info('Some PGs (%f) are unknown; waiting', unknown)
        elif degraded > 0.0:
            self.log.info('Some objects (%f) are degraded; waiting', degraded)
        elif inactive > 0.0:
            self.log.info('Some PGs (%f) are inactive; waiting', inactive)
        elif misplaced >= max_misplaced:
            self.log.info('Too many objects (%f > %f) are misplaced; waiting',
                          misplaced, max_misplaced)
        else:
            if plan.mode == 'upmap':
                return self.do_upmap(plan)
            elif plan.mode == 'crush-compat':
                return self.do_crush_compat(plan)
            elif plan.mode == 'none':
                self.log.info('Idle')
            else:
                self.log.info('Unrecognized mode %s' % plan.mode)
        return False

    def do_upmap(self, plan):
        self.log.info('do_upmap')
        max_iterations = int(self.get_config('upmap_max_iterations', 10))
        max_deviation = float(self.get_config('upmap_max_deviation', .01))

        ms = plan.initial
        if len(plan.pools):
            pools = plan.pools
        else:  # all
            pools = [
                str(i['pool_name']) for i in ms.osdmap_dump.get('pools', [])
            ]
        if len(pools) == 0:
            self.log.info('no pools, nothing to do')
            return False
        # shuffle pool list so they all get equal (in)attention
        random.shuffle(pools)
        self.log.info('pools %s' % pools)

        inc = plan.inc
        total_did = 0
        left = max_iterations
        for pool in pools:
            did = ms.osdmap.calc_pg_upmaps(inc, max_deviation, left, [pool])
            total_did += did
            left -= did
            if left <= 0:
                break
        self.log.info('prepared %d/%d changes' % (total_did, max_iterations))
        return True

    def do_crush_compat(self, plan):
        self.log.info('do_crush_compat')
        max_iterations = int(self.get_config('crush_compat_max_iterations',
                                             25))
        if max_iterations < 1:
            return False
        step = float(self.get_config('crush_compat_step', .5))
        if step <= 0 or step >= 1.0:
            return False
        max_misplaced = float(
            self.get_config('max_misplaced', default_max_misplaced))
        min_pg_per_osd = 2

        ms = plan.initial
        osdmap = ms.osdmap
        crush = osdmap.get_crush()
        pe = self.calc_eval(ms)
        if pe.score == 0:
            self.log.info('Distribution is already perfect')
            return False

        # get current osd reweights
        orig_osd_weight = {
            a['osd']: a['weight']
            for a in ms.osdmap_dump.get('osds', [])
        }
        reweighted_osds = [
            a for a, b in orig_osd_weight.iteritems() if b < 1.0 and b > 0.0
        ]

        # get current compat weight-set weights
        orig_ws = self.get_compat_weight_set_weights(ms)
        if orig_ws is None:
            return False
        orig_ws = {a: b for a, b in orig_ws.iteritems() if a >= 0}

        # Make sure roots don't overlap their devices.  If so, we
        # can't proceed.
        roots = pe.target_by_root.keys()
        self.log.debug('roots %s', roots)
        visited = {}
        overlap = {}
        root_ids = {}
        for root, wm in pe.target_by_root.iteritems():
            for osd in wm.iterkeys():
                if osd in visited:
                    overlap[osd] = 1
                visited[osd] = 1
        if len(overlap) > 0:
            self.log.error('error: some osds belong to multiple subtrees: %s' %
                           overlap)
            return False

        key = 'pgs'  # pgs objects or bytes

        # go
        best_ws = copy.deepcopy(orig_ws)
        best_ow = copy.deepcopy(orig_osd_weight)
        best_pe = pe
        left = max_iterations
        bad_steps = 0
        next_ws = copy.deepcopy(best_ws)
        next_ow = copy.deepcopy(best_ow)
        while left > 0:
            # adjust
            self.log.debug('best_ws %s' % best_ws)
            random.shuffle(roots)
            for root in roots:
                pools = best_pe.root_pools[root]
                osds = len(best_pe.target_by_root[root])
                min_pgs = osds * min_pg_per_osd
                if best_pe.total_by_root[root][key] < min_pgs:
                    self.log.info(
                        'Skipping root %s (pools %s), total pgs %d '
                        '< minimum %d (%d per osd)', root, pools,
                        best_pe.total_by_root[root][key], min_pgs,
                        min_pg_per_osd)
                    continue
                self.log.info('Balancing root %s (pools %s) by %s' %
                              (root, pools, key))
                target = best_pe.target_by_root[root]
                actual = best_pe.actual_by_root[root][key]
                queue = sorted(actual.keys(),
                               key=lambda osd: -abs(target[osd] - actual[osd]))
                for osd in queue:
                    if orig_osd_weight[osd] == 0:
                        self.log.debug('skipping out osd.%d', osd)
                    else:
                        deviation = target[osd] - actual[osd]
                        if deviation == 0:
                            break
                        self.log.debug('osd.%d deviation %f', osd, deviation)
                        weight = best_ws[osd]
                        ow = orig_osd_weight[osd]
                        if actual[osd] > 0:
                            calc_weight = (target[osd] / actual[osd]
                                           * weight * ow)
                        else:
                            # not enough to go on here... keep the weight-set
                            # weight (scaled to cancel out the osd reweight)
                            calc_weight = weight / orig_osd_weight[osd]
                        new_weight = weight * (1.0 - step) + calc_weight * step
                        self.log.debug('Reweight osd.%d %f -> %f', osd, weight,
                                       new_weight)
                        next_ws[osd] = new_weight
                        if ow < 1.0:
                            new_ow = min(
                                1.0, max(step + (1.0 - step) * ow, ow + .005))
                            self.log.debug('Reweight osd.%d reweight %f -> %f',
                                           osd, ow, new_ow)
                            next_ow[osd] = new_ow

                # normalize weights under this root
                root_weight = crush.get_item_weight(pe.root_ids[root])
                root_sum = sum(b for a, b in next_ws.iteritems()
                               if a in target.keys())
                if root_sum > 0 and root_weight > 0:
                    factor = root_sum / root_weight
                    self.log.debug(
                        'normalizing root %s %d, weight %f, '
                        'ws sum %f, factor %f', root, pe.root_ids[root],
                        root_weight, root_sum, factor)
                    for osd in actual.keys():
                        next_ws[osd] = next_ws[osd] / factor

            # recalc
            plan.compat_ws = copy.deepcopy(next_ws)
            next_ms = plan.final_state()
            next_pe = self.calc_eval(next_ms)
            next_misplaced = next_ms.calc_misplaced_from(ms)
            self.log.debug('Step result score %f -> %f, misplacing %f',
                           best_pe.score, next_pe.score, next_misplaced)

            if next_misplaced > max_misplaced:
                if best_pe.score < pe.score:
                    self.log.debug('Step misplaced %f > max %f, stopping',
                                   next_misplaced, max_misplaced)
                    break
                step /= 2.0
                next_ws = copy.deepcopy(best_ws)
                next_ow = copy.deepcopy(best_ow)
                self.log.debug(
                    'Step misplaced %f > max %f, reducing step to %f',
                    next_misplaced, max_misplaced, step)
            else:
                if next_pe.score > best_pe.score * 1.0001:
                    bad_steps += 1
                    if bad_steps < 5 and random.randint(0, 100) < 70:
                        self.log.debug('Score got worse, taking another step')
                    else:
                        step /= 2.0
                        next_ws = copy.deepcopy(best_ws)
                        next_ow = copy.deepcopy(best_ow)
                        self.log.debug(
                            'Score got worse, trying smaller step %f', step)
                else:
                    bad_steps = 0
                    best_pe = next_pe
                    best_ws = next_ws
                    best_ow = next_ow
                    if best_pe.score == 0:
                        break
            left -= 1

        # allow a small regression if we are phasing out osd weights
        fudge = 0
        if next_ow != orig_osd_weight:
            fudge = .001

        if best_pe.score < pe.score + fudge:
            self.log.info('Success, score %f -> %f', pe.score, best_pe.score)
            plan.compat_ws = best_ws
            for osd, w in best_ow.iteritems():
                if w != orig_osd_weight[osd]:
                    self.log.debug('osd.%d reweight %f', osd, w)
                    plan.osd_weights[osd] = w
            return True
        else:
            self.log.info('Failed to find further optimization, score %f',
                          pe.score)
            plan.compat_ws = {}
            return False

    def get_compat_weight_set_weights(self, ms):
        if '-1' not in ms.crush_dump.get('choose_args', {}):
            # enable compat weight-set first
            self.log.debug('ceph osd crush weight-set create-compat')
            result = CommandResult('')
            self.send_command(
                result, 'mon', '',
                json.dumps({
                    'prefix': 'osd crush weight-set create-compat',
                    'format': 'json',
                }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('Error creating compat weight-set')
                return

            result = CommandResult('')
            self.send_command(
                result, 'mon', '',
                json.dumps({
                    'prefix': 'osd crush dump',
                    'format': 'json',
                }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('Error dumping crush map')
                return
            try:
                crushmap = json.loads(outb)
            except ValueError:
                raise RuntimeError('unable to parse crush map')
        else:
            crushmap = ms.crush_dump

        raw = crushmap.get('choose_args', {}).get('-1', [])
        weight_set = {}
        for b in raw:
            bucket = None
            for t in crushmap['buckets']:
                if t['id'] == b['bucket_id']:
                    bucket = t
                    break
            if not bucket:
                raise RuntimeError('could not find bucket %s' % b['bucket_id'])
            self.log.debug('bucket items %s' % bucket['items'])
            self.log.debug('weight set %s' % b['weight_set'][0])
            if len(bucket['items']) != len(b['weight_set'][0]):
                raise RuntimeError(
                    'weight-set size does not match bucket items')
            for pos in range(len(bucket['items'])):
                weight_set[bucket['items'][pos]
                           ['id']] = b['weight_set'][0][pos]

        self.log.debug('weight_set weights %s' % weight_set)
        return weight_set

    def do_crush(self):
        self.log.info('do_crush (not yet implemented)')

    def do_osd_weight(self):
        self.log.info('do_osd_weight (not yet implemented)')

    def execute(self, plan):
        self.log.info('Executing plan %s' % plan.name)

        commands = []

        # compat weight-set
        if len(plan.compat_ws) and \
           '-1' not in plan.initial.crush_dump.get('choose_args', {}):
            self.log.debug('ceph osd crush weight-set create-compat')
            result = CommandResult('')
            self.send_command(
                result, 'mon', '',
                json.dumps({
                    'prefix': 'osd crush weight-set create-compat',
                    'format': 'json',
                }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('Error creating compat weight-set')
                return

        for osd, weight in plan.compat_ws.iteritems():
            self.log.info(
                'ceph osd crush weight-set reweight-compat osd.%d %f', osd,
                weight)
            result = CommandResult('')
            self.send_command(
                result, 'mon', '',
                json.dumps({
                    'prefix': 'osd crush weight-set reweight-compat',
                    'format': 'json',
                    'item': 'osd.%d' % osd,
                    'weight': [weight],
                }), '')
            commands.append(result)

        # new_weight
        reweightn = {}
        for osd, weight in plan.osd_weights.iteritems():
            reweightn[str(osd)] = str(int(weight * float(0x10000)))
        if len(reweightn):
            self.log.info('ceph osd reweightn %s', reweightn)
            result = CommandResult('')
            self.send_command(
                result, 'mon', '',
                json.dumps({
                    'prefix': 'osd reweightn',
                    'format': 'json',
                    'weights': json.dumps(reweightn),
                }), '')
            commands.append(result)

        # upmap
        incdump = plan.inc.dump()
        for pgid in incdump.get('old_pg_upmap_items', []):
            self.log.info('ceph osd rm-pg-upmap-items %s', pgid)
            result = CommandResult('foo')
            self.send_command(
                result, 'mon', '',
                json.dumps({
                    'prefix': 'osd rm-pg-upmap-items',
                    'format': 'json',
                    'pgid': pgid,
                }), 'foo')
            commands.append(result)

        for item in incdump.get('new_pg_upmap_items', []):
            self.log.info('ceph osd pg-upmap-items %s mappings %s',
                          item['pgid'], item['mappings'])
            osdlist = []
            for m in item['mappings']:
                osdlist += [m['from'], m['to']]
            result = CommandResult('foo')
            self.send_command(
                result, 'mon', '',
                json.dumps({
                    'prefix': 'osd pg-upmap-items',
                    'format': 'json',
                    'pgid': item['pgid'],
                    'id': osdlist,
                }), 'foo')
            commands.append(result)

        # wait for commands
        self.log.debug('commands %s' % commands)
        for result in commands:
            r, outb, outs = result.wait()
            if r != 0:
                self.log.error('Error on command')
                return
        self.log.debug('done')
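
A note on the main loop in do_crush_compat above: it nudges each compat weight-set weight toward the value implied by the osd's target/actual PG ratio, smoothing with the configured step and halving the step whenever a move makes things worse. Below is a minimal, self-contained sketch of just that smoothing update, with hypothetical numbers; the real module re-evaluates the whole cluster after every step.

def smooth_reweight(weight, target, actual, osd_reweight, step):
    # one smoothing step, mirroring the update in do_crush_compat
    if actual > 0:
        calc_weight = target / actual * weight * osd_reweight
    else:
        calc_weight = weight / osd_reweight
    return weight * (1.0 - step) + calc_weight * step

w = 1.0
for _ in range(3):
    # an osd holding 30 PGs that should hold 25 is weighted down
    w = smooth_reweight(w, target=25.0, actual=30.0, osd_reweight=1.0, step=0.5)
    print(round(w, 4))  # 0.9167, 0.8403, 0.7703
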
Example #37
class GDriveUploadManager():
    """handles gdrive upload management, enqueues files, start/stop workers,...
    """
    _MAX_WORKERS: ClassVar[int] = 3

    def __init__(self, upload_fn: Callable[[str], None], queue_size: int = 100) -> None:
        """ctor

        Args:
            upload_fn (Callable[[str], None]): function to upload file
            queue_size (int, optional): gdrive upload queue size. Defaults to 100.
        """
        self._stop_event = Event()
        self._queue: Queue[str] = Queue(maxsize=queue_size)
        self._upload_fn = upload_fn
        self._executor = ThreadPoolExecutor(max_workers=self._MAX_WORKERS, thread_name_prefix='UploadWorkerThread')
        self._worker_futures = None

    def enqueue_files(self, files: List[str]) -> None:
        """enqueue files for upload.

        Args:
            files (List[str]): paths of the files to enqueue
        """
        for file in files:
            self._enqueue_file(file)

    def _enqueue_file(self, file_path: str) -> None:
        LOGGER.debug(f"Enqueuing file: {file_path}")
        try:
            self._queue.put_nowait(file_path)
        except Full:
            LOGGER.warning(f"maxium queue length of {self._queue.maxsize} reached. Loosing item {file_path}")

    def start(self) -> None:
        """fire up workers

        Raises:
            GDriveError: if upload workers already running
        """
        if self._worker_futures:
            raise GDriveError("Upload workers already running")

        LOGGER.info("Starting up workers")
        # set up dictionary of worker futures
        self._worker_futures = dict(
            enumerate(self._executor.submit(self._upload_worker) for _ in range(self._MAX_WORKERS))
        )

    def stop(self) -> None:
        """stop workers gracefully

        Args:
            timeout_sec (float, optional): timeout seconds. Defaults to 10.0.

        Raises:
            GDriveError: if daemon failed to stop within timeout
        """
        if not self._worker_futures:
            LOGGER.debug("Trying to stop workers, but none was running before")
            return

        LOGGER.info("Shutting down workers")
        self._stop_event.set()
        LOGGER.debug("Cancel futures")
        for _, worker_future in self._worker_futures.items():
            worker_future.cancel()

        LOGGER.debug("Shutdown executor workers")
        self._executor.shutdown(wait=True)

        self._worker_futures = None
        self._stop_event.clear()

        LOGGER.info("Shutdown successful")

    def _upload_worker(self) -> None:
        LOGGER.info("Init")

        try:
            # wait between 100ms...500ms for eventual stop
            while not self._stop_event.wait(round(uniform(0.1, 0.5), 1)):
                try:
                    file = self._queue.get_nowait()
                except Empty:
                    # queue is empty, continue loop
                    continue

                try:
                    LOGGER.debug(f"Starting upload: {file}")
                    self._upload_fn(file)
                    LOGGER.debug(f"Upload successful: {file}")
                except GDriveError as e:
                    # gdrive errors do not stop the thread,
                    # so that the upload component can recover from Google Drive errors
                    LOGGER.warning(f"Upload failed: {file} with error: {e}")
                finally:
                    # indicate the formerly enqueued task is done; therefore an
                    # unsuccessful upload (in case of a GDriveError) won't lead to
                    # a retry: the queue should be ready for new work and not be
                    # flooded with old upload retries
                    self._queue.task_done()

        except CamguardError as e:
            LOGGER.exception(f"Unrecoverable error in upload worker: {e.message}", exc_info=e)

        # skipcq: PYL-W0703
        except Exception as e:
            LOGGER.exception("Unrecoverable error in upload worker", exc_info=e)

        LOGGER.info("Exit")
Example #38
class Scales:
    tare = 0

    def __init__(self, port, calibrated_1g=-2000.0, measurements=1, **kwargs):
        self._abort_event = Event()
        self.calibrated_1g = calibrated_1g
        self.measurements = measurements

        if 'MOCK_SCALES' in os.environ:
            logger.warning('Using mocked scales!')
            self.scales = MockScalesImpl()
        else:
            self.scales = ScalesImpl(port=port, **kwargs)

    def reset(self, tare=None):
        self.scales.reset()
        self._raw_measure(6)  # skipping some data to stabilize
        self.tare = tare if tare is not None else self._raw_measure()
        logger.info('set tare to %f', self.tare)

    def _raw_measure(self, measurements=None):
        measures = self.scales.get_raw_data(measurements or self.measurements)
        mean = statistics.mean(measures)
        #logger.debug('mean measurements: %f', mean)
        return mean

    def measure(self):
        try:
            no_tare = self._raw_measure() - self.tare
            #logger.debug('no_tare: %f', no_tare)

            weight_in_gr = no_tare / self.calibrated_1g
            #logger.debug('weight_in_gr: %f', weight_in_gr)

            return weight_in_gr

        except TimeoutError as e:
            raise ScalesTimeoutException from e

    def wait_for_weight(self,
                        target,
                        timeout=20000,
                        on_progress=lambda d, s: None):
        self._abort_event.clear()
        result_queue = Queue()

        def poller():
            logger.debug('started scales poller')
            time_is_out = make_timeout(timeout)
            try:
                v = self.measure()
                while not (v > target if target > 0 else v < target):
                    if self._abort_event.is_set():
                        return result_queue.put(
                            WaitingForWeightAbortedException())

                    if time_is_out():
                        return result_queue.put(ScalesTimeoutException(v))

                    v = self.measure()
                    logger.info('got measurement: %f', v)
                    on_progress(min(v, target), target)

                result_queue.put(v)
            except Exception as e:
                result_queue.put(e)
                raise

        Thread(target=poller, daemon=True).start()

        logger.info('waiting for a target weight of %f', target)
        result = result_queue.get(block=True)

        self.scales.stop()
        if isinstance(result, Exception):
            raise result

        return result

    def abort_waiting_for_weight(self):
        self._abort_event.set()
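
A hypothetical usage sketch; setting MOCK_SCALES selects the MockScalesImpl backend shown above, so no serial hardware is required and the port value is unused.

import os

os.environ['MOCK_SCALES'] = '1'       # use the mocked scales backend
scales = Scales(port='/dev/ttyUSB0')  # hypothetical port, ignored by the mock
scales.reset()                        # settle, then record the tare offset
print('weight (g):', scales.measure())
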
Example #39
class VNSSimulator:
    """The VNS simulator.  It gives clients control of nodes in simulated
    topologies."""
    def __init__(self):
        # close out any hanging stats records (shouldn't be any unless the
        # server was shut down abnormally with no chance to clean up)
        db.UsageStats.objects.filter(active=True).update(active=False)

        # free any hanging temporary topologies
        for t in db.Topology.objects.filter(temporary=True):
            AddressAllocation.free_topology(t.id)

        self.topologies = {} # maps active topology ID to its Topology object
        self.resolver = TopologyResolver() # maps MAC/IP addresses to a Topology
        self.clients = {}    # maps active conn to the topology ID it is conn to
        self.server = create_vns_server(VNS_DEFAULT_PORT,
                                        self.handle_recv_msg,
                                        self.handle_new_client,
                                        self.handle_client_disconnected)
        self.ti_clients = {} # maps active TI conns to the topology ID it is conn to
        self.ti_server = create_ti_server(TI_DEFAULT_PORT,
                                          self.handle_recv_ti_msg,
                                          self.handle_new_client,
                                          self.handle_ti_client_disconnected)
        if BORDER_DEV_NAME:
            self.__start_raw_socket(BORDER_DEV_NAME)
            # run pcap in another thread (it will run forever)
            reactor.callInThread(self.__run_pcap, BORDER_DEV_NAME)
        else:
            self.raw_socket = None

        # lock used to prevent self.topologies from being *changed* by the main
        # twisted thread while the topology queue service thread is reading it
        self.topologies_lock = Lock()

        # communicates from the main twisted thread to the topology queue
        # service thread that the topologies dictionary has changed
        self.topologies_changed = False

        # The topology queue service thread will wait on this condition for
        # a chosen/dequeued job to finish (so it can pick the next one).
        self.service_condition = Condition()

        # Is set when a job is enqueued.  Is cleared when the queues are empty.
        # The topology queue service thread will clear this event if it makes
        # a pass over all the queues and they are empty.  If it makes a pass
        # and this event is already cleared, then it will wait on this event.
        self.job_available_event = Event()

        # run the topology queue service thread
        reactor.callInThread(self.__run_topology_queue_service_thread)

        self.periodic_callback()

    def __run_pcap(self, dev):
        """Start listening for packets coming in from the outside world."""
        MAX_LEN      = 2000    # max size of packet to capture
        PROMISCUOUS  = 1       # promiscuous mode?
        READ_TIMEOUT = 100     # in milliseconds
        MAX_PKTS     = -1      # number of packets to capture; -1 => no limit

        # the method which will be called when a packet is captured
        def ph(_, data):
            # thread safety: call from the main twisted event loop
            reactor.callFromThread(self.handle_packet_from_outside, data)

        # start the packet capture
        try:
            p = open_live(dev, MAX_LEN, PROMISCUOUS, READ_TIMEOUT)
        except PcapError:
            log_exception(logging.CRITICAL, 'failed to start pcap')
            sys.exit(-1)

        p.setfilter(PCAP_FILTER)
        logging.info("Listening on %s: net=%s, mask=%s, filter=%s" % (dev, p.getnet(), p.getmask(), PCAP_FILTER))
        p.loop(MAX_PKTS, ph)

    def __run_topology_queue_service_thread(self):
        """Monitors the job queue of each topology and serves them in a round
        robin fashion."""
        # list of queues to service
        local_job_queues_list = []

        while True:
            # whether or not a job has been serviced on this loop
            serviced_a_job = False

            # get a copy of the latest topology list in a thread-safe manner
            with self.topologies_lock:
                if self.topologies_changed:
                    local_job_queues_list = [t.job_queue for t in self.topologies.values()]
                    self.topologies_changed = False

            # serve each topology's queue
            for q in local_job_queues_list:
                job = q.start_service()
                while job:
                    # thread safety: run each job from the main twisted event loop
                    self.__return_after_running_job_on_main_thread(job)
                    job = q.task_done()
                    serviced_a_job = True

            # If we haven't serviced any jobs on this pass, block until a job
            # is enqueued (no reason to run up the CPU by repeatedly checking
            # empty queues).  The event is cleared on the first idle pass and
            # waited on if the next pass is idle too, so a job enqueued in
            # between is not missed.
            if not serviced_a_job:
                if self.job_available_event.is_set():
                    self.job_available_event.clear()
                else:
                    self.job_available_event.wait()

    def __do_job_then_notify(self, job):
        """Acquires the service_condition lock, runs job, and the notifies all
        threads waiting on service_condition."""
        with self.service_condition:
            job()
            self.service_condition.notifyAll()

    def __return_after_running_job_on_main_thread(self, job):
        """Requests that job be run on the main thread.  Waits on
        service_condition until it is notified that the job is done."""
        with self.service_condition:
            # ask the main thread to run our job (it cannot start until we release this lock)
            reactor.callFromThread(lambda : self.__do_job_then_notify(job))

            # wait for the main thread to finish running the job
            self.service_condition.wait()

    def __start_raw_socket(self, dev):
        """Starts a socket for sending raw Ethernet frames."""
        try:
            self.raw_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
            self.raw_socket.bind((dev, 0x9999))
        except socket.error as e:
            if e.errno == errno.EPERM:
                extra = ' (did you forget to run me with root?)'
            else:
                extra = ''
            log_exception(logging.CRITICAL, 'failed to open raw socket' + extra)
            sys.exit(-1)

    def periodic_callback(self):
        # save statistics values
        for topo in self.topologies.values():
            stats = topo.get_stats()
            if not stats.save_if_changed() and stats.get_idle_time_sec() > MAX_INACTIVE_TOPOLOGY_LIFE_SEC:
                self.stop_topology(topo, 'topology exceeded maximum idle time (%dsec)' % MAX_INACTIVE_TOPOLOGY_LIFE_SEC)
            elif stats.get_num_sec_connected() > MAX_TOPOLOGY_LIFE_SEC:
                self.stop_topology(topo, 'topology exceeded maximum lifetime (%dsec)' % MAX_TOPOLOGY_LIFE_SEC)

        # see if there is any admin message to be sent to all clients
        try:
            bts = db.SystemInfo.objects.get(name='banner_to_send')
            msg_for_clients = bts.value
            bts.delete()
            logging.info('sending message to clients: %s' % msg_for_clients)
            for conn in self.clients.keys():
                for m in VNSBanner.get_banners(msg_for_clients):
                    conn.send(m)
        except db.SystemInfo.DoesNotExist:
            pass

        # note in the db that the reactor thread is still running
        try:
            latest = db.SystemInfo.objects.get(name='last_alive_time')
        except db.SystemInfo.DoesNotExist:
            latest = db.SystemInfo()
            latest.name = 'last_alive_time'
        latest.value = str(int(time()))
        latest.save()

        reactor.callLater(30, self.periodic_callback)

    def handle_packet_from_outside(self, packet):
        """Forwards packet to the appropriate simulation, if any."""
        if len(packet) < 14:
            return # too small to even have an Ethernet header

        # determine which topology(ies) should receive this packet
        pkt = ProtocolHelper.Packet(packet)
        if pkt.is_valid_ipv4():
            topos = self.resolver.resolve_ip(pkt.ip_dst, pkt.ip_src)
            str_addr = 'dst=%s src=%s' % (addrstr(pkt.ip_dst), addrstr(pkt.ip_src))
            rewrite_dst_mac = True
        elif pkt.is_dst_mac_broadcast():
            return # ignore broadcasts
        else:
            topos = self.resolver.resolve_mac(pkt.mac_dst)
            str_addr = 'dst=%s' % addrstr(pkt.mac_dst)
            rewrite_dst_mac = False

        # forward the packet to the appropriate topology(ies)
        if topos:
            logging.debug('sniffed raw packet to %s (topology %s): %s' %
                          (str_addr, ','.join([str(t.id) for t in topos]), pktstr(packet)))
            for topo in topos:
                topo.create_job_for_incoming_packet(packet, rewrite_dst_mac)
                self.job_available_event.set()

    def handle_recv_msg(self, conn, vns_msg):
        if vns_msg is not None:
            logging.debug('recv VNS msg: %s' % vns_msg)
            if vns_msg.get_type() == VNSAuthReply.get_type():
                self.handle_auth_reply(conn, vns_msg, self.terminate_connection)
                return
            elif not conn.vns_authorized:
                logging.warning('received non-auth-reply from unauthenticated user %s: terminating the user' % conn)
                self.terminate_connection(conn, 'simulator expected authentication reply')
            # user is authenticated => any other messages are ok
            elif vns_msg.get_type() == VNSOpen.get_type():
                self.handle_open_msg(conn, vns_msg)
            elif vns_msg.get_type() == VNSClose.get_type():
                self.handle_close_msg(conn)
            elif vns_msg.get_type() == VNSPacket.get_type():
                self.handle_packet_msg(conn, vns_msg)
            elif vns_msg.get_type() == VNSOpenTemplate.get_type():
                self.handle_open_template_msg(conn, vns_msg)
            else:
                logging.debug('unexpected VNS message received: %s' % vns_msg)

    def start_topology(self, tid, client_ip, user):
        """Handles starting up the specified topology id.  Returns a 2-tuple.
        The first element is None and the second is a string if an error occurs;
        otherwise the first element is the topology."""
        try:
            topo = Topology(tid, self.raw_socket, client_ip, user)
            topo.interactors = [] # list of TI connections to this topo
        except TopologyCreationException as e:
            return (None, str(e))
        except db.Topology.DoesNotExist:
            return (None, 'topology %d does not exist' % tid)
        except db.IPAssignment.DoesNotExist:
            return (None, 'topology %d is missing an IP assignment' % tid)
        except db.IPBlockAllocation.DoesNotExist:
            return (None, 'topology %d is not allocated any IPs' % tid)
        except:
            msg = 'topology instantiation unexpectedly failed'
            log_exception(logging.ERROR, msg)
            return (None, msg)

        if topo.has_gateway():
            self.resolver.register_topology(topo)
        with self.topologies_lock:
            self.topologies[tid] = topo
            self.topologies_changed = True
        return (topo, None)

    def stop_topology(self, topo, why, notify_client=True, log_it=True, lvl=logging.INFO):
        """Terminates all clients on a particular topology.  This will in turn
        cause the topology to be deactivated."""
        for client_conn in topo.get_clients():
            self.terminate_connection(client_conn, why, notify_client, log_it, lvl)

    def terminate_connection(self, conn, why, notify_client=True, log_it=True, lvl=logging.INFO):
        """Terminates the client connection conn.  This event will be logged
        unless log_it is False.  If notify_client is True, then the client will
        be sent a VNSClose message with an explanation."""
        # terminate the client
        if conn.connected:
            if notify_client:
                for m in VNSClose.get_banners_and_close(why):
                    conn.send(m)
            conn.transport.loseConnection()

        if log_it:
            logging.log(lvl, 'terminating client (%s): %s' % (conn, why))

        # cleanup client and topology info
        tid = self.clients.get(conn)
        if tid is not None:
            del self.clients[conn]
            topo = self.topologies[tid]
            topo.client_disconnected(conn)
            if not topo.is_active():
                if topo.has_gateway():
                    self.resolver.unregister_topology(topo)
                with self.topologies_lock:
                    del self.topologies[tid]
                    self.topologies_changed = True
                topo.get_stats().finalize()
                if topo.is_temporary():
                    AddressAllocation.free_topology(tid)
                for ti_conn in topo.interactors:
                    self.terminate_ti_connection(ti_conn, 'GOODBYE: Topology %d has been shutdown' % tid)

    def handle_open_msg(self, conn, open_msg):
        # get the topology the client is trying to connect to
        self.handle_connect_to_topo(conn, open_msg.topo_id, open_msg.vhost)

    def handle_connect_to_topo(self, conn, tid, vhost):
        logging.info('client %s connected to topology %d' % (conn, tid))
        try:
            topo = self.topologies[tid]
        except KeyError:
            client_ip = conn.transport.getPeer().host
            (topo, err_msg) = self.start_topology(tid, client_ip, conn.vns_user_profile.user)
            if topo is None:
                self.terminate_connection(conn, err_msg)
                return

        # try to connect the client to the requested node
        self.clients[conn] = tid
        requested_name = vhost.replace('\x00', '')
        user = conn.vns_user_profile.user
        ret = topo.connect_client(conn, user, requested_name)
        if not ret.is_success():
            self.terminate_connection(conn, ret.fail_reason)
        else:
            self.send_motd_to_client(conn)
        if ret.prev_client:
            self.terminate_connection(ret.prev_client,
                                      'a new client (%s) has connected to the topology' % conn)

    def send_motd_to_client(self, conn):
        """Sends a message to a newly connected client, if such a a message is set."""
        # see if there is any admin message to be sent to a client upon connecting
        try:
            msg_for_client = db.SystemInfo.objects.get(name='motd').value
            logging.info('sending message to clients: %s' % msg_for_client)
            for m in VNSBanner.get_banners(msg_for_client):
                conn.send(m)
        except db.SystemInfo.DoesNotExist:
            pass

    def handle_open_template_msg(self, conn, ot):
        try:
            template = db.TopologyTemplate.objects.get(name=ot.template_name)
        except db.TopologyTemplate.DoesNotExist:
            self.terminate_connection(conn, "template '%s' does not exist" % ot.template_name)
            return

        # find an IP block to allocate IPs from for this user
        blocks = db.IPBlock.objects.filter(org=conn.vns_user_profile.org)
        if not blocks:
            self.terminate_connection(conn, "your organization (%s) has no available IP blocks" % conn.vns_user_profile.org)
            return
        ip_block_from = blocks[0]

        if ot.get_src_filters() == VNSOpenTemplate.NO_SRC_FILTERS:
            src_filters = []
        else:
            src_filters = ot.get_src_filters()
        err_msg, topo, alloc, tree = AddressAllocation.instantiate_template(conn.vns_user_profile.user,
                                                                            template,
                                                                            ip_block_from,
                                                                            src_filters,
                                                                            True, True)
        if err_msg:
            self.terminate_connection(conn, err_msg)
        else:
            s2intfnum = '2' if ot.template_name == '1-router 2-server' else '1'
            rtable_msg = VNSRtable(ot.vrhost, VNSSimulator.build_rtable(topo, s2intfnum))
            conn.send(rtable_msg)
            logging.debug('Sent client routing table message: %s' % rtable_msg)
            self.handle_connect_to_topo(conn, topo.id, ot.vrhost)

    @staticmethod
    def build_rtable(topo, s2intfnum):
        # TODO: write this function for real; just a quick hack for now
        s1 = db.IPAssignment.objects.get(topology=topo, port__node=db.Node.objects.get(template=topo.template, name='Server1'))
        s2 = db.IPAssignment.objects.get(topology=topo, port__node=db.Node.objects.get(template=topo.template, name='Server2'))
        return '\n'.join(['0.0.0.0  172.24.74.17  0.0.0.0  eth0',
                          '%s  %s  255.255.255.254  eth1' % (s1.ip, s1.ip),
                          '%s  %s  255.255.255.254  eth%s' % (s2.ip, s2.ip, s2intfnum)])

    def handle_new_client(self, conn):
        """Sends an authentication request to the new user."""
        logging.debug("client %s connected: sending auth request" % conn)
        conn.vns_auth_salt = os.urandom(20)
        conn.vns_authorized = False
        conn.vns_user_profile = None
        conn.send(VNSAuthRequest(conn.vns_auth_salt))

    def handle_auth_reply(self, conn, ar, terminate_connection):
        if not conn.vns_auth_salt:
            msg = 'unexpectedly received authentication reply from conn_user=%s ar_user=%s at %s'
            terminate_connection(conn, msg % (conn.vns_user_profile, ar.username, conn))
            return

        try:
            up = db.UserProfile.objects.get(user__username=ar.username, retired=False)
        except db.UserProfile.DoesNotExist:
            logging.info('unrecognized username tried to login: %s' % ar.username)
            terminate_connection(conn, "authentication failed")
            return

        expected = hashlib.sha1(conn.vns_auth_salt + str(up.get_sim_auth_key())).digest()
        if ar.ssp != expected:
            logging.info('user %s provided an incorrect password' % ar.username)
            terminate_connection(conn, "authentication failed")
        else:
            conn.vns_auth_salt = None # only need one auth reply
            conn.vns_authorized = True
            conn.vns_user_profile = up
            msg = 'authenticated %s as %s' % (conn, ar.username)
            conn.send(VNSAuthStatus(True, msg))

    def handle_client_disconnected(self, conn):
        self.terminate_connection(conn,
                                  'client disconnected (%s)' % conn,
                                  notify_client=False)

    def handle_close_msg(self, conn):
        self.terminate_connection(conn,
                                  'client sent VNSClose (%s)' % conn,
                                  notify_client=False)

    def handle_packet_msg(self, conn, pkt_msg):
        try:
            tid = self.clients[conn]
        except KeyError:
            msg = 'client %s sent VNSPacket message while not connected to any topology' % conn
            self.terminate_connection(conn, msg, lvl=logging.WARN)
            return

        try:
            topo = self.topologies[tid]
        except KeyError:
            msg = 'client %s sent VNSPacket message but its topology (%d) is not active' % (conn, tid)
            self.terminate_connection(conn, msg, lvl=logging.WARN)
            return

        try:
            ret = topo.handle_packet_from_client(conn, pkt_msg)
        except KeyError:
            msg = 'client %s sent VNSPacket message but its topology (%d) does not think it is connected to any node' % (conn, tid)
            self.terminate_connection(conn, msg, lvl=logging.WARN)
            return

        if ret is not True: # bad interface name was given
            self.terminate_connection(conn, ret)

    def cleanup_and_exit(self):
        """Cleanly terminate connected clients and then forcibly terminate the program."""
        # see if the admin put a reason for the shutdown in the database
        try:
            why = db.SystemInfo.objects.get(name='shutdown_reason').value
        except db.SystemInfo.DoesNotExist:
            why = 'the simulator is shutting down'

        logging.info('VNS simulator shutting down: %s' % why)
        for conn in self.clients.keys():
            self.terminate_connection(conn, why)
        os._exit(0) # force the termination (otherwise the pcap thread keeps going)

    def handle_recv_ti_msg(self, conn, ti_msg):
        if ti_msg is not None:
            logging.debug('recv VNS TI msg: %s' % ti_msg)
            if ti_msg.get_type() == VNSAuthReply.get_type():
                self.handle_auth_reply(conn, ti_msg, self.terminate_ti_connection)
                return
            elif not conn.vns_authorized:
                logging.warning('received non-auth-reply from unauthenticated TI user %s: terminating the user' % conn)
                self.terminate_ti_connection(conn, 'ERROR: simulator expected authentication reply')
                return
            # user is authenticated => any other messages are ok
            elif ti_msg.get_type() == TIOpen.get_type():
                self.handle_ti_open_msg(conn, ti_msg)
                return

            # all of the remaining messages require the associated topology
            topo = self.ti_conn_to_topo(conn)
            if not topo:
                return
            try:
                if ti_msg.get_type() == TIPacket.get_type():
                    self.handle_ti_packet_msg(conn, topo, ti_msg)
                elif ti_msg.get_type() == TIPingFromRequest.get_type():
                    self.handle_ti_pingfrom_msg(conn, topo, ti_msg)
                elif ti_msg.get_type() == TITap.get_type():
                    self.handle_ti_tap_msg(conn, topo, ti_msg)
                elif ti_msg.get_type() == TIModifyLink.get_type():
                    self.handle_ti_modifylink_msg(conn, topo, ti_msg)
                else:
                    logging.debug('unexpected VNS TI message received: %s' % ti_msg)
            except TIBadNodeOrPort as e:
                conn.send(e)
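
The __return_after_running_job_on_main_thread/__do_job_then_notify pair above is a blocking hand-off: one thread asks another to run a job, then waits on a shared Condition until it has run. A self-contained sketch of the same pattern, with a plain worker thread standing in for the twisted reactor:

import queue
import threading

cond = threading.Condition()
jobs = queue.Queue()

def main_loop():
    # stands in for the reactor thread that actually runs the jobs
    while True:
        job = jobs.get()
        with cond:
            job()               # run the requested job...
            cond.notify_all()   # ...then wake the waiting requester

threading.Thread(target=main_loop, daemon=True).start()

def run_on_main_thread_and_wait(job):
    with cond:
        jobs.put(job)  # main_loop cannot run the job until we release the lock
        cond.wait()    # releases the lock; woken once the job has run

run_on_main_thread_and_wait(lambda: print("ran on the main loop"))
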
Example #40
class WMISampler(Thread):
    """
    WMI Sampler.
    """
    def __init__(
        self,
        logger,
        class_name,
        property_names,
        filters="",
        host="localhost",
        namespace="root\\cimv2",
        provider=None,
        username="",
        password="",
        and_props=None,
        timeout_duration=10,
    ):
        Thread.__init__(self)
        # Properties
        self._provider = None
        self._formatted_filters = None

        # Type resolution state
        self._property_counter_types = None

        # Samples
        self._current_sample = None
        self._previous_sample = None

        # Sampling state
        self._sampling = False

        self.logger = logger

        # Connection information
        self.host = host
        self.namespace = namespace
        self.provider = provider
        self.username = username
        self.password = password

        self.is_raw_perf_class = "_PERFRAWDATA_" in class_name.upper()

        # Sampler settings
        #   WMI class, properties, filters and counter types
        #   Include required properties for making calculations with raw
        #   performance counters:
        #   https://msdn.microsoft.com/en-us/library/aa394299(v=vs.85).aspx
        if self.is_raw_perf_class:
            property_names.extend([
                "Timestamp_Sys100NS",
                "Frequency_Sys100NS",
                # IMPORTANT: To improve performance and since they're currently
                # not needed, do not include the other Timestamp/Frequency
                # properties:
                #   - Timestamp_PerfTime
                #   - Timestamp_Object
                #   - Frequency_PerfTime
                #   - Frequency_Object
            ])

        self.class_name = class_name
        self.property_names = property_names
        self.filters = filters
        self._and_props = and_props if and_props is not None else []
        self._timeout_duration = timeout_duration

        self._runSampleEvent = Event()
        self._sampleComplete = Event()
        self.daemon = True

        self.start()

    def run(self):
        try:
            pythoncom.CoInitialize()
        except Exception as e:
            self.logger.info("exception in CoInitialize {}".format(e))
            raise

        while True:
            self._runSampleEvent.wait()
            self._runSampleEvent.clear()
            if self.is_raw_perf_class and not self._previous_sample:
                self._current_sample = self._query()

            self._previous_sample = self._current_sample
            self._current_sample = self._query()
            self._sampleComplete.set()

    @property
    def provider(self):
        """
        Return the WMI provider.
        """
        return self._provider

    @provider.setter
    def provider(self, value):
        """
        Validate and set a WMI provider. Default to `ProviderArchitecture.DEFAULT`
        """
        result = None

        # `None` defaults to `ProviderArchitecture.DEFAULT`
        defaulted_value = value or ProviderArchitecture.DEFAULT

        try:
            parsed_value = int(defaulted_value)
        except ValueError:
            pass
        else:
            if parsed_value in ProviderArchitecture:
                result = parsed_value

        if result is None:
            self.logger.error(
                u"Invalid '%s' WMI Provider Architecture. The parameter is ignored.",
                value)

        self._provider = result or ProviderArchitecture.DEFAULT

    @property
    def connection(self):
        """
        A property to retrieve the sampler connection information.
        """
        return {
            'host': self.host,
            'namespace': self.namespace,
            'username': self.username,
            'password': self.password
        }

    @property
    def connection_key(self):
        """
        Return an index key used to cache the sampler connection.
        """
        return "{host}:{namespace}:{username}".format(host=self.host,
                                                      namespace=self.namespace,
                                                      username=self.username)

    @property
    def formatted_filters(self):
        """
        Cache and return filters as a comprehensive WQL clause.
        """
        if not self._formatted_filters:
            filters = deepcopy(self.filters)
            self._formatted_filters = self._format_filter(
                filters, self._and_props)
        return self._formatted_filters

    def reset_filter(self, new_filters):
        self.filters = new_filters
        # get rid of the formatted filters so they'll be recalculated
        self._formatted_filters = None

    def sample(self):
        """
        Compute new samples.
        """
        self._sampling = True
        self._runSampleEvent.set()
        self._sampleComplete.wait()
        self._sampleComplete.clear()
        self._sampling = False

    def __len__(self):
        """
        Return the number of WMI Objects in the current sample.
        """
        # No data is returned while sampling
        if self._sampling:
            raise TypeError(u"Sampling `WMISampler` object has no len()")

        return len(self._current_sample)

    def __iter__(self):
        """
        Iterate on the current sample's WMI Objects and format the property values.
        """
        # No data is returned while sampling
        if self._sampling:
            raise TypeError(u"Sampling `WMISampler` object is not iterable")

        if self.is_raw_perf_class:
            # Format required
            for previous_wmi_object, current_wmi_object in zip(
                    self._previous_sample, self._current_sample):
                formatted_wmi_object = self._format_property_values(
                    previous_wmi_object, current_wmi_object)
                yield formatted_wmi_object
        else:
            #  No format required
            for wmi_object in self._current_sample:
                yield wmi_object

    def __getitem__(self, index):
        """
        Get the specified formatted WMI Object from the current sample.
        """
        if self.is_raw_perf_class:
            previous_wmi_object = self._previous_sample[index]
            current_wmi_object = self._current_sample[index]
            formatted_wmi_object = self._format_property_values(
                previous_wmi_object, current_wmi_object)
            return formatted_wmi_object
        else:
            return self._current_sample[index]

    def __eq__(self, other):
        """
        Equality operator is based on the current sample.
        """
        return self._current_sample == other

    def __str__(self):
        """
        Stringify the current sample's WMI Objects.
        """
        return str(self._current_sample)

    def _get_property_calculator(self, counter_type):
        """
        Return the calculator for the given `counter_type`.
        Fallback with `get_raw`.
        """
        calculator = get_raw
        try:
            calculator = get_calculator(counter_type)
        except UndefinedCalculator:
            self.logger.warning(
                u"Undefined WMI calculator for counter_type {counter_type}."
                " Values are reported as RAW.".format(
                    counter_type=counter_type))

        return calculator

    def _format_property_values(self, previous, current):
        """
        Format WMI Object's RAW data based on the previous sample.

        Do not overwrite the original WMI Object!
        """
        formatted_wmi_object = CaseInsensitiveDict()

        for property_name, property_raw_value in iteritems(current):
            counter_type = self._property_counter_types.get(property_name)
            property_formatted_value = property_raw_value

            if counter_type:
                calculator = self._get_property_calculator(counter_type)
                property_formatted_value = calculator(previous, current,
                                                      property_name)

            formatted_wmi_object[property_name] = property_formatted_value

        return formatted_wmi_object

    def get_connection(self):
        """
        Create a new WMI connection
        """
        self.logger.debug(
            u"Connecting to WMI server "
            u"(host={host}, namespace={namespace}, provider={provider}, username={username})."
            .format(host=self.host,
                    namespace=self.namespace,
                    provider=self.provider,
                    username=self.username))

        # Initialize COM for the current thread
        # WARNING: any python COM object (locator, connection, etc) created in a thread
        # shouldn't be used in other threads (can lead to memory/handle leaks if done
        # without a deep knowledge of COM's threading model). Because of this and given
        # that we run each query in its own thread, we don't cache connections
        additional_args = []

        if self.provider != ProviderArchitecture.DEFAULT:
            context = Dispatch("WbemScripting.SWbemNamedValueSet")
            context.Add("__ProviderArchitecture", self.provider)
            additional_args = [None, "", 128, context]

        locator = Dispatch("WbemScripting.SWbemLocator")
        connection = locator.ConnectServer(self.host, self.namespace,
                                           self.username, self.password,
                                           *additional_args)

        return connection

    @staticmethod
    def _format_filter(filters, and_props=[]):
        """
        Transform filters to a comprehensive WQL `WHERE` clause.

        Builds filter from a filter list.
        - filters: expects a list of dicts, typically:
                - [{'Property': value},...] or
                - [{'Property': (comparison_op, value)},...]

                NOTE: If we just provide a value we default to the '='
                comparison operator.  Otherwise, specify the operator in a
                tuple as above: (comp_op, value).  If we detect a wildcard
                character ('%') we will override the operator to use LIKE.
        """
        def build_where_clause(fltr):
            f = fltr.pop()
            wql = ""
            while f:
                prop, value = f.popitem()

                if isinstance(value, tuple):
                    oper = value[0]
                    value = value[1]
                elif isinstance(value, string_types) and '%' in value:
                    oper = 'LIKE'
                else:
                    oper = '='

                if isinstance(value, list):
                    if not len(value):
                        continue

                    internal_filter = map(
                        lambda x: (prop, x)
                        if isinstance(x, tuple) else (prop, ('LIKE', x))
                        if '%' in x else (prop, (oper, x)),
                        value,
                    )

                    bool_op = ' OR '
                    for p in and_props:
                        if p.lower() in prop.lower():
                            bool_op = ' AND '
                            break

                    clause = bool_op.join([
                        '{0} {1} \'{2}\''.format(k, v[0], v[1]) if isinstance(
                            v, tuple) else '{0} = \'{1}\''.format(k, v)
                        for k, v in internal_filter
                    ])

                    if bool_op.strip() == 'OR':
                        wql += "( {clause} )".format(clause=clause)
                    else:
                        wql += "{clause}".format(clause=clause)

                else:
                    wql += "{property} {cmp} '{constant}'".format(
                        property=prop, cmp=oper, constant=value)
                if f:
                    wql += " AND "

            # empty list skipped
            if wql.endswith(" AND "):
                wql = wql[:-5]

            if len(fltr) == 0:
                return "( {clause} )".format(clause=wql)

            return "( {clause} ) OR {more}".format(
                clause=wql, more=build_where_clause(fltr))

        if not filters:
            return ""

        return " WHERE {clause}".format(clause=build_where_clause(filters))

    def _query(self):  # pylint: disable=E0202
        """
        Query WMI using WMI Query Language (WQL) & parse the results.

        Returns: List of WMI objects or `TimeoutException`.
        """
        formatted_property_names = ",".join(self.property_names)
        wql = "Select {property_names} from {class_name}{filters}".format(
            property_names=formatted_property_names,
            class_name=self.class_name,
            filters=self.formatted_filters)
        self.logger.debug(u"Querying WMI: {0}".format(wql))

        try:
            # From: https://msdn.microsoft.com/en-us/library/aa393866(v=vs.85).aspx
            flag_return_immediately = 0x10  # Default flag.
            flag_forward_only = 0x20
            flag_use_amended_qualifiers = 0x20000

            query_flags = flag_return_immediately | flag_forward_only

            # For the first query, cache the qualifiers to determine each
            # property's "CounterType"
            includes_qualifiers = self.is_raw_perf_class and self._property_counter_types is None
            if includes_qualifiers:
                self._property_counter_types = CaseInsensitiveDict()
                query_flags |= flag_use_amended_qualifiers

            raw_results = self.get_connection().ExecQuery(
                wql, "WQL", query_flags)

            results = self._parse_results(
                raw_results, includes_qualifiers=includes_qualifiers)

        except pywintypes.com_error:
            self.logger.warning(u"Failed to execute WMI query (%s)",
                                wql,
                                exc_info=True)
            results = []

        return results

    def _parse_results(self, raw_results, includes_qualifiers):
        """
        Parse WMI query results in a more comprehensive form.

        Returns: List of WMI objects
        ```
        [
            {
                'freemegabytes': 19742.0,
                'name': 'C:',
                'avgdiskbytesperwrite': 1536.0
            }, {
                'freemegabytes': 19742.0,
                'name': 'D:',
                'avgdiskbytesperwrite': 1536.0
            }
        ]
        ```
        """
        results = []
        for res in raw_results:
            # Ensure all properties are available. Use case-insensitivity
            # because some properties are returned with different cases.
            item = CaseInsensitiveDict()
            for prop_name in self.property_names:
                item[prop_name] = None

            for wmi_property in res.Properties_:
                # IMPORTANT: To improve performance, only access the Qualifiers
                # if the "CounterType" hasn't already been cached.
                should_get_qualifier_type = (
                    includes_qualifiers
                    and wmi_property.Name not in self._property_counter_types)

                if should_get_qualifier_type:

                    # Can't index into "Qualifiers_" for keys that don't exist
                    # without getting an exception.
                    qualifiers = dict(
                        (q.Name, q.Value) for q in wmi_property.Qualifiers_)

                    # Some properties like "Name" and "Timestamp_Sys100NS" do
                    # not have a "CounterType" (since they're not a counter).
                    # Therefore, they're ignored.
                    if "CounterType" in qualifiers:
                        counter_type = qualifiers["CounterType"]
                        self._property_counter_types[
                            wmi_property.Name] = counter_type

                        self.logger.debug(
                            u"Caching property qualifier CounterType: "
                            "{class_name}.{property_names} = {counter_type}".
                            format(class_name=self.class_name,
                                   property_names=wmi_property.Name,
                                   counter_type=counter_type))
                    else:
                        self.logger.debug(
                            u"CounterType qualifier not found for {class_name}.{property_names}"
                            .format(class_name=self.class_name,
                                    property_names=wmi_property.Name))

                try:
                    item[wmi_property.Name] = float(wmi_property.Value)
                except (TypeError, ValueError):
                    item[wmi_property.Name] = wmi_property.Value

            results.append(item)
        return results
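The qualifier lookup above is memoized: each property's "CounterType" is fetched from COM once and then served from self._property_counter_types. A minimal standalone sketch of the same caching pattern, with illustrative names (fetch_qualifiers is a stand-in for the COM call):

# Sketch of the memoization pattern used above: fetch an expensive
# per-key attribute once, then serve it from a cache on later calls.
_counter_types = {}

def get_counter_type(prop_name, fetch_qualifiers):
    if prop_name not in _counter_types:
        qualifiers = fetch_qualifiers(prop_name)  # expensive COM round-trip
        if 'CounterType' in qualifiers:
            _counter_types[prop_name] = qualifiers['CounterType']
    return _counter_types.get(prop_name)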
Example #41
0
class Device(object):
    def __init__(self, port_name):
        self.port = serial.Serial(port=port_name,
                                  baudrate=9600,
                                  bytesize=serial.EIGHTBITS,
                                  parity=serial.PARITY_NONE,
                                  stopbits=serial.STOPBITS_ONE,
                                  timeout=1)

        self._read_thread = None
        self._read_thread_exit = Event()

    def start_async_reading(self, cb):
        def do_read():
            while not self._read_thread_exit.is_set():
                msg = self.receive_message()
                if msg is not None:
                    cb(msg)

        self._read_thread_exit.clear()
        self._read_thread = Thread(target=do_read)
        self._read_thread.start()

    def stop_async_reading(self):
        self._read_thread_exit.set()
        if self._read_thread is not None:
            self._read_thread.join()

    def _transmit(self, payload):
        crc = crcmod.predefined.Crc('crc-8')
        crc.update(chr(len(payload)))
        crc.update(payload)

        self.port.write('>')
        self.port.write(chr(len(payload)))
        for b in payload:
            self.port.write(b)
        self.port.write(chr(crc.crcValue))
        self.port.write('\x00')

    def _receive(self):
        c = self.port.read()
        if c == '>':
            length = ord(self.port.read())
            payload = self.port.read(length)

            crc = crcmod.predefined.Crc('crc-8')
            crc.update(chr(length))
            crc.update(payload)

            rx_crc = ord(self.port.read())
            if rx_crc == crc.crcValue and self.port.read() == '\x00':
                return payload

    def send_message(self, node_type, node_id, command, payload=None):
        message = chr(node_type) + chr(node_id) + chr(command)
        if payload is not None:
            message = message + payload
        self._transmit(message)

    def receive_message(self):
        payload = self._receive()
        if payload is None:
            return None

        return ord(payload[0]), ord(payload[1]), ord(payload[2]), payload[3:]
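The frame format implemented by _transmit()/_receive() above is '>' + length + payload + CRC-8(length + payload) + '\x00'. A minimal sketch building the same frame as Python 3 bytes (the class above performs Python 2 str-based I/O); crcmod is the only dependency:

import crcmod.predefined

def build_frame(payload: bytes) -> bytes:
    # '>' | length | payload | CRC-8 over (length + payload) | terminator
    # payload must be shorter than 256 bytes to fit the one-byte length field
    crc = crcmod.predefined.Crc('crc-8')
    crc.update(bytes([len(payload)]))
    crc.update(payload)
    return b'>' + bytes([len(payload)]) + payload + bytes([crc.crcValue]) + b'\x00'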
Example #42
0
class LockFile(object):
    """Context manager to protect filepaths with lockfiles.

    .. versionadded:: 1.13

    Creates a lockfile alongside ``protected_path``. Other ``LockFile``
    instances will refuse to lock the same path.

    >>> path = '/path/to/file'
    >>> with LockFile(path):
    ...     with open(path, 'wb') as fp:
    ...         fp.write(data)

    Args:
        protected_path (unicode): File to protect with a lockfile
        timeout (float, optional): Raises an :class:`AcquisitionError`
            if lock cannot be acquired within this number of seconds.
            If ``timeout`` is 0 (the default), wait forever.
        delay (float, optional): How often to check (in seconds) if
            lock has been released.

    Attributes:
        delay (float): How often to check (in seconds) whether the lock
            can be acquired.
        lockfile (unicode): Path of the lockfile.
        timeout (float): How long to wait to acquire the lock.

    """
    def __init__(self, protected_path, timeout=0.0, delay=0.05):
        """Create new :class:`LockFile` object."""
        self.lockfile = protected_path + '.lock'
        self._lockfile = None
        self.timeout = timeout
        self.delay = delay
        self._lock = Event()
        atexit.register(self.release)

    @property
    def locked(self):
        """``True`` if file is locked by this instance."""
        return self._lock.is_set()

    def acquire(self, blocking=True):
        """Acquire the lock if possible.

        If the lock is in use and ``blocking`` is ``False``, return
        ``False``.

        Otherwise, check every :attr:`delay` seconds until the lock is
        acquired or :attr:`timeout` is exceeded, raising an :class:`AcquisitionError`.

        """
        if self.locked and not blocking:
            return False

        start = time.time()
        while True:

            # Raise error if we've been waiting too long to acquire the lock
            if self.timeout and (time.time() - start) >= self.timeout:
                raise AcquisitionError('lock acquisition timed out')

            # If already locked, wait then try again
            if self.locked:
                time.sleep(self.delay)
                continue

            # Create in append mode so we don't lose any contents
            if self._lockfile is None:
                self._lockfile = open(self.lockfile, 'a')

            # Try to acquire the lock
            try:
                fcntl.lockf(self._lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
                self._lock.set()
                break
            except IOError as err:  # pragma: no cover
                if err.errno not in (errno.EACCES, errno.EAGAIN):
                    raise

                # Don't try again
                if not blocking:  # pragma: no cover
                    return False

                # Wait, then try again
                time.sleep(self.delay)

        return True

    def release(self):
        """Release the lock by deleting `self.lockfile`."""
        if not self._lock.is_set():
            return False

        try:
            fcntl.lockf(self._lockfile, fcntl.LOCK_UN)
        except IOError:  # pragma: no cover
            pass
        finally:
            self._lock.clear()
            self._lockfile = None
            try:
                os.unlink(self.lockfile)
            except (IOError, OSError):  # pragma: no cover
                pass

            return True

    def __enter__(self):
        """Acquire lock."""
        self.acquire()
        return self

    def __exit__(self, typ, value, traceback):
        """Release lock."""
        self.release()

    def __del__(self):
        """Clear up `self.lockfile`."""
        self.release()  # pragma: no cover
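A short usage sketch for the class above, beyond the doctest in the docstring: non-blocking acquisition returns False instead of waiting, so a caller can back off gracefully (the path is illustrative):

lock = LockFile('/path/to/file', timeout=5.0)
if lock.acquire(blocking=False):
    try:
        pass  # safe to modify the protected file here
    finally:
        lock.release()
else:
    print('another process holds the lock')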
Example #43
0
class MultiThreadedRunner(InProcessRunner):
    """
    Runs a system with a thread for each process.
    """
    def __init__(self, system: System, poll_interval=None, clock_speed=None):
        super(MultiThreadedRunner, self).__init__(system=system)
        self.poll_interval = poll_interval or DEFAULT_POLL_INTERVAL
        assert isinstance(system, System)
        self.threads = {}
        self.clock_speed = clock_speed
        if self.clock_speed:
            self.clock_event = Event()
            self.stop_clock_event = Event()
        else:
            self.clock_event = None
            self.stop_clock_event = None

    def start(self):
        super(MultiThreadedRunner, self).start()
        assert not self.threads, "Already started"

        self.inboxes = {}
        self.outboxes = {}
        self.clock_events = []

        # Setup queues.
        for process_name, upstream_names in self.system.followings.items():
            inbox_id = process_name.lower()
            if inbox_id not in self.inboxes:
                self.inboxes[inbox_id] = Queue()
            for upstream_class_name in upstream_names:
                outbox_id = upstream_class_name.lower()
                if outbox_id not in self.outboxes:
                    self.outboxes[outbox_id] = PromptOutbox()
                if inbox_id not in self.outboxes[outbox_id].downstream_inboxes:
                    self.outboxes[outbox_id].downstream_inboxes[
                        inbox_id] = self.inboxes[inbox_id]

        # Construct application threads.
        for process_name, process in self.system.processes.items():
            process_instance_id = process_name
            if self.clock_event:
                process.clock_event = self.clock_event
                process.tick_interval = 1 / self.clock_speed

            thread = PromptQueuedApplicationThread(
                process=process,
                poll_interval=self.poll_interval,
                inbox=self.inboxes[process_instance_id],
                outbox=self.outboxes[process_instance_id],
                # Todo: Is it better to clock the prompts or the notifications?
                # clock_event=clock_event
            )
            self.threads[process_instance_id] = thread

        # Start application threads.
        for thread in self.threads.values():
            thread.start()

        # Start clock.
        if self.clock_speed:
            self.start_clock()

    def start_clock(self):
        tick_interval = 1 / self.clock_speed
        # print(f"Tick interval: {tick_interval:.6f}s")
        self.last_tick = None
        self.this_tick = None
        self.tick_adjustment = 0

        def set_clock_event():
            if self.stop_clock_event.is_set():
                return

            self.this_tick = time.process_time()

            if self.last_tick:
                tick_size = self.this_tick - self.last_tick

                tick_oversize = tick_size - tick_interval
                tick_oversize_percentage = 100 * tick_oversize / tick_interval
                if tick_oversize_percentage > 300:
                    print(
                        f"Warning: Tick over size: {tick_size :.6f}s {tick_oversize_percentage:.2f}%"
                    )

                if abs(tick_oversize_percentage) < 300:
                    self.tick_adjustment += 0.5 * tick_interval * tick_oversize
                    max_tick_adjustment = 0.5 * tick_interval
                    min_tick_adjustment = 0
                    self.tick_adjustment = min(self.tick_adjustment,
                                               max_tick_adjustment)
                    self.tick_adjustment = max(self.tick_adjustment,
                                               min_tick_adjustment)

            self.last_tick = self.this_tick

            self.clock_event.set()
            self.clock_event.clear()

            if not self.stop_clock_event.is_set():
                set_timer()

        def set_timer():
            # print(f"Tick adjustment: {self.tick_adjustment:.6f}")
            if self.last_tick is not None:
                time_since_last_tick = time.process_time() - self.last_tick
                time_remaining = tick_interval - time_since_last_tick
                timer_interval = time_remaining - self.tick_adjustment
                if timer_interval < 0:
                    timer_interval = 0
                    # print("Warning: clock thread is running flat out!")
            else:
                timer_interval = 0
            timer = Timer(timer_interval, set_clock_event)
            timer.start()

        set_timer()

    def handle_prompt(self, prompt):
        self.broadcast_prompt(prompt)

    def broadcast_prompt(self, prompt):
        outbox_id = prompt.process_name
        assert outbox_id in self.outboxes, (outbox_id, self.outboxes.keys())
        self.outboxes[outbox_id].put(prompt)

    def close(self):
        super(MultiThreadedRunner, self).close()

        if self.clock_event is not None:
            self.clock_event.set()

        if self.stop_clock_event is not None:
            self.stop_clock_event.set()

        for thread in self.threads.values():
            thread.inbox.put('QUIT')

        for thread in self.threads.values():
            thread.join(timeout=10)

        self.threads.clear()
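The clock in start_clock() relies on the pulse idiom: Event.set() immediately followed by Event.clear() wakes every thread currently blocked in wait() exactly once per tick. A self-contained sketch of that pairing, mirroring the clock_event/stop_clock_event pair above:

from threading import Event, Thread
import time

clock_event = Event()
stop_clock_event = Event()

def process_loop():
    while not stop_clock_event.is_set():
        if clock_event.wait(timeout=1):  # block until the next tick
            print('ticked at', time.monotonic())

t = Thread(target=process_loop)
t.start()
for _ in range(3):
    time.sleep(0.1)
    clock_event.set()    # wake all current waiters...
    clock_event.clear()  # ...and re-arm for the next tick
stop_clock_event.set()
t.join()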
Example #44
0
class Monitor(Thread):
    """ Initiates a connection to the database to store telemetry data
        at regular time intervals

        :param running: an event controlling the process operation
        :param appconfig: the application configuration object
        :param q: the telemetry data queue
        :param id: the recorder thread identifier
        :param enabled: a flag indicating if the monitor is enabled
    """
    def __init__(self, q, appconfig, name=""):
        """ Initializes the recorder object

        :param q: the telemetry data queue
        :param appconfig: the application configuration object
        :param name: a name that can be attributed to the monitor
        """

        Thread.__init__(self)
        self.running = Event()
        if name != "":
            self.id = name
        self.q = q
        self.appconfig = appconfig
        self.enabled = False

    def init_connection(self):
        """Initializes the connection to the GPSD service"""

        try:

            # Attempts to create a connection to the GPSD server
            gpsd.connect(self.appconfig.gpsd_ip_address,
                         self.appconfig.gpsd_port)

            return 0

        except Exception as error:
            logger.error(f"Exception: {str(error)}")
            return -1

    def start(self):
        """Starts the monitor thread"""

        self.running.set()
        self.enabled = True
        super(Monitor, self).start()

    def run(self):
        """ Runs the monitor infinite loop """

        # Opens the connection to the GPSD service
        rcode = self.init_connection()

        if rcode == 0:

            # report location data to the shared queue
            while self.running.is_set():

                self.report_current_location()
                time.sleep(self.appconfig.monitor_delay)

        else:
            logger.error("Failed to connect to the GPS deamon")

    def report_current_location(self):
        """ Gets the current location data from the GPSD and reports it to
            the shared queue as a Location object

            :return: 0 if success or -1 if failure or an exception arises
        """

        try:

            # Get current GPS position
            packet = gpsd.get_current()

            # Unpack location parameters
            mode = packet.mode
            latitude = packet.lat
            longitude = packet.lon
            utc_time = packet.time
            track = packet.track
            hspeed = packet.hspeed

            altitude = None
            climb = None

            if packet.mode == 3:
                altitude = packet.alt
                climb = packet.climb

            loc = location.Location(latitude=latitude, longitude=longitude,
                                    altitude=altitude, heading=track,
                                    climb=climb, horizontal_speed=hspeed,
                                    mode=mode, utc_time=utc_time)

            logger.debug(str(loc))  # TODO: remove after DEBUG

            # Put the location instance in the shared queue
            self.q.put(loc)

            return 0

        except Exception as inst:
            logger.error(
                f'Type: {type(inst)} -- Args: {inst.args} -- Instance: {inst}')
            return -1

    def stop(self):
        """Stops the monitor thread"""

        self.running.clear()

        # disable the monitor
        self.enabled = False
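A hypothetical usage sketch for the monitor above; appconfig is assumed to provide the fields the code reads (gpsd_ip_address, gpsd_port, monitor_delay):

from queue import Queue
import time

q = Queue()
monitor = Monitor(q, appconfig, name='gps-monitor')  # appconfig assumed
monitor.start()   # sets the running Event and spawns the thread
time.sleep(10)    # let it report Location objects into the queue
monitor.stop()    # clears the Event; the run() loop then exits
monitor.join()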
Example #45
0
class CarlaRosBridge(object):
    """
    Carla Ros bridge
    """

    CARLA_VERSION = "0.9.6"

    def __init__(self, carla_world, params):
        """
        Constructor

        :param carla_world: carla world object
        :type carla_world: carla.World
        :param params: dict of parameters, see settings.yaml
        :type params: dict
        """
        # check CARLA version
        dist = pkg_resources.get_distribution("carla")
        if LooseVersion(dist.version) < LooseVersion(self.CARLA_VERSION):
            raise ImportError(
                "CARLA version {} or newer required. CARLA version found: {}".
                format(self.CARLA_VERSION, dist))

        self.parameters = params
        self.actors = {}
        self.pseudo_actors = []
        self.carla_world = carla_world
        self.synchronous_mode_update_thread = None
        self.shutdown = Event()
        # set carla world settings
        self.carla_settings = carla_world.get_settings()

        # workaround: settings can only be applied while not in synchronous mode
        if self.carla_settings.synchronous_mode:
            self.carla_settings.synchronous_mode = False
            carla_world.apply_settings(self.carla_settings)

        rospy.loginfo("synchronous_mode: {}".format(
            self.parameters["synchronous_mode"]))
        self.carla_settings.synchronous_mode = self.parameters[
            "synchronous_mode"]
        rospy.loginfo("fixed_delta_seconds: {}".format(
            self.parameters["fixed_delta_seconds"]))
        self.carla_settings.fixed_delta_seconds = self.parameters[
            "fixed_delta_seconds"]
        carla_world.apply_settings(self.carla_settings)

        self.comm = Communication()
        self.update_lock = Lock()

        self.carla_control_queue = queue.Queue()

        self.status_publisher = CarlaStatusPublisher(
            self.carla_settings.synchronous_mode,
            self.carla_settings.fixed_delta_seconds)

        # for waiting for ego vehicle control commands in synchronous mode,
        # their ids are maintained in a list.
        # Before tick(), the list is filled and the loop waits until the list is empty.
        self._all_vehicle_control_commands_received = Event()
        self._expected_ego_vehicle_control_command_ids = []
        self._expected_ego_vehicle_control_command_ids_lock = Lock()

        if self.carla_settings.synchronous_mode:
            self.carla_run_state = CarlaControl.PLAY

            self.carla_control_subscriber = \
                rospy.Subscriber("/carla/control", CarlaControl,
                                 lambda control: self.carla_control_queue.put(control.command))

            self.synchronous_mode_update_thread = Thread(
                target=self._synchronous_mode_update)
            self.synchronous_mode_update_thread.start()
        else:
            self.timestamp_last_run = 0.0

            self.update_actors_queue = queue.Queue(maxsize=1)

            # start thread to update actors
            self.update_actor_thread = Thread(
                target=self._update_actors_thread)
            self.update_actor_thread.start()

            # create initially existing actors
            self.update_actors_queue.put(
                set([x.id for x in self.carla_world.get_snapshot()]))

            # wait for first actors creation to be finished
            self.update_actors_queue.join()

            # register callback to update actors
            self.on_tick_id = self.carla_world.on_tick(self._carla_time_tick)

        # add world info
        self.pseudo_actors.append(
            WorldInfo(carla_world=self.carla_world, communication=self.comm))

        # add global object sensor
        self.pseudo_actors.append(
            ObjectSensor(parent=None,
                         communication=self.comm,
                         actor_list=self.actors,
                         filtered_id=None))
        self.debug_helper = DebugHelper(carla_world.debug)

        # add traffic light pseudo sensor
        self.pseudo_actors.append(
            TrafficLightsSensor(parent=None,
                                communication=self.comm,
                                actor_list=self.actors))

    def destroy(self):
        """
        Function to destroy this object.

        :return:
        """
        rospy.signal_shutdown("")
        self.debug_helper.destroy()
        self.shutdown.set()
        self.carla_control_queue.put(CarlaControl.STEP_ONCE)
        if not self.carla_settings.synchronous_mode:
            if self.on_tick_id:
                self.carla_world.remove_on_tick(self.on_tick_id)
            self.update_actor_thread.join()
        self._update_actors(set())

        rospy.loginfo("Exiting Bridge")

    def process_run_state(self):
        """
        process state changes
        """
        command = None

        # get last command
        while not self.carla_control_queue.empty():
            command = self.carla_control_queue.get()

        while command is not None and not rospy.is_shutdown():
            self.carla_run_state = command

            if self.carla_run_state == CarlaControl.PAUSE:
                # wait for next command
                rospy.loginfo("State set to PAUSED")
                self.status_publisher.set_synchronous_mode_running(False)
                command = self.carla_control_queue.get()
            elif self.carla_run_state == CarlaControl.PLAY:
                rospy.loginfo("State set to PLAY")
                self.status_publisher.set_synchronous_mode_running(True)
                return
            elif self.carla_run_state == CarlaControl.STEP_ONCE:
                rospy.loginfo("Execute single step.")
                self.status_publisher.set_synchronous_mode_running(True)
                self.carla_control_queue.put(CarlaControl.PAUSE)
                return

    def _synchronous_mode_update(self):
        """
        execution loop for synchronous mode
        """
        while not self.shutdown.is_set():
            self.process_run_state()

            if self.parameters[
                    'synchronous_mode_wait_for_vehicle_control_command']:
                # fill list of available ego vehicles
                self._expected_ego_vehicle_control_command_ids = []
                with self._expected_ego_vehicle_control_command_ids_lock:
                    for actor_id, actor in self.actors.items():
                        if isinstance(actor, EgoVehicle):
                            self._expected_ego_vehicle_control_command_ids.append(
                                actor_id)

            frame = self.carla_world.tick()
            world_snapshot = self.carla_world.get_snapshot()

            self.status_publisher.set_frame(frame)
            self.comm.update_clock(world_snapshot.timestamp)
            rospy.logdebug(
                "Tick for frame {} returned. Waiting for sensor data...".
                format(frame))
            self._update(frame, world_snapshot.timestamp.elapsed_seconds)
            rospy.logdebug("Waiting for sensor data finished.")
            self.comm.send_msgs()
            self._update_actors(set([x.id for x in world_snapshot]))

            if self.parameters[
                    'synchronous_mode_wait_for_vehicle_control_command']:
                # wait for all ego vehicles to send a vehicle control command
                if self._expected_ego_vehicle_control_command_ids:
                    if not self._all_vehicle_control_commands_received.wait(1):
                        rospy.logwarn(
                            "Timeout (1s) while waiting for vehicle control commands. "
                            "Missing command from actor ids {}".format(
                                self._expected_ego_vehicle_control_command_ids)
                        )
                    self._all_vehicle_control_commands_received.clear()

    def _carla_time_tick(self, carla_snapshot):
        """
        Private callback registered at carla.World.on_tick()
        to trigger cyclic updates.

        After successfully acquiring the update mutex
        (only a trylock is performed, to respect bridge processing time),
        the clock and the children are updated.
        Finally, the collected ROS messages are sent out.

        :param carla_snapshot: the current world snapshot
        :type carla_snapshot: carla.WorldSnapshot
        :return:
        """
        if not self.shutdown.is_set():
            if self.update_lock.acquire(False):
                if self.timestamp_last_run < carla_snapshot.timestamp.elapsed_seconds:
                    self.timestamp_last_run = carla_snapshot.timestamp.elapsed_seconds
                    self.comm.update_clock(carla_snapshot.timestamp)
                    self.status_publisher.set_frame(carla_snapshot.frame)
                    self._update(carla_snapshot.frame,
                                 carla_snapshot.timestamp.elapsed_seconds)
                    self.comm.send_msgs()
                self.update_lock.release()

            # if possible push current snapshot to update-actors-thread
            try:
                self.update_actors_queue.put_nowait(
                    set([x.id for x in carla_snapshot]))
            except queue.Full:
                pass

    def _update_actors_thread(self):
        """
        execution loop for async mode actor list updates
        """
        while not self.shutdown.is_set():
            try:
                current_actors = self.update_actors_queue.get(timeout=1)
                if current_actors:
                    self._update_actors(current_actors)
                    self.update_actors_queue.task_done()
            except queue.Empty:
                pass

    def _update_actors(self, current_actors):
        """
        update the available actors
        """
        previous_actors = set(self.actors)

        new_actors = current_actors - previous_actors
        deleted_actors = previous_actors - current_actors

        if new_actors:
            for carla_actor in self.carla_world.get_actors(list(new_actors)):
                self._create_actor(carla_actor)

        if deleted_actors:
            for id_to_delete in deleted_actors:
                # remove actor
                actor = self.actors[id_to_delete]
                with self.update_lock:
                    rospy.loginfo(
                        "Remove {}(id={}, parent_id={}, prefix={})".format(
                            actor.__class__.__name__, actor.get_id(),
                            actor.get_parent_id(), actor.get_prefix()))
                    actor.destroy()
                    del self.actors[id_to_delete]

                # remove pseudo-actors that have actor as parent
                updated_pseudo_actors = []
                for pseudo_actor in self.pseudo_actors:
                    if pseudo_actor.get_parent_id() == id_to_delete:
                        rospy.loginfo(
                            "Remove {}(parent_id={}, prefix={})".format(
                                pseudo_actor.__class__.__name__,
                                pseudo_actor.get_parent_id(),
                                pseudo_actor.get_prefix()))
                        pseudo_actor.destroy()
                        del pseudo_actor
                    else:
                        updated_pseudo_actors.append(pseudo_actor)
                self.pseudo_actors = updated_pseudo_actors

        # publish actor list on change
        if new_actors or deleted_actors:
            self.publish_actor_list()

    def publish_actor_list(self):
        """
        publish list of carla actors
        :return:
        """
        ros_actor_list = CarlaActorList()

        for actor_id in self.actors:
            actor = self.actors[actor_id].carla_actor
            ros_actor = CarlaActorInfo()
            ros_actor.id = actor.id
            ros_actor.type = actor.type_id
            try:
                ros_actor.rolename = str(actor.attributes.get('role_name'))
            except ValueError:
                pass

            if actor.parent:
                ros_actor.parent_id = actor.parent.id
            else:
                ros_actor.parent_id = 0

            ros_actor_list.actors.append(ros_actor)

        self.comm.publish_message("/carla/actor_list",
                                  ros_actor_list,
                                  is_latched=True)

    def _create_actor(self, carla_actor):  # pylint: disable=too-many-branches,too-many-statements
        """
        create an actor
        """
        parent = None
        if carla_actor.parent:
            if carla_actor.parent.id in self.actors:
                parent = self.actors[carla_actor.parent.id]
            else:
                parent = self._create_actor(carla_actor.parent)

        actor = None
        pseudo_actors = []
        if carla_actor.type_id.startswith('traffic'):
            if carla_actor.type_id == "traffic.traffic_light":
                actor = TrafficLight(carla_actor, parent, self.comm)
            else:
                actor = Traffic(carla_actor, parent, self.comm)
        elif carla_actor.type_id.startswith("vehicle"):
            if carla_actor.attributes.get('role_name')\
                    in self.parameters['ego_vehicle']['role_name']:
                actor = EgoVehicle(carla_actor, parent, self.comm,
                                   self._ego_vehicle_control_applied_callback)
                pseudo_actors.append(
                    ObjectSensor(parent=actor,
                                 communication=self.comm,
                                 actor_list=self.actors,
                                 filtered_id=carla_actor.id))
            else:
                actor = Vehicle(carla_actor, parent, self.comm)
        elif carla_actor.type_id.startswith("sensor"):
            if carla_actor.type_id.startswith("sensor.camera"):
                if carla_actor.type_id.startswith("sensor.camera.rgb"):
                    actor = RgbCamera(carla_actor, parent, self.comm,
                                      self.carla_settings.synchronous_mode)
                elif carla_actor.type_id.startswith("sensor.camera.depth"):
                    actor = DepthCamera(carla_actor, parent, self.comm,
                                        self.carla_settings.synchronous_mode)
                elif carla_actor.type_id.startswith(
                        "sensor.camera.semantic_segmentation"):
                    actor = SemanticSegmentationCamera(
                        carla_actor, parent, self.comm,
                        self.carla_settings.synchronous_mode)
                else:
                    actor = Camera(carla_actor, parent, self.comm,
                                   self.carla_settings.synchronous_mode)
            elif carla_actor.type_id.startswith("sensor.lidar"):
                actor = Lidar(carla_actor, parent, self.comm,
                              self.carla_settings.synchronous_mode)
            elif carla_actor.type_id.startswith("sensor.other.radar"):
                actor = Radar(carla_actor, parent, self.comm,
                              self.carla_settings.synchronous_mode)
            elif carla_actor.type_id.startswith("sensor.other.gnss"):
                actor = Gnss(carla_actor, parent, self.comm,
                             self.carla_settings.synchronous_mode)
            elif carla_actor.type_id.startswith("sensor.other.imu"):
                actor = ImuSensor(carla_actor, parent, self.comm,
                                  self.carla_settings.synchronous_mode)
            elif carla_actor.type_id.startswith("sensor.other.collision"):
                actor = CollisionSensor(carla_actor, parent, self.comm,
                                        self.carla_settings.synchronous_mode)
            elif carla_actor.type_id.startswith("sensor.other.lane_invasion"):
                actor = LaneInvasionSensor(
                    carla_actor, parent, self.comm,
                    self.carla_settings.synchronous_mode)
            else:
                actor = Sensor(carla_actor, parent, self.comm,
                               self.carla_settings.synchronous_mode)
        elif carla_actor.type_id.startswith("spectator"):
            actor = Spectator(carla_actor, parent, self.comm)
        elif carla_actor.type_id.startswith("walker"):
            actor = Walker(carla_actor, parent, self.comm)
        else:
            actor = Actor(carla_actor, parent, self.comm)

        rospy.loginfo("Created {}(id={}, parent_id={},"
                      " type={}, prefix={}, attributes={})".format(
                          actor.__class__.__name__, actor.get_id(),
                          actor.get_parent_id(), carla_actor.type_id,
                          actor.get_prefix(), carla_actor.attributes))
        with self.update_lock:
            self.actors[carla_actor.id] = actor

        for pseudo_actor in pseudo_actors:
            rospy.loginfo("Created {}(parent_id={}, prefix={})".format(
                pseudo_actor.__class__.__name__, pseudo_actor.get_parent_id(),
                pseudo_actor.get_prefix()))
            with self.update_lock:
                self.pseudo_actors.append(pseudo_actor)

        return actor

    def run(self):
        """
        Run the bridge functionality.

        Registers on shutdown callback at rospy and spins ROS.

        :return:
        """
        rospy.on_shutdown(self.on_shutdown)
        rospy.spin()

    def on_shutdown(self):
        """
        Function to be called on shutdown.

        This function is registered at rospy as shutdown handler.

        """
        rospy.loginfo("Shutdown requested")
        self.destroy()

    def _update(self, frame_id, timestamp):
        """
        update all actors
        :return:
        """
        # update all pseudo actors
        for actor in self.pseudo_actors:
            actor.update(frame_id, timestamp)

        # update all carla actors
        for actor_id in self.actors:
            try:
                self.actors[actor_id].update(frame_id, timestamp)
            except RuntimeError as e:
                rospy.logwarn("Update actor {}({}) failed: {}".format(
                    self.actors[actor_id].__class__.__name__, actor_id, e))
                continue

    def _ego_vehicle_control_applied_callback(self, ego_vehicle_id):
        if not self.carla_settings.synchronous_mode or \
                not self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
            return
        with self._expected_ego_vehicle_control_command_ids_lock:
            if ego_vehicle_id in self._expected_ego_vehicle_control_command_ids:
                self._expected_ego_vehicle_control_command_ids.remove(
                    ego_vehicle_id)
            else:
                rospy.logwarn(
                    "Unexpected vehicle control command received from {}".
                    format(ego_vehicle_id))
            if not self._expected_ego_vehicle_control_command_ids:
                self._all_vehicle_control_commands_received.set()
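The synchronous-mode handshake above boils down to one reusable pattern: track the ids you still expect under a lock, set an Event when the list empties, and have the main loop wait on it with a timeout. A distilled, standalone sketch:

from threading import Event, Lock

all_received = Event()
expected_ids = [1, 2, 3]
expected_ids_lock = Lock()

def on_response(responder_id):
    with expected_ids_lock:
        if responder_id in expected_ids:
            expected_ids.remove(responder_id)
        if not expected_ids:
            all_received.set()  # last expected response arrived

# main loop: block for at most one second per tick
if not all_received.wait(1):
    print('timeout; still missing:', expected_ids)
all_received.clear()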
Example #46
0
class DownloadDialogController(object):
    def __init__(self, dialog, application, parentPage):
        self._dialog = dialog
        self._application = application
        self._parentPage = parentPage

        global _
        _ = get_()

        self._downloadDir = None

        self._runEvent = Event()
        self._thread = None

        self._logIndex = 1

        self._dialog.Bind(wx.EVT_BUTTON, self._onOk, id=wx.ID_OK)
        self._dialog.Bind(wx.EVT_BUTTON, self._onCancel, id=wx.ID_CANCEL)
        self._dialog.selectFileButton.Bind(wx.EVT_BUTTON, self._onSelectFile)

        self._dialog.Bind(webpage.events.EVT_UPDATE_LOG, self._onLogUpdate)

        self._dialog.Bind(webpage.events.EVT_DOWNLOAD_ERROR,
                          self._onDownloadError)

        self._dialog.Bind(webpage.events.EVT_DOWNLOAD_FINISH,
                          self._onDownloadFinish)

    def showDialog(self):
        """
        The method shows the dialog and returns the result of the ShowModal() method
        """
        if self._application.wikiroot is None:
            return

        self._loadState()

        result = self._dialog.ShowModal()
        if result == wx.ID_OK:
            self._saveState()

        return result

    def addToLog(self, text):
        logString = u'[{index:03g}] {text}'.format(index=self._logIndex,
                                                   text=text)
        self._logIndex += 1

        self._dialog.logText.AppendText(logString)

        count = self._dialog.logText.GetLastPosition()
        self._dialog.logText.ShowPosition(count)

    def resetLog(self):
        self._logIndex = 1
        self._dialog.logText.Value = u''

    def _loadState(self):
        tagslist = TagsList(self._application.wikiroot)
        self._dialog.setTagsList(tagslist)
        if (self._parentPage is not None and
                self._parentPage.parent is not None):
            self._dialog.tags = self._parentPage.tags

        clipboardText = getClipboardText()
        if clipboardText is not None and isLink(clipboardText):
            self._dialog.url = clipboardText
            self._dialog.urlText.SetSelection(0, len(clipboardText))

    def _saveState(self):
        pass

    def _onLogUpdate(self, event):
        self.addToLog(event.text)

    def _removeDownloadDir(self):
        if self._downloadDir is not None and os.path.exists(self._downloadDir):
            try:
                rmtree(self._downloadDir)
            except EnvironmentError:
                self.addToLog(_(u"Can't remove temp directory"))

    def _onOk(self, event):
        self.resetLog()
        url = self._dialog.url

        if len(url) == 0:
            MessageBox(_(u'Enter link for downloading'),
                       _(u"Error"),
                       wx.ICON_ERROR | wx.OK)
            self._dialog.urlText.SetFocus()
            return

        if os.path.isfile(url):
            url = url.replace(u'\\', u'/')
            if not url.startswith(u'/'):
                url = u'/' + url

            url = u'file://' + url

        if self._thread is None:
            self._removeDownloadDir()
            self._downloadDir = mkdtemp(prefix=u'webpage_tmp_')

            self._runEvent.set()
            self._thread = DownloadThread(self._dialog,
                                          self._runEvent,
                                          self._downloadDir,
                                          url)
            self._thread.start()

    def _onCancel(self, event):
        self._runEvent.clear()
        if self._thread is not None:
            self._thread.join()

        self._removeDownloadDir()
        event.Skip()

    def _onSelectFile(self, event):
        with TestedFileDialog(
                self._dialog,
                wildcard=_("HTML files(*.html; *.htm)|*.html;*.htm|All files|*.*"),
                style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
            if fileDialog.ShowModal() == wx.ID_OK:
                self._dialog.url = fileDialog.GetPath()

    def _onDownloadError(self, event):
        self._onLogUpdate(event)
        self._thread = None
        self._removeDownloadDir()

    def _onDownloadFinish(self, event):
        self._thread = None
        if not self._runEvent.is_set():
            self.addToLog(_(u"Page creation is canceled."))
            self._removeDownloadDir()
            return

        parentPage = self._parentPage
        title = event.title if event.title is not None else _(u'Web page')
        favicon = event.favicon
        tags = self._dialog.tags
        content = event.content
        url = event.url
        tmpStaticDir = event.staticPath
        logContent = self._dialog.logText.Value

        titleDlg = wx.TextEntryDialog(self._dialog,
                                      _(u'Enter a title for the page'),
                                      _(u'Page title'),
                                      title)
        titleDlg.SetMinSize((450, 150))

        if titleDlg.ShowModal() == wx.ID_OK:
            title = titleDlg.GetValue()
        else:
            self.addToLog(_(u"Page creation is canceled."))
            self._removeDownloadDir()
            return

        try:
            page = WebPageFactory().createWebPage(parentPage,
                                                  title,
                                                  favicon,
                                                  tags,
                                                  content,
                                                  url,
                                                  tmpStaticDir,
                                                  logContent)
            self._dialog.EndModal(wx.ID_OK)
            self._application.selectedPage = page
        except EnvironmentError:
            self.addToLog(_(u"Can't create the page. Perhaps the title of the page is too long."))
        finally:
            self._removeDownloadDir()
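The _runEvent handshake above is a common cancellation idiom: the Event is set before the worker starts, cleared to request cancellation, and checked by the finish handler to decide whether the result should be kept. A minimal sketch of the same idiom:

from threading import Event, Thread
import time

run_event = Event()

def worker():
    while run_event.is_set():
        time.sleep(0.05)  # stand-in for one unit of download work

run_event.set()    # must be set before the thread starts
t = Thread(target=worker)
t.start()
run_event.clear()  # cancel: the worker observes the cleared flag and exits
t.join()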
Example #47
0
class GenericWebsocket:
    """
    Websocket object used to contain the base functionality of a websocket.
    Includes an event emitter and a standard websocket client.
    """

    def __init__(self, host, logLevel='INFO', max_retries=5, create_event_emitter=None):
        self.host = host
        self.logger = CustomLogger('BfxWebsocket', logLevel=logLevel)
        # override 'error' event to stop it raising an exception
        # self.events.on('error', self.on_error)
        self.ws = None
        self.max_retries = max_retries
        self.attempt_retry = True
        self.sockets = {}
        # start a separate worker for the event emitter
        event_worker.set()
        create_ee = create_event_emitter or _start_event_worker
        self.events = create_ee()
        self.execute = Event()

    def run(self):
        """
        Start the websocket connection. This function spawns the initial socket
        thread and connection.
        """
        self._start_new_socket()

    def stop(self):
        self.execute.clear()
        event_worker.clear()
        for _, socket in self.sockets.items():
            socket.set_disconnected()
            if socket.ws:
                asyncio.ensure_future(socket.ws.close_connection())
#            self.worker_new_sockets[sId].cancel()
#            socket.ws.fail_connection()
#            asyncio.ensure_future(socket.ws.close())
#            asyncio.ensure_future(self.sockets[sId].ws.ping())
#            loop.run_until_complete(self.sockets[sId].ws.close_connection())
        self.logger.info("GenericWebsocket stop")

    def get_task_executable(self):
        """
        Get the asyncio task that runs indefinitely
        """
        return self._run_socket()

    def _start_new_socket(self, socketId=None):
        if not socketId:
            socketId = len(self.sockets)
        def start_loop(loop):
            asyncio.set_event_loop(loop)
            loop.run_until_complete(self._run_socket())
        worker_loop = asyncio.new_event_loop()
        worker_new_socket = Thread(name="worker_new_socket",target=start_loop, args=(worker_loop,))
        worker_new_socket.daemon = True
        worker_new_socket.start()
        return socketId

    def _wait_for_socket(self, socket_id):
        """
        Block until the given socket connection is open
        """
        while True:
            socket = self.sockets.get(socket_id, False)
            if socket:
                if socket.isConnected and socket.ws:
                    return
            time.sleep(0.01)

    def get_socket(self, socketId):
        return self.sockets[socketId]

    def get_authenticated_socket(self):
        for socketId in self.sockets:
            if self.sockets[socketId].isAuthenticated:
                return self.sockets[socketId]
        return None

    async def _run_socket(self):
        retries = 0
        sId = len(self.sockets)
        s = Socket(sId)
        self.sockets[sId] = s
        self.execute.set()
        while retries < self.max_retries and self.attempt_retry and self.execute.is_set():
            try:
                async with websockets.connect(self.host) as websocket:
                    self.sockets[sId].set_websocket(websocket)
                    self.sockets[sId].set_connected()
                    self.logger.info("Websocket connected to {}".format(self.host))
                    retries = 0
                    while self.execute.is_set():
                        # optimization - yield briefly so queued tasks on the
                        # event loop run before continuing
                        await asyncio.sleep(0.001)
                        message = await websocket.recv()
                        await self.on_message(sId, message)
                    self.logger.info("Websocket disconnect")
            except (ConnectionClosed, socket.error) as e:
                self.sockets[sId].set_disconnected()
                if self.sockets[sId].isAuthenticated:
                    self.sockets[sId].set_unauthenticated()
                self._emit('disconnected')
                if (not self.attempt_retry):
                    return
                self.logger.error(str(e))
                retries += 1
                # wait 5 seconds before retrying
                self.logger.info("Waiting 5 seconds before retrying...")
                await asyncio.sleep(5)
                self.logger.info("Reconnect attempt {}/{}".format(retries, self.max_retries))
        self.logger.info("Unable to connect to websocket.")
        self._emit('stopped')

    def remove_all_listeners(self, event):
        """
        Remove all listeners from event emitter
        """
        self.events.remove_all_listeners(event)

    def on(self, event, func=None):
        """
        Add a new event to the event emitter
        """
        if not func:
            return self.events.on(event)
        self.events.on(event, func)

    def once(self, event, func=None):
        """
        Add a new event to only fire once to the event
        emitter
        """
        if not func:
            return self.events.once(event)
        self.events.once(event, func)

    def _emit(self, event, *args, **kwargs):
        self.events.emit(event, *args, **kwargs)

    async def on_error(self, error):
        """
        On websocket error print and fire event
        """
        self.logger.error(error)

    async def on_close(self):
        """
        On websocket close print and fire event. This is used by the data server.
        """
        self.logger.info("Websocket closed.")
        self.attempt_retry = False
        for key, socket in self.sockets.items():
            await socket.ws.close()
        self._emit('done')

    async def on_open(self):
        """
        On websocket open
        """
        pass

    async def on_message(self, message):
        """
        On websocket message
        """
        pass
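The core of _run_socket() above is a single Event gating both the receive loop and the reconnect loop, so stop() can terminate either from another thread. A stripped-down sketch of that gating (the inner sleep stands in for websocket.recv()):

import asyncio
from threading import Event

execute = Event()

async def run_socket(max_retries=5):
    retries = 0
    execute.set()
    while retries < max_retries and execute.is_set():
        try:
            while execute.is_set():         # runs until stop() clears the flag
                await asyncio.sleep(0.001)  # stand-in for websocket.recv()
        except ConnectionError:
            retries += 1
            await asyncio.sleep(5)          # back off before reconnecting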
Example #48
0
class ConcurrentTests(QuerySanityTests, QueryTests):
    def setUp(self):
        super(ConcurrentTests, self).setUp()
        self.thread_crashed = Event()
        self.thread_stopped = Event()
        self.num_threads = self.input.param("num_threads", 4)
        self.test_to_run = self.input.param("test_to_run", "test_max")
        self.ops = self.input.param("ops", None)
        self.query_buckets = self.get_query_buckets(check_all_buckets=True)

    def suite_setUp(self):
        super(ConcurrentTests, self).suite_setUp()

    def tearDown(self):
        rest = RestConnection(self.master)
        if rest._rebalance_progress_status() == 'running':
            self.log.warning("rebalancing is still running, test should be verified")
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        try:
            super(ConcurrentTests, self).tearDown()
        except:
            pass
        # ClusterOperationHelper.cleanup_cluster(self.servers)
        # self.sleep(10)

    def suite_tearDown(self):
        super(ConcurrentTests, self).suite_tearDown()

    def test_concurrent_queries(self):
        task_ops = None
        if self.ops == 'rebalance':
            task_ops = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                                    self.servers[self.nodes_init:self.nodes_init + self.nodes_in], [])
        elif self.ops == 'failover':
            self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
            servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
            self.cluster.failover(self.servers[:self.nodes_init], servr_out)
            task_ops = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                                    [], servr_out)
        query_threads = []
        for n in range(self.num_threads):
            t = StoppableThread(target=self.query_thread,
                                name="query-{0}".format(n),
                                args=(self.test_to_run,))
            query_threads.append(t)
            t.start()

        while True:
            if not query_threads:
                break
            self.thread_stopped.wait(60)
            if self.thread_crashed.is_set():
                self.log.error("Will stop all threads!")
                for t in query_threads:
                    t.stop()
                    self.log.error("Thread %s stopped" % str(t))
                break
            else:
                query_threads = [d for d in query_threads if d.is_alive()]
                self.log.info("Current amount of threads %s" % len(query_threads))
                self.thread_stopped.clear()
        if self.thread_crashed.is_set():
            self.fail("Test failed, see logs above!")
        if task_ops:
            task_ops.result()

    def test_concurrent_queries_hints(self):
        task_ops = None
        if self.ops == 'rebalance':
            task_ops = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                                    self.servers[self.nodes_init:self.nodes_init + self.nodes_in], [])
        elif self.ops == 'failover':
            self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
            servr_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
            self.cluster.failover(self.servers[:self.nodes_init], servr_out)
            task_ops = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                                    [], servr_out)
        index_name_prefix = 'hint' + str(uuid.uuid4())[:4]
        created_indexes = []
        try:
            fields = self.input.param("index_field", '').replace(':', ',')
            fields = fields.split(';')
            for attr in fields:
                for bucket, query_bucket in zip(self.buckets, self.query_buckets):
                    ind_name = attr.split('.')[0].split('[')[0].replace(',', '_')
                    self.query = "CREATE INDEX %s_%s_%s ON %s(%s) USING %s" % (index_name_prefix,
                                                                               ind_name,
                                                                               fields.index(attr),
                                                                               query_bucket, attr, self.index_type)
                    # if self.gsi_type:
                    #     self.query += " WITH {'index_type': 'memdb'}"
                    self.run_cbq_query()
                    self._wait_for_index_online(bucket, '%s_%s_%s' % (index_name_prefix, ind_name,
                                                                      fields.index(attr)))
                    created_indexes.append('%s_%s_%s' % (index_name_prefix, ind_name,
                                                         fields.index(attr)))
            for ind in created_indexes:
                self.hint_index = ind
                query_threads = []
                for n in range(self.num_threads):
                    t = StoppableThread(target=self.query_thread,
                                        name="query-{0}".format(n),
                                        args=(self.test_to_run,))
                    query_threads.append(t)
                    t.start()

                while True:
                    if not query_threads:
                        break
                    self.thread_stopped.wait(60)
                    if self.thread_crashed.is_set():
                        self.log.error("Will stop all threads!")
                        for t in query_threads:
                            t.stop()
                            self.log.error("Thread %s stopped" % str(t))
                        break
                    else:
                        query_threads = [d for d in query_threads if d.is_alive()]
                        self.log.info("Current amount of threads %s" % len(query_threads))
                        self.thread_stopped.clear()
                if self.thread_crashed.is_set():
                    self.fail("Test failed, see logs above!")
                if task_ops:
                    task_ops.result()
        finally:
            for query_bucket in self.query_buckets:
                for index_name in set(created_indexes):
                    self.query = "DROP INDEX %s ON %s USING %s" % (index_name, query_bucket, self.index_type)
                    try:
                        self.run_cbq_query()
                    except:
                        pass
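The supervision loop used twice above reduces to two Events: workers set thread_stopped when they finish (and thread_crashed on error); the supervisor wakes on either, prunes dead threads, and aborts on a crash. A distilled sketch:

from threading import Event

thread_crashed = Event()
thread_stopped = Event()

def supervise(threads):
    while threads:
        thread_stopped.wait(60)
        if thread_crashed.is_set():
            break  # a worker reported a failure; stop supervising
        threads = [t for t in threads if t.is_alive()]
        thread_stopped.clear()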
Example #49
0
class AutoAlignmentPlugin(octoprint.plugin.EventHandlerPlugin):

    def __init__(self):
        self.aligning = False
        self.event = Event()
        self.event.set()
        self._write_buffer = []
        self._fake_ok = False
        self._temp_resp_len = 0
        self.g = None

    def on_event(self, event, payload):
        if event == 'PrintCancelled':
            self.relinquish_control()
        if event == 'PrintPaused':
            if self.g is not None:
                self.g._p.paused = True
        if event == 'PrintResumed':
            if self.g is not None:
                self.g._p.paused = False
        if event == 'Disconnected':
            if self.g is not None:
                self.g.teardown(wait=False)
            self.aligning = False
            self._fake_ok = False
            self._temp_resp_len = 0
            self.event.set()

    def print_started_sentinel(self, comm, phase, cmd, cmd_type, gcode, *args, **kwargs):
        if 'M900' in cmd:
            self.event.clear()
            self.aligning = True
            self._align_thread = Thread(target=self.align, name='Align')
            sleep(1)
            self._align_thread.start()
            return None
        return cmd

    def align(self):
        self._logger.info('Alignment started')
        self.g = g = G(
            print_lines=False,
            aerotech_include=False,
            direct_write=True,
            direct_write_mode='serial',
            layer_height=0.19,
            extrusion_width=0.4,
            filament_diameter=1.75,
            extrusion_multiplier=1.00,
            setup=False,
        )
        g._p = Printer()
        g._p.connect(self.s)
        g._p.start()


        self.AA = AA = AlignmentAutomator(g)
        AA.full_alignment()


        #g.write('G91')
        #g.feed(500)
        #g.move(10)
        #g.move(-10)
        #g.move(10)
        #g.move(-1)
        #g.move(10)
        #g.move(-1)
        #g.move(10)
        #g.feed(6000)

        self.relinquish_control()

    def relinquish_control(self):
        self._logger.info('Resetting Line Number to 0')
        self.g._p.reset_linenumber()
        self._logger.info('Tearing down, waiting for buffer to clear.')
        self.g.teardown()
        self.g = None
        self._logger.info('teardown called, returning control to OctoPrint')
        self.aligning = False
        self._fake_ok = True
        self._temp_resp_len = 0
        self.event.set()

    def readline(self, *args, **kwargs):
        out = True
        if self.aligning:
            out = self.event.wait(2)
        if out:
            if self._fake_ok:
                self._fake_ok = False
                return 'ok\n'
            resp = self.s.readline(*args, **kwargs)
        else:
            if len(self.g._p.temp_readings) > self._temp_resp_len:
                self._temp_resp_len = len(self.g._p.temp_readings)
                resp = self.g._p.temp_readings[-1]
            else:
                resp = 'echo: Alignment script is running' if self.AA.offsetstring is None else self.AA.offsetstring
        return resp

    def write(self, data):
        if not self.aligning:
            return self.s.write(data)
        else:
            self._logger.warn('Write called when Mecode has control: ' + str(data))

    def close(self):
        return self.s.close()

    def serial_factory(self, comm_instance, port, baudrate, connection_timeout):
        if port == 'VIRTUAL':
            return None
        if port is None or port == 'AUTO':
            # no known port, try auto detection
            comm_instance._changeState(comm_instance.STATE_DETECT_SERIAL)
            serial_obj = comm_instance._detectPort(False)
            if serial_obj is None:
                comm_instance._log("Failed to autodetect serial port")
                comm_instance._errorValue = 'Failed to autodetect serial port.'
                comm_instance._changeState(comm_instance.STATE_ERROR)
                return None

        # connect to regular serial port
        comm_instance._log("Connecting to: %s" % port)
        if baudrate == 0:
            serial_obj = serial.Serial(str(port), 115200, timeout=connection_timeout, writeTimeout=10000, parity=serial.PARITY_ODD)
        else:
            serial_obj = serial.Serial(str(port), baudrate, timeout=connection_timeout, writeTimeout=10000, parity=serial.PARITY_ODD)
        serial_obj.close()
        serial_obj.parity = serial.PARITY_NONE
        serial_obj.open()

        self.s = serial_obj
        return self
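
A minimal sketch of how a serial wrapper like the one above is typically wired into OctoPrint, assuming a hypothetical AlignmentPlugin class that carries these methods; the two hook names are OctoPrint's standard serial-factory and g-code queuing hooks.

__plugin_name__ = "Alignment"

def __plugin_load__():
    global __plugin_implementation__, __plugin_hooks__
    plugin = AlignmentPlugin()  # hypothetical plugin class holding the methods above
    __plugin_implementation__ = plugin
    __plugin_hooks__ = {
        # OctoPrint asks this factory for a (pseudo-)serial object on connect
        "octoprint.comm.transport.serial.factory": plugin.serial_factory,
        # the queuing hook lets the plugin intercept commands such as M900
        "octoprint.comm.protocol.gcode.queuing": plugin.print_started_sentinel,
    }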
Beispiel #50
0
class Module(MgrModule):
    OPTIONS = [
        {
            'name': 'enable_monitoring',
            'default': str(False),
        },
        {
            'name': 'scrape_frequency',
            'default': str(86400),
        },
        {
            'name': 'pool_name',
            'default': 'device_health_metrics',
        },
        {
            'name': 'retention_period',
            'default': str(86400 * 14),
        },
        {
            'name': 'mark_out_threshold',
            'default': str(86400 * 14 * 2),
        },
        {
            'name': 'warn_threshold',
            'default': str(86400 * 14 * 2),
        },
        {
            'name': 'self_heal',
            'default': str(True),
        },
        {
            'name': 'sleep_interval',
            'default': str(600),
        },
    ]

    COMMANDS = [
        {
            "cmd": "device query-daemon-health-metrics "
                   "name=who,type=CephString",
            "desc": "Get device health metrics for a given daemon (OSD)",
            "perm": "r"
        },
        {
            "cmd": "device scrape-daemon-health-metrics "
                   "name=who,type=CephString",
            "desc": "Scrape and store device health metrics "
                    "for a given daemon",
            "perm": "r"
        },
        {
            "cmd": "device scrape-health-metrics "
                   "name=devid,type=CephString,req=False",
            "desc": "Scrape and store health metrics",
            "perm": "r"
        },
        {
            "cmd": "device get-health-metrics "
                   "name=devid,type=CephString "
                   "name=sample,type=CephString,req=False",
            "desc": "Show stored device metrics for the device",
            "perm": "r"
        },
        {
            "cmd": "device check-health",
            "desc": "Check life expectancy of devices",
            "perm": "rw",
        },
        {
            "cmd": "device monitoring on",
            "desc": "Enable device health monitoring",
            "perm": "rw",
        },
        {
            "cmd": "device monitoring off",
            "desc": "Disable device health monitoring",
            "perm": "rw",
        },
    ]

    def __init__(self, *args, **kwargs):
        super(Module, self).__init__(*args, **kwargs)

        # options
        for opt in self.OPTIONS:
            setattr(self, opt['name'], opt['default'])

        # other
        self.run = True
        self.event = Event()

    def handle_command(self, _, cmd):
        self.log.error("handle_command")

        if cmd['prefix'] == 'device query-daemon-health-metrics':
            who = cmd.get('who', '')
            if who[0:4] != 'osd.':
                return -errno.EINVAL, '', 'not a valid <osd.NNN> id'
            osd_id = who[4:]
            result = CommandResult('')
            self.send_command(result, 'osd', osd_id, json.dumps({
                'prefix': 'smart',
                'format': 'json',
            }), '')
            r, outb, outs = result.wait()
            return r, outb, outs
        elif cmd['prefix'] == 'device scrape-daemon-health-metrics':
            who = cmd.get('who', '')
            if who[0:4] != 'osd.':
                return -errno.EINVAL, '', 'not a valid <osd.NNN> id'
            osd_id = int(who[4:])
            return self.scrape_osd(osd_id)
        elif cmd['prefix'] == 'device scrape-health-metrics':
            if 'devid' in cmd:
                return self.scrape_device(cmd['devid'])
            return self.scrape_all()
        elif cmd['prefix'] == 'device get-health-metrics':
            return self.show_device_metrics(cmd['devid'], cmd.get('sample'))
        elif cmd['prefix'] == 'device check-health':
            return self.check_health()
        elif cmd['prefix'] == 'device monitoring on':
            self.set_config('enable_monitoring', 'true')
            self.event.set()
            return 0, '', ''
        elif cmd['prefix'] == 'device monitoring off':
            self.set_config('enable_monitoring', 'false')
            self.set_health_checks({})  # avoid stuck health alerts
            return 0, '', ''
        else:
            # mgr should respect our self.COMMANDS and not call us for
            # any prefix we don't advertise
            raise NotImplementedError(cmd['prefix'])

    def self_test(self):
        self.refresh_config()
        osdmap = self.get('osd_map')
        osd_id = osdmap['osds'][0]['osd']
        osdmeta = self.get('osd_metadata')
        devs = osdmeta.get(str(osd_id), {}).get('device_ids')
        if devs:
            devid = devs.split()[0].split('=')[1]
            (r, before, err) = self.show_device_metrics(devid, '')
            assert r == 0
            (r, out, err) = self.scrape_device(devid)
            assert r == 0
            (r, after, err) = self.show_device_metrics(devid, '')
            assert r == 0
            assert before != after

    def refresh_config(self):
        for opt in self.OPTIONS:
            setattr(self,
                    opt['name'],
                    self.get_config(opt['name']) or opt['default'])
            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))

    def serve(self):
        self.log.info("Starting")

        last_scrape = None
        ls = self.get_store('last_scrape')
        if ls:
            try:
                last_scrape = datetime.strptime(ls, TIME_FORMAT)
            except ValueError:
                pass
        self.log.debug('Last scrape %s', last_scrape)

        while self.run:
            self.refresh_config()

            if self.enable_monitoring in ('true', 'True'):
                self.log.debug('Running')
                self.check_health()

                now = datetime.utcnow()
                if not last_scrape:
                    next_scrape = now
                else:
                    # align to scrape interval
                    scrape_frequency = int(self.scrape_frequency) or 86400
                    seconds = (last_scrape - datetime.utcfromtimestamp(0)).total_seconds()
                    seconds -= seconds % scrape_frequency
                    seconds += scrape_frequency
                    next_scrape = datetime.utcfromtimestamp(seconds)
                if last_scrape:
                    self.log.debug('Last scrape %s, next scrape due %s',
                                   last_scrape.strftime(TIME_FORMAT),
                                   next_scrape.strftime(TIME_FORMAT))
                else:
                    self.log.debug('Last scrape never, next scrape due %s',
                                   next_scrape.strftime(TIME_FORMAT))
                if now >= next_scrape:
                    self.scrape_all()
                    last_scrape = now
                    self.set_store('last_scrape', last_scrape.strftime(TIME_FORMAT))

            # sleep
            sleep_interval = int(self.sleep_interval) or 60
            self.log.debug('Sleeping for %d seconds', sleep_interval)
            self.event.wait(sleep_interval)
            self.event.clear()

    def shutdown(self):
        self.log.info('Stopping')
        self.run = False
        self.event.set()

    def open_connection(self, create_if_missing=True):
        pools = self.rados.list_pools()
        is_pool = False
        for pool in pools:
            if pool == self.pool_name:
                is_pool = True
                break
        if not is_pool:
            if not create_if_missing:
                return None
            self.log.debug('create %s pool' % self.pool_name)
            # create pool
            result = CommandResult('')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd pool create',
                'format': 'json',
                'pool': self.pool_name,
                'pg_num': 1,
            }), '')
            r, outb, outs = result.wait()
            assert r == 0

            # set pool application
            result = CommandResult('')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd pool application enable',
                'format': 'json',
                'pool': self.pool_name,
                'app': 'mgr_devicehealth',
            }), '')
            r, outb, outs = result.wait()
            assert r == 0

        ioctx = self.rados.open_ioctx(self.pool_name)
        return ioctx

    def scrape_osd(self, osd_id):
        ioctx = self.open_connection()
        raw_smart_data = self.do_scrape_osd(osd_id)
        if raw_smart_data:
            for device, raw_data in raw_smart_data.items():
                data = self.extract_smart_features(raw_data)
                self.put_device_metrics(ioctx, device, data)
        ioctx.close()
        return 0, "", ""

    def scrape_all(self):
        osdmap = self.get("osd_map")
        assert osdmap is not None
        ioctx = self.open_connection()
        did_device = {}
        for osd in osdmap['osds']:
            osd_id = osd['osd']
            raw_smart_data = self.do_scrape_osd(osd_id)
            if not raw_smart_data:
                continue
            for device, raw_data in raw_smart_data.items():
                if device in did_device:
                    self.log.debug('skipping duplicate %s' % device)
                    continue
                did_device[device] = 1
                data = self.extract_smart_features(raw_data)
                self.put_device_metrics(ioctx, device, data)

        ioctx.close()
        return 0, "", ""

    def scrape_device(self, devid):
        r = self.get("device " + devid)
        if not r or 'device' not in r.keys():
            return -errno.ENOENT, '', 'device ' + devid + ' not found'
        daemons = r['device'].get('daemons', [])
        osds = [int(r[4:]) for r in daemons if r.startswith('osd.')]
        if not osds:
            return (-errno.EAGAIN, '',
                    'device ' + devid + ' not claimed by any active '
                                        'OSD daemons')
        osd_id = osds[0]
        ioctx = self.open_connection()
        raw_smart_data = self.do_scrape_osd(osd_id, devid=devid)
        if raw_smart_data:
            for device, raw_data in raw_smart_data.items():
                data = self.extract_smart_features(raw_data)
                self.put_device_metrics(ioctx, device, data)
        ioctx.close()
        return 0, "", ""

    def do_scrape_osd(self, osd_id, devid=''):
        """
        :return: a dict, or None if the scrape failed.
        """
        self.log.debug('do_scrape_osd osd.%d' % osd_id)

        # scrape from osd
        result = CommandResult('')
        self.send_command(result, 'osd', str(osd_id), json.dumps({
            'prefix': 'smart',
            'format': 'json',
            'devid': devid,
        }), '')
        r, outb, outs = result.wait()

        try:
            return json.loads(outb)
        except (IndexError, ValueError):
            self.log.error(
                "Failed to parse JSON result from OSD {0} ({1})".format(
                    osd_id, outb))

    def put_device_metrics(self, ioctx, devid, data):
        old_key = datetime.utcnow() - timedelta(
            seconds=int(self.retention_period))
        prune = old_key.strftime(TIME_FORMAT)
        self.log.debug('put_device_metrics device %s prune %s' %
                       (devid, prune))
        erase = []
        try:
            with rados.ReadOpCtx() as op:
                omap_iter, ret = ioctx.get_omap_keys(op, "", 500)  # fixme
                assert ret == 0
                ioctx.operate_read_op(op, devid)
                for key, _ in list(omap_iter):
                    if key >= prune:
                        break
                    erase.append(key)
        except rados.ObjectNotFound:
            # The object doesn't already exist, no problem.
            pass
        except rados.Error as e:
            # Do not proceed with writes if something unexpected
            # went wrong with the reads.
            self.log.exception("Error reading OMAP: {0}".format(e))
            return

        key = datetime.utcnow().strftime(TIME_FORMAT)
        self.log.debug('put_device_metrics device %s key %s = %s, erase %s' %
                       (devid, key, data, erase))
        with rados.WriteOpCtx() as op:
            ioctx.set_omap(op, (key,), (str(json.dumps(data)),))
            if len(erase):
                ioctx.remove_omap_keys(op, tuple(erase))
            ioctx.operate_write_op(op, devid)

    def show_device_metrics(self, devid, sample):
        # verify device exists
        r = self.get("device " + devid)
        if not r or 'device' not in r.keys():
            return -errno.ENOENT, '', 'device ' + devid + ' not found'
        # fetch metrics
        res = {}
        ioctx = self.open_connection(create_if_missing=False)
        if ioctx:
            with rados.ReadOpCtx() as op:
                omap_iter, ret = ioctx.get_omap_vals(op, "", sample or '', 500)  # fixme
                assert ret == 0
                try:
                    ioctx.operate_read_op(op, devid)
                    for key, value in list(omap_iter):
                        if sample and key != sample:
                            break
                        try:
                            v = json.loads(value)
                        except (ValueError, IndexError):
                            self.log.debug('unable to parse value for %s: "%s"' %
                                           (key, value))
                            continue
                        res[key] = v
                except rados.ObjectNotFound:
                    pass
                except rados.Error as e:
                    self.log.exception("RADOS error reading omap: {0}".format(e))
                    raise

        return 0, json.dumps(res, indent=4), ''

    def check_health(self):
        self.log.info('Check health')
        config = self.get('config')
        min_in_ratio = float(config.get('mon_osd_min_in_ratio'))
        mark_out_threshold_td = timedelta(seconds=int(self.mark_out_threshold))
        warn_threshold_td = timedelta(seconds=int(self.warn_threshold))
        checks = {}
        health_warnings = {
            DEVICE_HEALTH: [],
            DEVICE_HEALTH_IN_USE: [],
            }
        devs = self.get("devices")
        osds_in = {}
        osds_out = {}
        now = datetime.utcnow()
        osdmap = self.get("osd_map")
        assert osdmap is not None
        for dev in devs['devices']:
            devid = dev['devid']
            if 'life_expectancy_min' not in dev:
                continue
            # ignore devices that are not consumed by any daemons
            if not dev['daemons']:
                continue
            # life_expectancy_(min/max) is in the format of:
            # '%Y-%m-%d %H:%M:%S.%f', e.g.:
            # '2019-01-20 21:12:12.000000'
            life_expectancy_min = datetime.strptime(
                dev['life_expectancy_min'],
                '%Y-%m-%d %H:%M:%S.%f')
            self.log.debug('device %s expectancy min %s', dev,
                           life_expectancy_min)

            if life_expectancy_min - now <= mark_out_threshold_td:
                if self.self_heal:
                    # dev['daemons'] == ["osd.0","osd.1","osd.2"]
                    if dev['daemons']:
                        osds = [x for x in dev['daemons']
                                if x.startswith('osd.')]
                        osd_ids = map(lambda x: x[4:], osds)
                        for _id in osd_ids:
                            if self.is_osd_in(osdmap, _id):
                                osds_in[_id] = life_expectancy_min
                            else:
                                osds_out[_id] = 1

            if life_expectancy_min - now <= warn_threshold_td:
                # device can appear in more than one location in case
                # of SCSI multipath
                device_locations = map(lambda x: x['host'] + ':' + x['dev'],
                                       dev['location'])
                health_warnings[DEVICE_HEALTH].append(
                    '%s (%s); daemons %s; life expectancy between %s and %s'
                    % (dev['devid'],
                       ','.join(device_locations),
                       ','.join(dev.get('daemons', ['none'])),
                       dev['life_expectancy_min'],
                       dev.get('life_expectancy_max', 'unknown')))
                # TODO: by default, dev['life_expectancy_max'] == '0.000000',
                # so dev.get('life_expectancy_max', 'unknown')
                # above should be altered.

        # OSD might be marked 'out' (which means it has no
        # data), however PGs are still attached to it.
        for _id in osds_out:
            num_pgs = self.get_osd_num_pgs(_id)
            if num_pgs > 0:
                health_warnings[DEVICE_HEALTH_IN_USE].append(
                    'osd.%s is marked out '
                    'but still has %s PG(s)' %
                    (_id, num_pgs))
        if osds_in:
            self.log.debug('osds_in %s' % osds_in)
            # calculate target in ratio
            num_osds = len(osdmap['osds'])
            num_in = len([x for x in osdmap['osds'] if x['in']])
            num_bad = len(osds_in)
            # sort with next-to-fail first
            bad_osds = sorted(osds_in.items(), key=operator.itemgetter(1))
            did = 0
            to_mark_out = []
            for osd_id, when in bad_osds:
                ratio = float(num_in - did - 1) / float(num_osds)
                if ratio < min_in_ratio:
                    final_ratio = float(num_in - num_bad) / float(num_osds)
                    checks[DEVICE_HEALTH_TOOMANY] = {
                        'severity': 'warning',
                        'summary': HEALTH_MESSAGES[DEVICE_HEALTH_TOOMANY],
                        'detail': [
                            '%d OSDs with failing device(s) would bring "in" ratio to %f < mon_osd_min_in_ratio %f' % (num_bad - did, final_ratio, min_in_ratio)
                        ]
                    }
                    break
                to_mark_out.append(osd_id)
                did += 1
            if to_mark_out:
                self.mark_out_etc(to_mark_out)
        for warning, ls in health_warnings.items():
            n = len(ls)
            if n:
                checks[warning] = {
                    'severity': 'warning',
                    'summary': HEALTH_MESSAGES[warning] % n,
                    'detail': ls,
                }
        self.set_health_checks(checks)
        return 0, "", ""

    def is_osd_in(self, osdmap, osd_id):
        for osd in osdmap['osds']:
            if str(osd_id) == str(osd['osd']):
                return bool(osd['in'])
        return False

    def get_osd_num_pgs(self, osd_id):
        stats = self.get('osd_stats')
        assert stats is not None
        for stat in stats['osd_stats']:
            if str(osd_id) == str(stat['osd']):
                return stat['num_pgs']
        return -1

    def mark_out_etc(self, osd_ids):
        self.log.info('Marking out OSDs: %s' % osd_ids)
        result = CommandResult('')
        self.send_command(result, 'mon', '', json.dumps({
            'prefix': 'osd out',
            'format': 'json',
            'ids': osd_ids,
        }), '')
        r, outb, outs = result.wait()
        if r != 0:
            self.log.warning('Could not mark OSD %s out. r: [%s], outb: [%s], outs: [%s]' % (osd_ids, r, outb, outs))
        for osd_id in osd_ids:
            result = CommandResult('')
            self.send_command(result, 'mon', '', json.dumps({
                'prefix': 'osd primary-affinity',
                'format': 'json',
                'id': int(osd_id),
                'weight': 0.0,
            }), '')
            r, outb, outs = result.wait()
            if r != 0:
                self.log.warning('Could not set osd.%s primary-affinity, r: [%s], outb: [%s], outs: [%s]' % (osd_id, r, outb, outs))

    def extract_smart_features(self, raw):
        # FIXME: extract and normalize raw smartctl --json output and
        # generate a dict of the fields we care about.
        return raw
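
The serve()/shutdown() pair above is a standard Event-driven sleep loop: wait() doubles as an interruptible sleep, so shutdown() (or a command handler that set()s the event) wakes the loop immediately instead of waiting out the interval. A self-contained sketch of just that pattern, with no Ceph mgr dependencies:

from threading import Event, Thread
import time

class PollLoop:
    def __init__(self, interval):
        self.run = True
        self.event = Event()
        self.interval = interval

    def serve(self):
        while self.run:
            print('tick at', time.time())
            # wait() returns early when shutdown() sets the event, so the
            # loop exits promptly instead of sleeping out the full interval
            self.event.wait(self.interval)
            self.event.clear()

    def shutdown(self):
        self.run = False
        self.event.set()

loop = PollLoop(interval=60)
t = Thread(target=loop.serve)
t.start()
time.sleep(0.1)
loop.shutdown()   # loop wakes and exits well before the 60 s interval
t.join()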
Beispiel #51
0
class SimplePikaTopicConsumer(object):
    """
  This is a pika (Python-RabbitMQ) message consumer (topic routing), which is
  heavily based on the asynchronous example provided in the pika documentation.
  It should handle unexpected interactions with RabbitMQ such as channel and
  connection closures.

  If RabbitMQ closes the connection, it will reopen it. You should
  look at the output, as there are limited reasons why the connection may
  be closed, which usually are tied to permission related issues or
  socket timeouts.

  If the channel is closed, it will indicate a problem with one of the
  commands that were issued and that should surface in the output as well.

  Example:

    pc = SimplePikaTopicConsumer(
      amqp_url='amqp://*****:*****@localhost:5672/%2F',
      routing_key='pub_thread.text',
      exchange_name='sex_change',
      queue_name='g_queue',
    )
    pc.start_thread()
  """
    EXCHANGE_TYPE = 'topic'
    THREAD_TEMPO_SEC = 1.0  # how long it will take the thread to quit
    RPC_QUEUE_NAME = 'RPC_QUEUE'

    def __init__(self, amqp_url, routing_key, exchange_name, queue_name):
        """Create a new instance of the consumer class, passing in the AMQP
    URL used to connect to RabbitMQ.

    :param str amqp_url: The AMQP url to connect with

    """
        self._connection = None
        self._channel = None
        self._closing = False
        self._consumer_tag = None
        self._url = amqp_url
        self._task_run_event = ThreadEvent()
        self._thread_tempo_sec = self.THREAD_TEMPO_SEC
        self._exchange_name = exchange_name
        self._routing_key = routing_key
        self._queue_name = queue_name

    def connect(self):
        """This method connects to RabbitMQ, returning the connection handle.
    When the connection is established, the on_connection_open method
    will be invoked by pika.

    :rtype: pika.SelectConnection

    """
        LOGGER.info('Connecting to %s', self._url)
        return pika.SelectConnection(pika.URLParameters(self._url),
                                     self.on_connection_open,
                                     stop_ioloop_on_close=False)

    def on_connection_open(self, unused_connection):
        """This method is called by pika once the connection to RabbitMQ has
    been established. It passes the handle to the connection object in
    case we need it, but in this case, we'll just mark it unused.

    :type unused_connection: pika.SelectConnection

    """
        LOGGER.info('Connection opened')
        self.add_on_connection_close_callback()
        self.open_channel()

    def add_on_connection_close_callback(self):
        """This method adds an on close callback that will be invoked by pika
    when RabbitMQ closes the connection to the publisher unexpectedly.

    """
        LOGGER.info('Adding connection close callback')
        self._connection.add_on_close_callback(self.on_connection_closed)

    def on_connection_closed(self, connection, reply_code, reply_text):
        """This method is invoked by pika when the connection to RabbitMQ is
    closed unexpectedly. Since it is unexpected, we will reconnect to
    RabbitMQ if it disconnects.

    :param pika.connection.Connection connection: The closed connection obj
    :param int reply_code: The server provided reply_code if given
    :param str reply_text: The server provided reply_text if given

    """
        self._channel = None
        if self._closing:
            self._connection.ioloop.stop()
        else:
            LOGGER.warning(
                'Connection closed, reopening in 5 seconds: (%s) %s',
                reply_code, reply_text)
            self._connection.add_timeout(5, self.reconnect)

    def reconnect(self):
        """Will be invoked by the IOLoop timer if the connection is
    closed. See the on_connection_closed method.

    """
        # This is the old connection IOLoop instance, stop its ioloop
        self._connection.ioloop.stop()

        if not self._closing:

            # Create a new connection
            self._connection = self.connect()

            # There is now a new connection, needs a new ioloop to run
            self._connection.ioloop.start()

    def open_channel(self):
        """Open a new channel with RabbitMQ by issuing the Channel.Open RPC
    command. When RabbitMQ responds that the channel is open, the
    on_channel_open callback will be invoked by pika.

    """
        LOGGER.info('Creating a new channel')
        self._connection.channel(on_open_callback=self.on_channel_open)

    def on_channel_open(self, channel):
        """This method is invoked by pika when the channel has been opened.
    The channel object is passed in so we can make use of it.

    Since the channel is now open, we'll declare the exchange to use.

    :param pika.channel.Channel channel: The channel object

    """
        LOGGER.info('Channel opened')
        self._channel = channel
        self.add_on_channel_close_callback()
        self.setup_exchange(self._exchange_name)

    def add_on_channel_close_callback(self):
        """This method tells pika to call the on_channel_closed method if
    RabbitMQ unexpectedly closes the channel.

    """
        LOGGER.info('Adding channel close callback')
        self._channel.add_on_close_callback(self.on_channel_closed)

    def on_channel_closed(self, channel, reply_code, reply_text):
        """Invoked by pika when RabbitMQ unexpectedly closes the channel.
    Channels are usually closed if you attempt to do something that
    violates the protocol, such as re-declare an exchange or queue with
    different parameters. In this case, we'll close the connection
    to shutdown the object.

    :param pika.channel.Channel: The closed channel
    :param int reply_code: The numeric reason the channel was closed
    :param str reply_text: The text reason the channel was closed

    """
        LOGGER.warning('Channel %i was closed: (%s) %s', channel, reply_code,
                       reply_text)
        self._connection.close()

    def setup_exchange(self, exchange_name):
        """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
    command. When it is complete, the on_exchange_declareok method will
    be invoked by pika.

    :param str|unicode exchange_name: The name of the exchange to declare

    """
        LOGGER.info('Declaring exchange %s', exchange_name)
        self._channel.exchange_declare(callback=self.on_exchange_declareok,
                                       exchange=exchange_name,
                                       exchange_type=self.EXCHANGE_TYPE,
                                       durable=False)

    def on_exchange_declareok(self, unused_frame):
        """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
    command.

    :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame

    """
        LOGGER.info('Exchange declared')
        self.setup_queue(self.RPC_QUEUE_NAME)

    def setup_queue(self, queue_name):
        """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
    command. When it is complete, the on_queue_declareok method will
    be invoked by pika.

    :param str|unicode queue_name: The name of the queue to declare.

    """
        LOGGER.info('Declaring queue %s', queue_name)
        self._channel.queue_declare(callback=self.on_queue_declareok,
                                    queue=self._queue_name,
                                    arguments={'x-message-ttl': 1000})

    def on_queue_declareok(self, method_frame):
        """Method invoked by pika when the Queue.Declare RPC call made in
    setup_queue has completed. In this method we will bind the queue
    and exchange together with the routing key by issuing the Queue.Bind
    RPC command. When this command is complete, the on_bindok method will
    be invoked by pika.

    :param pika.frame.Method method_frame: The Queue.DeclareOk frame

    """
        LOGGER.info('Binding %s to %s with %s', self._exchange_name,
                    self._queue_name, self._routing_key)
        self._channel.queue_bind(self.on_bindok, self._queue_name,
                                 self._exchange_name, self._routing_key)

    def on_bindok(self, unused_frame):
        """Invoked by pika when the Queue.Bind method has completed. At this
    point we will start consuming messages by calling start_consuming
    which will invoke the needed RPC commands to start the process.

    :param pika.frame.Method unused_frame: The Queue.BindOk response frame

    """
        LOGGER.info('Queue bound')
        self.start_consuming()

    def start_consuming(self):
        """This method sets up the consumer by first calling
    add_on_cancel_callback so that the object is notified if RabbitMQ
    cancels the consumer. It then issues the Basic.Consume RPC command
    which returns the consumer tag that is used to uniquely identify the
    consumer with RabbitMQ. We keep the value to use it when we want to
    cancel consuming. The on_message method is passed in as a callback pika
    will invoke when a message is fully received.

    """
        LOGGER.info('Issuing consumer related RPC commands')
        self.add_on_cancel_callback()
        self._consumer_tag = self._channel.basic_consume(
            self.on_message, self._queue_name)

    def add_on_cancel_callback(self):
        """Add a callback that will be invoked if RabbitMQ cancels the consumer
    for some reason. If RabbitMQ does cancel the consumer,
    on_consumer_cancelled will be invoked by pika.

    """
        LOGGER.info('Adding consumer cancellation callback')
        self._channel.add_on_cancel_callback(self.on_consumer_cancelled)

    def on_consumer_cancelled(self, method_frame):
        """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
    receiving messages.

    :param pika.frame.Method method_frame: The Basic.Cancel frame

    """
        LOGGER.info('Consumer was cancelled remotely, shutting down: %r',
                    method_frame)
        if self._channel:
            self._channel.close()

    def on_message(self, unused_channel, basic_deliver, properties, body):
        """Invoked by pika when a message is delivered from RabbitMQ. The
    channel is passed for your convenience. The basic_deliver object that
    is passed in carries the exchange, routing key, delivery tag and
    a redelivered flag for the message. The properties passed in is an
    instance of BasicProperties with the message properties and the body
    is the message that was sent.

    :param pika.channel.Channel unused_channel: The channel object
    :param pika.Spec.Basic.Deliver: basic_deliver method
    :param pika.Spec.BasicProperties: properties
    :param str|unicode body: The message body

    """
        LOGGER.info('Received message # %s from %s: %s',
                    basic_deliver.delivery_tag, properties.app_id, body)
        self.acknowledge_message(basic_deliver.delivery_tag)

    def acknowledge_message(self, delivery_tag):
        """Acknowledge the message delivery from RabbitMQ by sending a
    Basic.Ack RPC method for the delivery tag.

    :param int delivery_tag: The delivery tag from the Basic.Deliver frame

    """
        LOGGER.info('Acknowledging message %s', delivery_tag)
        try:
            self._channel.basic_ack(delivery_tag)
        except Exception:
            LOGGER.info('Acknowledgment requires an open channel')

    def nak_message(self, delivery_tag):
        LOGGER.info('Not acknowledging message %s', delivery_tag)
        try:
            self._channel.basic_nack(delivery_tag)
        except Exception:
            LOGGER.info('Negative acknowledgment requires an open channel')

    def stop_consuming(self):
        """Tell RabbitMQ that you would like to stop consuming by sending the
    Basic.Cancel RPC command.

    """
        if self._channel:
            LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
            self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)

    def on_cancelok(self, unused_frame):
        """This method is invoked by pika when RabbitMQ acknowledges the
    cancellation of a consumer. At this point we will close the channel.
    This will invoke the on_channel_closed method once the channel has been
    closed, which will in-turn close the connection.

    :param pika.frame.Method unused_frame: The Basic.CancelOk frame

    """
        LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer')
        self.close_channel()

    def close_channel(self):
        """Call to close the channel with RabbitMQ cleanly by issuing the
    Channel.Close RPC command.

    """
        LOGGER.info('Closing the channel')
        self._channel.close()

    def run(self):
        """Run the example consumer by connecting to RabbitMQ and then
    starting the IOLoop to block and allow the SelectConnection to operate.

    """
        if self._connection is None:
            self._connection = self.connect()
        self._connection.ioloop.start()

    def stop(self):
        """Cleanly shutdown the connection to RabbitMQ by stopping the consumer
    with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
    will be invoked by pika, which will then close the channel and
    connection. The IOLoop is started again because this method is invoked
    when CTRL-C is pressed raising a KeyboardInterrupt exception. This
    exception stops the IOLoop which needs to be running for pika to
    communicate with RabbitMQ. All of the commands issued prior to starting
    the IOLoop will be buffered but not processed.

    """
        LOGGER.info('Stopping')
        self._closing = True
        self.stop_consuming()
        self._connection.ioloop.stop()
        LOGGER.info('Stopped')

    def close_connection(self):
        """This method closes the connection to RabbitMQ."""
        LOGGER.info('Closing connection')
        self._connection.close()

    def start_thread(self):
        """Add a thread so that the run method doesn't steal our program control."""
        self._task_run_event.set()
        self._connection = self.connect()

        def thread_runner(self):
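            # Poll the run-event from inside the pika IOLoop: while the event
            # stays set, each tick re-arms the timeout; once stop_thread()
            # clears it, the next tick calls stop() on the consumer instead.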
            def timeout_callback():
                if not self._closing:
                    if self._task_run_event.is_set():
                        self._connection.add_timeout(
                            deadline=self.THREAD_TEMPO_SEC,
                            callback_method=timeout_callback)
                    else:
                        self.stop()

            self._connection.add_timeout(deadline=self.THREAD_TEMPO_SEC,
                                         callback_method=timeout_callback)
            self.run()

        thread = Thread(target=thread_runner, args=(self, ), daemon=True)
        thread.start()

    def stop_thread(self):
        self._task_run_event.clear()
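
A hedged usage sketch for the consumer above, mirroring the class docstring; the broker URL, credentials, and names are placeholders, not values from the original.

import time

consumer = SimplePikaTopicConsumer(
    amqp_url='amqp://guest:guest@localhost:5672/%2F',  # placeholder credentials
    routing_key='pub_thread.text',
    exchange_name='sex_change',
    queue_name='g_queue',
)
consumer.start_thread()     # the pika IOLoop runs on a daemon thread
time.sleep(30)              # consume messages for a while
consumer.stop_thread()      # next timeout tick sees the cleared event and stops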
Beispiel #52
0
class SettingsFrame(wx.Frame):
    def __init__(self, *args, **kwargs):
        # begin wxGlade: MyFrame.__init__
        self.tabs_factory = kwargs.pop('tabs_factory', [])
        self.cancel_hook = kwargs.pop('on_cancel', None)
        wx.Frame.__init__(self, None, wx.ID_ANY,
                          _("Embroidery Params")
                          )
        self.notebook = wx.Notebook(self, wx.ID_ANY)
        self.tabs = self.tabs_factory(self.notebook)

        for tab in self.tabs:
            tab.on_change(self.update_simulator)

        self.simulate_window = None
        self.simulate_thread = None
        self.simulate_refresh_needed = Event()

        wx.CallLater(1000, self.update_simulator)

        self.presets_box = wx.StaticBox(self, wx.ID_ANY, label=_("Presets"))

        self.preset_chooser = wx.ComboBox(self, wx.ID_ANY)
        self.update_preset_list()

        self.load_preset_button = wx.Button(self, wx.ID_ANY, _("Load"))
        self.load_preset_button.Bind(wx.EVT_BUTTON, self.load_preset)

        self.add_preset_button = wx.Button(self, wx.ID_ANY, _("Add"))
        self.add_preset_button.Bind(wx.EVT_BUTTON, self.add_preset)

        self.overwrite_preset_button = wx.Button(self, wx.ID_ANY, _("Overwrite"))
        self.overwrite_preset_button.Bind(wx.EVT_BUTTON, self.overwrite_preset)

        self.delete_preset_button = wx.Button(self, wx.ID_ANY, _("Delete"))
        self.delete_preset_button.Bind(wx.EVT_BUTTON, self.delete_preset)

        self.cancel_button = wx.Button(self, wx.ID_ANY, _("Cancel"))
        self.cancel_button.Bind(wx.EVT_BUTTON, self.cancel)
        self.Bind(wx.EVT_CLOSE, self.cancel)

        self.use_last_button = wx.Button(self, wx.ID_ANY, _("Use Last Settings"))
        self.use_last_button.Bind(wx.EVT_BUTTON, self.use_last)

        self.apply_button = wx.Button(self, wx.ID_ANY, _("Apply and Quit"))
        self.apply_button.Bind(wx.EVT_BUTTON, self.apply)

        self.__set_properties()
        self.__do_layout()
        # end wxGlade

    def update_simulator(self, tab=None):
        if self.simulate_window:
            self.simulate_window.stop()
            self.simulate_window.clear()

        if not self.simulate_thread or not self.simulate_thread.is_alive():
            self.simulate_thread = Thread(target=self.simulate_worker)
            self.simulate_thread.daemon = True
            self.simulate_thread.start()

        self.simulate_refresh_needed.set()

    def simulate_worker(self):
        while True:
            self.simulate_refresh_needed.wait()
            self.simulate_refresh_needed.clear()
            self.update_patches()

    def update_patches(self):
        patches = self.generate_patches()

        if patches and not self.simulate_refresh_needed.is_set():
            wx.CallAfter(self.refresh_simulator, patches)

    def refresh_simulator(self, patches):
        stitch_plan = patches_to_stitch_plan(patches)
        if self.simulate_window:
            self.simulate_window.stop()
            self.simulate_window.load(stitch_plan=stitch_plan)
        else:
            my_rect = self.GetRect()
            simulator_pos = my_rect.GetTopRight()
            simulator_pos.x += 5

            screen_rect = wx.Display(0).ClientArea
            max_width = screen_rect.GetWidth() - my_rect.GetWidth()
            max_height = screen_rect.GetHeight()

            try:
                self.simulate_window = EmbroiderySimulator(None, -1, _("Preview"),
                                                           simulator_pos,
                                                           size=(300, 300),
                                                           stitch_plan=stitch_plan,
                                                           on_close=self.simulate_window_closed,
                                                           target_duration=5,
                                                           max_width=max_width,
                                                           max_height=max_height)
            except Exception:
                error = traceback.format_exc()

                try:
                    # a window may have been created, so we need to destroy it
                    # or the app will never exit
                    wx.Window.FindWindowByName("Preview").Destroy()
                except Exception:
                    pass

                info_dialog(self, error, _("Internal Error"))
                return

            self.simulate_window.Show()
            wx.CallLater(10, self.Raise)

        wx.CallAfter(self.simulate_window.go)

    def simulate_window_closed(self):
        self.simulate_window = None

    def generate_patches(self):
        patches = []
        nodes = []

        for tab in self.tabs:
            tab.apply()

            if tab.enabled() and not tab.is_dependent_tab():
                nodes.extend(tab.nodes)

        # sort nodes into the proper stacking order
        nodes.sort(key=lambda node: node.order)

        try:
            for node in nodes:
                if self.simulate_refresh_needed.is_set():
                    # cancel; params were updated and we need to start over
                    return []

                # Making a copy of the embroidery element is an easy
                # way to drop the cache in the @cache decorators used
                # for many params in embroider.py.

                patches.extend(copy(node).embroider(None))
        except SystemExit:
            raise
        except Exception:
            # Ignore errors.  This can be things like incorrect paths for
            # satins or division by zero caused by incorrect param values.
            pass

        return patches

    def update_preset_list(self):
        preset_names = load_presets().keys()
        preset_names = [preset for preset in preset_names if preset != "__LAST__"]
        self.preset_chooser.SetItems(sorted(preset_names))

    def get_preset_name(self):
        preset_name = self.preset_chooser.GetValue().strip()
        if preset_name:
            return preset_name
        else:
            info_dialog(self, _("Please enter or select a preset name first."), caption=_('Preset'))
            return

    def check_and_load_preset(self, preset_name):
        preset = load_preset(preset_name)
        if not preset:
            info_dialog(self, _('Preset "%s" not found.') % preset_name, caption=_('Preset'))

        return preset

    def get_preset_data(self):
        preset = {}

        current_tab = self.tabs[self.notebook.GetSelection()]
        while current_tab.parent_tab:
            current_tab = current_tab.parent_tab

        tabs = [current_tab]
        if current_tab.paired_tab:
            tabs.append(current_tab.paired_tab)
            tabs.extend(current_tab.paired_tab.dependent_tabs)
        tabs.extend(current_tab.dependent_tabs)

        for tab in tabs:
            tab.save_preset(preset)

        return preset

    def add_preset(self, event, overwrite=False):
        preset_name = self.get_preset_name()
        if not preset_name:
            return

        if not overwrite and load_preset(preset_name):
            info_dialog(self, _('Preset "%s" already exists.  Please use another name or press "Overwrite"') % preset_name, caption=_('Preset'))
            return

        save_preset(preset_name, self.get_preset_data())
        self.update_preset_list()

        event.Skip()

    def overwrite_preset(self, event):
        self.add_preset(event, overwrite=True)


    def _load_preset(self, preset_name):
        preset = self.check_and_load_preset(preset_name)
        if not preset:
            return

        for tab in self.tabs:
            tab.load_preset(preset)


    def load_preset(self, event):
        preset_name = self.get_preset_name()
        if not preset_name:
            return

        self._load_preset(preset_name)

        event.Skip()


    def delete_preset(self, event):
        preset_name = self.get_preset_name()
        if not preset_name:
            return

        preset = self.check_and_load_preset(preset_name)
        if not preset:
            return

        delete_preset(preset_name)
        self.update_preset_list()
        self.preset_chooser.SetValue("")

        event.Skip()

    def _apply(self):
        for tab in self.tabs:
            tab.apply()

    def apply(self, event):
        self._apply()
        save_preset("__LAST__", self.get_preset_data())
        self.close()

    def use_last(self, event):
        self._load_preset("__LAST__")
        self.apply(event)

    def close(self):
        if self.simulate_window:
            self.simulate_window.stop()
            self.simulate_window.Close()

        self.Destroy()

    def cancel(self, event):
        if self.cancel_hook:
            self.cancel_hook()

        self.close()

    def __set_properties(self):
        # begin wxGlade: MyFrame.__set_properties
        self.notebook.SetMinSize((800, 600))
        self.preset_chooser.SetSelection(-1)
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: MyFrame.__do_layout
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        #self.sizer_3_staticbox.Lower()
        sizer_2 = wx.StaticBoxSizer(self.presets_box, wx.HORIZONTAL)
        sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
        for tab in self.tabs:
            self.notebook.AddPage(tab, tab.name)
        sizer_1.Add(self.notebook, 1, wx.EXPAND|wx.LEFT|wx.TOP|wx.RIGHT, 10)
        sizer_2.Add(self.preset_chooser, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
        sizer_2.Add(self.load_preset_button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
        sizer_2.Add(self.add_preset_button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
        sizer_2.Add(self.overwrite_preset_button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
        sizer_2.Add(self.delete_preset_button, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
        sizer_1.Add(sizer_2, 0, flag=wx.EXPAND|wx.ALL, border=10)
        sizer_3.Add(self.cancel_button, 0, wx.ALIGN_RIGHT|wx.RIGHT, 5)
        sizer_3.Add(self.use_last_button, 0, wx.ALIGN_RIGHT|wx.RIGHT|wx.BOTTOM, 5)
        sizer_3.Add(self.apply_button, 0, wx.ALIGN_RIGHT|wx.RIGHT|wx.BOTTOM, 5)
        sizer_1.Add(sizer_3, 0, wx.ALIGN_RIGHT, 0)
        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        self.Layout()
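
update_simulator() and simulate_worker() above implement a coalescing refresh: the Event acts as a dirty flag, so a burst of parameter changes collapses into at most one or two recomputes, and a result is discarded if the parameters changed again while it was being computed. A self-contained sketch of that pattern, with placeholder work functions:

from threading import Event, Thread
import time

refresh_needed = Event()

def expensive_recompute():
    time.sleep(0.5)              # placeholder for generate_patches()
    return "patches"

def publish(result):
    print("refreshed:", result)  # placeholder for wx.CallAfter(...)

def worker():
    while True:
        refresh_needed.wait()
        refresh_needed.clear()
        result = expensive_recompute()
        # drop stale results: another change arrived while we were computing
        if result and not refresh_needed.is_set():
            publish(result)

Thread(target=worker, daemon=True).start()
for _ in range(5):           # five rapid UI changes...
    refresh_needed.set()     # ...coalesce into one or two recomputes
    time.sleep(0.05)
time.sleep(1)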
Beispiel #53
0
def main():
    args = get_args()

    # Check for deprecated arguments
    if args.debug:
        log.warning(
            '--debug is deprecated. Please use --verbose instead.  Enabling --verbose'
        )
        args.verbose = 'nofile'

    # Add file logging if enabled
    if args.verbose and args.verbose != 'nofile':
        filelog = logging.FileHandler(args.verbose)
        filelog.setFormatter(
            logging.Formatter(
                '%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s'
            ))
        logging.getLogger('').addHandler(filelog)
    if args.very_verbose and args.very_verbose != 'nofile':
        filelog = logging.FileHandler(args.very_verbose)
        filelog.setFormatter(
            logging.Formatter(
                '%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s'
            ))
        logging.getLogger('').addHandler(filelog)

    # Check if we have the proper encryption library file and get its path
    encryption_lib_path = get_encryption_lib_path(args)
    if encryption_lib_path == "":
        sys.exit(1)

    if args.verbose or args.very_verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    # Let's not forget to run Grunt / Only needed when running with webserver
    if not args.no_server:
        if not os.path.exists(
                os.path.join(os.path.dirname(__file__), 'static/dist')):
            log.critical(
                'Missing front-end assets (static/dist) -- please run "npm install && npm run build" before starting the server'
            )
            sys.exit()

    # These are very noisy, let's shush them up a bit
    logging.getLogger('peewee').setLevel(logging.INFO)
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('pgoapi.pgoapi').setLevel(logging.WARNING)
    logging.getLogger('pgoapi.rpc_api').setLevel(logging.INFO)
    logging.getLogger('werkzeug').setLevel(logging.ERROR)

    config['parse_pokemon'] = not args.no_pokemon
    config['parse_pokestops'] = not args.no_pokestops
    config['parse_gyms'] = not args.no_gyms

    # Turn these back up if debugging
    if args.verbose or args.very_verbose:
        logging.getLogger('pgoapi').setLevel(logging.DEBUG)
    if args.very_verbose:
        logging.getLogger('peewee').setLevel(logging.DEBUG)
        logging.getLogger('requests').setLevel(logging.DEBUG)
        logging.getLogger('pgoapi.pgoapi').setLevel(logging.DEBUG)
        logging.getLogger('pgoapi.rpc_api').setLevel(logging.DEBUG)
        logging.getLogger('rpc_api').setLevel(logging.DEBUG)
        logging.getLogger('werkzeug').setLevel(logging.DEBUG)

    # use lat/lng directly if matches such a pattern
    prog = re.compile(r"^(-?\d+\.\d+),?\s?(-?\d+\.\d+)$")
    res = prog.match(args.location)
    if res:
        log.debug('Using coordinates from CLI directly')
        position = (float(res.group(1)), float(res.group(2)), 0)
    else:
        log.debug('Looking up coordinates in API')
        position = util.get_pos_by_name(args.location)

    # Use the latitude and longitude to get the local altitude from Google
    try:
        url = 'https://maps.googleapis.com/maps/api/elevation/json?locations={},{}'.format(
            str(position[0]), str(position[1]))
        altitude = requests.get(url).json()[u'results'][0][u'elevation']
        log.debug('Local altitude is: %sm', altitude)
        position = (position[0], position[1], altitude)
    except (requests.exceptions.RequestException, IndexError, KeyError):
        log.error('Unable to retrieve altitude from Google APIs; setting to 0')

    if not any(position):
        log.error('Could not get a position by name, aborting')
        sys.exit()

    log.info('Parsed location is: %.4f/%.4f/%.4f (lat/lng/alt)', position[0],
             position[1], position[2])

    if args.no_pokemon:
        log.info('Parsing of Pokemon disabled')
    if args.no_pokestops:
        log.info('Parsing of Pokestops disabled')
    if args.no_gyms:
        log.info('Parsing of Gyms disabled')

    config['LOCALE'] = args.locale
    config['CHINA'] = args.china

    app = Pogom(__name__)
    db = init_database(app)
    if args.clear_db:
        log.info('Clearing database')
        if args.db_type == 'mysql':
            drop_tables(db)
        elif os.path.isfile(args.db):
            os.remove(args.db)
    create_tables(db)

    app.set_current_location(position)

    # Control the search status (running or not) across threads
    pause_bit = Event()
    pause_bit.clear()

    # Setup the location tracking queue and push the first location on
    new_location_queue = Queue()
    new_location_queue.put(position)

    # DB Updates
    db_updates_queue = Queue()

    # Thread(s) to process database updates
    for i in range(args.db_threads):
        log.debug('Starting db-updater worker thread %d', i)
        t = Thread(target=db_updater,
                   name='db-updater-{}'.format(i),
                   args=(args, db_updates_queue))
        t.daemon = True
        t.start()

    # db cleaner; really only need one ever
    t = Thread(target=clean_db_loop, name='db-cleaner', args=(args, ))
    t.daemon = True
    t.start()

    # WH Updates
    wh_updates_queue = Queue()

    # Thread to process webhook updates
    for i in range(args.wh_threads):
        log.debug('Starting wh-updater worker thread %d', i)
        t = Thread(target=wh_updater,
                   name='wh-updater-{}'.format(i),
                   args=(args, wh_updates_queue))
        t.daemon = True
        t.start()

    if not args.only_server:

        # Check all proxies before continue so we know they are good
        if args.proxy:

            # Overwrite old args.proxy with new working list
            args.proxy = check_proxies(args)

        # Gather the pokemons!

        # check the sort of scan
        if args.spawnpoint_scanning:
            mode = 'sps'
        else:
            mode = 'hex'

        # attempt to dump the spawn points (do this before starting threads or endure the woe)
        if args.spawnpoint_scanning and args.spawnpoint_scanning != 'nofile' and args.dump_spawnpoints:
            with open(args.spawnpoint_scanning, 'w+') as file:
                log.info('Saving spawn points to %s', args.spawnpoint_scanning)
                spawns = Pokemon.get_spawnpoints_in_hex(
                    position, args.step_limit)
                file.write(json.dumps(spawns))
                log.info('Finished exporting spawn points')

        argset = (args, mode, new_location_queue, pause_bit,
                  encryption_lib_path, db_updates_queue, wh_updates_queue)

        log.debug('Starting a %s search thread', mode)
        search_thread = Thread(target=search_overseer_thread,
                               name='search-overseer',
                               args=argset)
        search_thread.daemon = True
        search_thread.start()

    if args.cors:
        CORS(app)

    # No more stale JS
    init_cache_busting(app)

    app.set_search_control(pause_bit)
    app.set_location_queue(new_location_queue)

    config['ROOT_PATH'] = app.root_path
    config['GMAPS_KEY'] = args.gmaps_key

    if args.no_server:
        # This loop allows for ctrl-c interrupts to work since gevent won't be holding the program open
        while search_thread.is_alive():
            time.sleep(60)
    else:
        # run gevent server
        gevent_log = None
        if args.verbose or args.very_verbose:
            gevent_log = log
        if args.ssl_certificate and args.ssl_privatekey \
                and os.path.exists(args.ssl_certificate) and os.path.exists(args.ssl_privatekey):
            http_server = pywsgi.WSGIServer((args.host, args.port),
                                            app,
                                            log=gevent_log,
                                            error_log=log,
                                            keyfile=args.ssl_privatekey,
                                            certfile=args.ssl_certificate,
                                            ssl_version=ssl.PROTOCOL_TLSv1_2)
            log.info('Web server in SSL mode, listening at https://%s:%d',
                     args.host, args.port)
        else:
            http_server = pywsgi.WSGIServer((args.host, args.port),
                                            app,
                                            log=gevent_log,
                                            error_log=log)
            log.info('Web server listening at http://%s:%d', args.host,
                     args.port)
        # run it
        try:
            http_server.serve_forever()
        except KeyboardInterrupt:
            pass
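
The pause_bit Event created above is the cross-thread search control: the web app's handlers set or clear it, and the search overseer polls it. A reduced sketch of that wiring, with a placeholder overseer loop:

from threading import Event, Thread
import time

pause_bit = Event()
pause_bit.clear()            # cleared == searching

def search_overseer(pause_bit):
    while True:
        if pause_bit.is_set():
            time.sleep(1)    # paused from the web UI
            continue
        print('scanning...')
        time.sleep(1)

Thread(target=search_overseer, args=(pause_bit,), daemon=True).start()
time.sleep(2)
pause_bit.set()              # e.g. what a search-control web handler would do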
Beispiel #54
0
class P4PProvider(QObject):
    callbacksignal = pyqtSignal()
    def __init__(self):
        QObject.__init__(self)
        self.callbacksignal.connect(self.mycallback)
        self.callbackDoneEvent = Event()
        self.firstCallback = True
        self.isClosed = True
        self.monitorRateOnly = False
        self.ncallbacks = 0
        self.lastTime = time.time() 
        
    def start(self):
        self.ctxt = Context('pva')
        self.firstCallback = True
        self.isClosed = False
        self.subscription = self.ctxt.monitor(
              getDynamicRecordName(),
              self.p4pcallback,
              request='field()',
              notify_disconnect=True)
    def stop(self):
        self.isClosed = True
        self.ctxt.close()

    def done(self):
        pass

    def callback(self, arg):
        self.viewer.callback(arg)

    def p4pcallback(self, arg):
        if self.monitorRateOnly:
            self.ncallbacks += 1
            timenow = time.time()
            timediff = timenow - self.lastTime
            if timediff < 1:
                return
            print('rate =', round(self.ncallbacks / timediff))
            self.lastTime = timenow
            self.ncallbacks = 0
            return
        if self.isClosed:
            return
        self.struct = arg
        self.callbacksignal.emit()
        self.callbackDoneEvent.wait()
        self.callbackDoneEvent.clear()
    def mycallback(self):
        struct = self.struct
        arg = dict()
        try:
            argtype = str(type(struct))
            if argtype.find('Disconnected') >= 0:
                arg["status"] = "disconnected"
                self.callback(arg)
                self.firstCallback = True
                self.callbackDoneEvent.set()
                return
            if self.firstCallback:
                arg = dict()
                arg["status"] = "connected"
                self.callback(arg)
                self.firstCallback = False
            data = DynamicRecordData()
            data.name = struct['name']
            data.x = struct['x']
            data.y = struct['y']
            data.xmin = struct['xmin']
            data.xmax = struct['xmax']
            data.ymin = struct['ymin']
            data.ymax = struct['ymax']
            arg = dict()
            arg['value'] = data
            self.callback(arg)
            self.callbackDoneEvent.set()
            return
        except Exception as error:
            arg["exception"] = repr(error)
            self.callback(arg)
            self.callbackDoneEvent.set()
            return
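
The P4PProvider above bridges a pvAccess monitor thread into the Qt GUI thread: the monitor callback stores the data, emits a pyqtSignal (whose slot runs on the GUI thread), and then blocks on a threading.Event until the slot signals completion, so updates can never pile up faster than the GUI consumes them. Below is a minimal sketch of just that handshake, independent of p4p; PyQt5 and the names Worker/on_data/produce are illustrative assumptions, not from the original:

import sys
import threading

from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QApplication


class Worker(QObject):
    data_ready = pyqtSignal()

    def __init__(self):
        super().__init__()
        self.done = threading.Event()
        self.data = None
        self.seen = 0
        self.data_ready.connect(self.on_data)  # slot runs on the GUI thread

    def produce(self):                 # runs on a background thread
        for i in range(3):
            self.data = i
            self.data_ready.emit()     # queued call into the GUI thread
            self.done.wait()           # block until the slot has finished
            self.done.clear()

    def on_data(self):                 # executes on the GUI thread
        print('GUI thread got', self.data)
        self.seen += 1
        self.done.set()                # release the producer
        if self.seen == 3:
            QApplication.quit()


app = QApplication(sys.argv)
w = Worker()
threading.Thread(target=w.produce, daemon=True).start()
sys.exit(app.exec_())
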
Example #55
class BrowserView:
    instances = {}

    class JSBridge:
        def __init__(self, api_instance, parent_uid):
            self.api = api_instance
            self.uid = uuid1().hex[:8]
            self.parent_uid = parent_uid

        def call(self, func_name, param):
            if param == 'undefined':
                param = None
            return _js_bridge_call(self.parent_uid, self.api, func_name, param)

    def __init__(self, uid, title, url, width, height, resizable, fullscreen, min_size,
                 confirm_quit, background_color, debug, js_api, text_select, webview_ready):
        BrowserView.instances[uid] = self
        self.uid = uid

        self.webview_ready = webview_ready
        self.is_fullscreen = False
        self.js_result_semaphores = []
        self.eval_js_lock = Lock()
        self.load_event = Event()
        self.load_event.clear()

        glib.threads_init()
        self.window = gtk.Window(title=title)

        if resizable:
            self.window.set_size_request(min_size[0], min_size[1])
            self.window.resize(width, height)
        else:
            self.window.set_size_request(width, height)

        self.window.set_resizable(resizable)
        self.window.set_position(gtk.WindowPosition.CENTER)

        # Set window background color
        style_provider = gtk.CssProvider()
        style_provider.load_from_data(
            'GtkWindow {{ background-color: {}; }}'.format(background_color).encode()
        )
        gtk.StyleContext.add_provider_for_screen(
            Gdk.Screen.get_default(),
            style_provider,
            gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
        )

        scrolled_window = gtk.ScrolledWindow()
        self.window.add(scrolled_window)

        if confirm_quit:
            self.window.connect('delete-event', self.on_destroy)
        else:
            self.window.connect('delete-event', self.close_window)

        if js_api:
            self.js_bridge = BrowserView.JSBridge(js_api, self.uid)
        else:
            self.js_bridge = None

        self.text_select = text_select

        self.webview = webkit.WebView()
        self.webview.connect('notify::visible', self.on_webview_ready)
        self.webview.connect('document-load-finished', self.on_load_finish)
        self.webview.connect('status-bar-text-changed', self.on_status_change)
        self.webview.connect('new-window-policy-decision-requested', self.on_new_window_request)

        if debug:
            self.webview.props.settings.props.enable_developer_extras = True
            self.webview.get_inspector().connect('inspect-web-view', self.on_inspect_webview)
        else:
            self.webview.props.settings.props.enable_default_context_menu = False

        self.webview.props.settings.props.javascript_can_access_clipboard = True
        self.webview.props.opacity = 0.0
        scrolled_window.add(self.webview)

        if url is not None:
            self.webview.load_uri(url)
        elif js_api is None:
            self.load_event.set()

        if fullscreen:
            self.toggle_fullscreen()

    def close_window(self, *data):
        while gtk.events_pending():
            gtk.main_iteration()

        self.window.destroy()
        del BrowserView.instances[self.uid]

        try:    # Close inspector if open
            BrowserView.instances[self.uid + '-inspector'].window.destroy()
            del BrowserView.instances[self.uid + '-inspector']
        except KeyError:
            pass

        if BrowserView.instances == {}:
            gtk.main_quit()

        for s in self.js_result_semaphores:
            s.release()

    def on_destroy(self, widget=None, *data):
        dialog = gtk.MessageDialog(parent=self.window,
                                   flags=gtk.DialogFlags.MODAL | gtk.DialogFlags.DESTROY_WITH_PARENT,
                                   type=gtk.MessageType.QUESTION, buttons=gtk.ButtonsType.OK_CANCEL,
                                   message_format=localization['global.quitConfirmation'])
        result = dialog.run()
        if result == gtk.ResponseType.OK:
            self.close_window()

        dialog.destroy()
        return True

    def on_webview_ready(self, arg1, arg2):
        glib.idle_add(self.webview_ready.set)

    def on_load_finish(self, webview, webframe):
        # Show the webview if it's not already visible
        if not webview.props.opacity:
            glib.idle_add(webview.set_opacity, 1.0)

        if not self.text_select:
            webview.execute_script(disable_text_select)

        if self.js_bridge:
            self._set_js_api()
        else:
            self.load_event.set()

    def on_status_change(self, webview, status):
        try:
            delim = '_' + self.js_bridge.uid + '_'
        except AttributeError:
            return

        # Check if status was updated by a JSBridge call
        if status.startswith(delim):
            _, func_name, param = status.split(delim)
            return_val = self.js_bridge.call(func_name, param)
            # Give back the return value to JS as a string
            code = 'pywebview._bridge.return_val = "{0}";'.format(escape_string(str(return_val)))
            webview.execute_script(code)

    def on_inspect_webview(self, inspector, webview):
        title = 'Web Inspector - {}'.format(self.window.get_title())
        uid = self.uid + '-inspector'

        inspector = BrowserView(uid, title, '', 700, 500, True, False, (300,200),
                                False, '#fff', False, None, True, self.webview_ready)
        inspector.show()
        return inspector.webview

    def on_new_window_request(self, webview, frame, request, action, decision, *data):
        if action.get_target_frame() == '_blank':
            webbrowser.open(request.get_uri(), 2, True)
        decision.ignore()

    def show(self):
        self.window.show_all()

        if gtk.main_level() == 0:
            gtk.main()

    def destroy(self):
        self.window.emit('delete-event', Gdk.Event())

    def set_title(self, title):
        self.window.set_title(title)

    def toggle_fullscreen(self):
        if self.is_fullscreen:
            self.window.unfullscreen()
        else:
            self.window.fullscreen()

        self.is_fullscreen = not self.is_fullscreen

    def create_file_dialog(self, dialog_type, directory, allow_multiple, save_filename, file_types):
        if dialog_type == FOLDER_DIALOG:
            gtk_dialog_type = gtk.FileChooserAction.SELECT_FOLDER
            title = localization['linux.openFolder']
            button = gtk.STOCK_OPEN
        elif dialog_type == OPEN_DIALOG:
            gtk_dialog_type = gtk.FileChooserAction.OPEN
            if allow_multiple:
                title = localization['linux.openFiles']
            else:
                title = localization['linux.openFile']

            button = gtk.STOCK_OPEN
        elif dialog_type == SAVE_DIALOG:
            gtk_dialog_type = gtk.FileChooserAction.SAVE
            title = localization['global.saveFile']
            button = gtk.STOCK_SAVE

        dialog = gtk.FileChooserDialog(title, self.window, gtk_dialog_type,
                                       (gtk.STOCK_CANCEL, gtk.ResponseType.CANCEL, button, gtk.ResponseType.OK))

        dialog.set_select_multiple(allow_multiple)
        dialog.set_current_folder(directory)
        self._add_file_filters(dialog, file_types)

        if dialog_type == SAVE_DIALOG:
            dialog.set_current_name(save_filename)

        response = dialog.run()

        if response == gtk.ResponseType.OK:
            file_name = dialog.get_filenames()
        else:
            file_name = None

        dialog.destroy()

        return file_name

    def _add_file_filters(self, dialog, file_types):
        for s in file_types:
            description, extensions = parse_file_type(s)

            f = gtk.FileFilter()
            f.set_name(description)
            for e in extensions.split(';'):
                f.add_pattern(e)

            dialog.add_filter(f)

    def get_current_url(self):
        self.load_event.wait()
        uri = self.webview.get_uri()
        return uri

    def load_url(self, url):
        self.load_event.clear()
        self.webview.load_uri(url)

    def load_html(self, content, base_uri):
        self.load_event.clear()
        self.webview.load_string(content, 'text/html', 'utf-8', base_uri)

    def evaluate_js(self, script):
        def _evaluate_js():
            self.webview.execute_script(code)
            result_semaphore.release()

        self.eval_js_lock.acquire()
        result_semaphore = Semaphore(0)
        self.js_result_semaphores.append(result_semaphore)
        # Backup the doc title and store the result in it with a custom prefix
        unique_id = uuid1().hex
        code = 'window.oldTitle{0} = document.title; document.title = {1};'.format(unique_id, script)

        self.load_event.wait()
        glib.idle_add(_evaluate_js)
        result_semaphore.acquire()

        if not gtk.main_level():
            # Webview has been closed, don't proceed
            return None

        result = self.webview.get_title()
        if result is None or result in ('undefined', 'null'):
            result = None
        elif result != '':
            result = json.loads(result)

        # Restore document title and return
        code = 'document.title = window.oldTitle{0}'.format(unique_id)
        glib.idle_add(_evaluate_js)
        self.js_result_semaphores.remove(result_semaphore)
        self.eval_js_lock.release()

        return result


    def _set_js_api(self):
        def create_bridge():
            # Make the `call` method write the function name and param to the
            # `status` attribute of the JS window, delimited by a unique token.
            # The return value will be passed back to the `return_val` attribute
            # of the bridge by the on_status_change handler.
            code = """
            window.pywebview._bridge.call = function(funcName, param) {{
                window.status = "_{0}_" + funcName + "_{0}_" + param;
                return this.return_val;
            }};""".format(self.js_bridge.uid)

            # Create the `pywebview` JS api object
            self.webview.execute_script(parse_api_js(self.js_bridge.api))
            self.webview.execute_script(code)
            self.load_event.set()

        glib.idle_add(create_bridge)
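
evaluate_js above returns a result from the GTK main loop to the calling thread with a Semaphore: the caller schedules a callback via glib.idle_add and blocks on acquire() until that callback releases the semaphore (the JS result itself travels through document.title). Here is a thread-only sketch of that scheduling handshake, with glib.idle_add replaced by a hypothetical job queue; all names are illustrative:

import queue
import threading

jobs = queue.Queue()            # stands in for glib.idle_add + the GTK main loop


def main_loop():
    while True:
        job = jobs.get()
        if job is None:         # sentinel: stop the loop
            break
        job()                   # run scheduled callbacks on the "main" thread


def evaluate(expression):
    result = {}
    done = threading.Semaphore(0)

    def _run():                 # executes on the main-loop thread
        result['value'] = eval(expression)  # stands in for execute_script
        done.release()          # wake the blocked caller

    jobs.put(_run)              # schedule, like glib.idle_add(_run)
    done.acquire()              # block until the main loop ran the job
    return result['value']


loop = threading.Thread(target=main_loop)
loop.start()
print(evaluate('21 * 2'))       # -> 42
jobs.put(None)
loop.join()
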
Example #56
class Module(MgrModule):
    metadata_keys = [
        "arch",
        "ceph_version",
        "os",
        "cpu",
        "kernel_description",
        "kernel_version",
        "distro_description",
        "distro"
    ]

    MODULE_OPTIONS = [
        Option(name='url',
               type='str',
               default='https://telemetry.ceph.com/report'),
        Option(name='device_url',
               type='str',
               default='https://telemetry.ceph.com/device'),
        Option(name='enabled',
               type='bool',
               default=False),
        Option(name='last_opt_revision',
               type='int',
               default=1),
        Option(name='leaderboard',
               type='bool',
               default=False),
        Option(name='description',
               type='str',
               default=None),
        Option(name='contact',
               type='str',
               default=None),
        Option(name='organization',
               type='str',
               default=None),
        Option(name='proxy',
               type='str',
               default=None),
        Option(name='interval',
               type='int',
               default=24,
               min=8),
        Option(name='channel_basic',
               type='bool',
               default=True,
               desc='Share basic cluster information (size, version)'),
        Option(name='channel_ident',
               type='bool',
               default=False,
               desc='Share a user-provided description and/or contact email for the cluster'),
        Option(name='channel_crash',
               type='bool',
               default=True,
               desc='Share metadata about Ceph daemon crashes (version, stack traces, etc)'),
        Option(name='channel_device',
               type='bool',
               default=True,
               desc=('Share device health metrics '
                     '(e.g., SMART data, minus potentially identifying info like serial numbers)')),
        Option(name='channel_perf',
               type='bool',
               default=False,
               desc='Share perf counter metrics summed across the whole cluster'),
    ]

    @property
    def config_keys(self) -> Dict[str, OptionValue]:
        return dict((o['name'], o.get('default', None)) for o in self.MODULE_OPTIONS)

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super(Module, self).__init__(*args, **kwargs)
        self.event = Event()
        self.run = False
        self.last_upload: Optional[int] = None
        self.last_report: Dict[str, Any] = dict()
        self.report_id: Optional[str] = None
        self.salt: Optional[str] = None
        # for mypy which does not run the code
        if TYPE_CHECKING:
            self.url = ''
            self.device_url = ''
            self.enabled = False
            self.last_opt_revision = 0
            self.leaderboard = False
            self.interval = 0
            self.proxy = ''
            self.channel_basic = True
            self.channel_ident = False
            self.channel_crash = True
            self.channel_device = True
            self.channel_perf = False

    def config_notify(self) -> None:
        for opt in self.MODULE_OPTIONS:
            setattr(self,
                    opt['name'],
                    self.get_module_option(opt['name']))
            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))
        # wake up serve() thread
        self.event.set()

    def load(self) -> None:
        last_upload = self.get_store('last_upload', None)
        if last_upload is None:
            self.last_upload = None
        else:
            self.last_upload = int(last_upload)

        report_id = self.get_store('report_id', None)
        if report_id is None:
            self.report_id = str(uuid.uuid4())
            self.set_store('report_id', self.report_id)
        else:
            self.report_id = report_id

        salt = self.get_store('salt', None)
        if salt is None:
            self.salt = str(uuid.uuid4())
            self.set_store('salt', self.salt)
        else:
            self.salt = salt

    def gather_osd_metadata(self,
                            osd_map: Dict[str, List[Dict[str, int]]]) -> Dict[str, Dict[str, int]]:
        keys = ["osd_objectstore", "rotational"]
        keys += self.metadata_keys

        metadata: Dict[str, Dict[str, int]] = dict()
        for key in keys:
            metadata[key] = defaultdict(int)

        for osd in osd_map['osds']:
            res = self.get_metadata('osd', str(osd['osd']))
            if res is None:
                self.log.debug('Could not get metadata for osd.%s' % str(osd['osd']))
                continue
            for k, v in res.items():
                if k not in keys:
                    continue

                metadata[k][v] += 1

        return metadata

    def gather_mon_metadata(self,
                            mon_map: Dict[str, List[Dict[str, str]]]) -> Dict[str, Dict[str, int]]:
        keys = list()
        keys += self.metadata_keys

        metadata: Dict[str, Dict[str, int]] = dict()
        for key in keys:
            metadata[key] = defaultdict(int)

        for mon in mon_map['mons']:
            res = self.get_metadata('mon', mon['name'])
            if res is None:
                self.log.debug('Could not get metadata for mon.%s' % (mon['name']))
                continue
            for k, v in res.items():
                if k not in keys:
                    continue

                metadata[k][v] += 1

        return metadata

    def gather_crush_info(self) -> Dict[str, Union[int,
                                                   bool,
                                                   List[int],
                                                   Dict[str, int],
                                                   Dict[int, int]]]:
        osdmap = self.get_osdmap()
        crush_raw = osdmap.get_crush()
        crush = crush_raw.dump()

        BucketKeyT = TypeVar('BucketKeyT', int, str)

        def inc(d: Dict[BucketKeyT, int], k: BucketKeyT) -> None:
            if k in d:
                d[k] += 1
            else:
                d[k] = 1

        device_classes: Dict[str, int] = {}
        for dev in crush['devices']:
            inc(device_classes, dev.get('class', ''))

        bucket_algs: Dict[str, int] = {}
        bucket_types: Dict[str, int] = {}
        bucket_sizes: Dict[int, int] = {}
        for bucket in crush['buckets']:
            if '~' in bucket['name']:  # ignore shadow buckets
                continue
            inc(bucket_algs, bucket['alg'])
            inc(bucket_types, bucket['type_id'])
            inc(bucket_sizes, len(bucket['items']))

        return {
            'num_devices': len(crush['devices']),
            'num_types': len(crush['types']),
            'num_buckets': len(crush['buckets']),
            'num_rules': len(crush['rules']),
            'device_classes': list(device_classes.values()),
            'tunables': crush['tunables'],
            'compat_weight_set': '-1' in crush['choose_args'],
            'num_weight_sets': len(crush['choose_args']),
            'bucket_algs': bucket_algs,
            'bucket_sizes': bucket_sizes,
            'bucket_types': bucket_types,
        }

    def gather_configs(self) -> Dict[str, List[str]]:
        # cluster config options
        cluster = set()
        r, outb, outs = self.mon_command({
            'prefix': 'config dump',
            'format': 'json'
        })
        if r != 0:
            return {}
        try:
            dump = json.loads(outb)
        except json.decoder.JSONDecodeError:
            return {}
        for opt in dump:
            name = opt.get('name')
            if name:
                cluster.add(name)
        # daemon-reported options (which may include ceph.conf)
        active = set()
        ls = self.get("modified_config_options")
        for opt in ls.get('options', {}):
            active.add(opt)
        return {
            'cluster_changed': sorted(list(cluster)),
            'active_changed': sorted(list(active)),
        }

    def get_stat_sum_per_pool(self) -> List[dict]:
        # Initialize 'result' list
        result: List[dict] = []

        # Create a list of pool ids that will later act as a queue, i.e.:
        #   pool_queue = [1, 2, 3]
        osd_map = self.get('osd_map')
        pool_queue = []
        for pool in osd_map['pools']:
            pool_queue.append(str(pool['pool']))

        # Populate 'result', i.e.:
        #   {
        #       'pool_id': '1'
        #       'stats_sum': {
        #           'num_bytes': 36,
        #           'num_bytes_hit_set_archive': 0,
        #           ...
        #           'num_write_kb': 0
        #           }
        #       }
        #   }
        while pool_queue:
            # Pop the current pool id out of pool_queue
            curr_pool_id = pool_queue.pop(0)

            # Initialize a dict that will hold aggregated stats for the current pool
            compiled_stats_dict: Dict[str, Any] = defaultdict(lambda: defaultdict(int))

            # Find out which pgs belong to the current pool and add up
            # their stats
            pg_dump = self.get('pg_dump')
            for pg in pg_dump['pg_stats']:
                pool_id = pg['pgid'].split('.')[0]
                if pool_id == curr_pool_id:
                    compiled_stats_dict['pool_id'] = int(pool_id)
                    for metric in pg['stat_sum']:
                        compiled_stats_dict['stats_sum'][metric] += pg['stat_sum'][metric]
                else:
                    continue
            # 'compiled_stats_dict' now holds all stats pertaining to
            # the current pool. Adding it to the list of results.
            result.append(compiled_stats_dict)

        return result

    def get_osd_histograms(self) -> List[Dict[str, dict]]:
        # Initialize result dict
        result: Dict[str, dict] = defaultdict(lambda: defaultdict(
                                              lambda: defaultdict(
                                              lambda: defaultdict(
                                              lambda: defaultdict(
                                              lambda: defaultdict(int))))))

        # Get list of osd ids from the metadata
        osd_metadata = self.get('osd_metadata')
        
        # Grab output from the "osd.x perf histogram dump" command
        for osd_id in osd_metadata:
            cmd_dict = {
                'prefix': 'perf histogram dump',
                'id': str(osd_id),
                'format': 'json'
            }
            r, outb, outs = self.osd_command(cmd_dict)
            # Check for invalid calls
            if r != 0:
                self.log.debug("Invalid command dictionary.")
                continue
            else:
                try:
                    # This is where the histograms will land if there are any.
                    dump = json.loads(outb)

                    for histogram in dump['osd']:
                        # Log axis information. There are two axes, each represented
                        # as a dictionary. Both dictionaries are contained inside a
                        # list called 'axes'.
                        axes = []
                        for axis in dump['osd'][histogram]['axes']:

                            # This is the dict that contains information for an individual
                            # axis. It will be appended to the 'axes' list at the end.
                            axis_dict: Dict[str, Any] = defaultdict()

                            # Collecting information for buckets, min, name, etc.
                            axis_dict['buckets'] = axis['buckets']
                            axis_dict['min'] = axis['min']
                            axis_dict['name'] = axis['name']
                            axis_dict['quant_size'] = axis['quant_size']
                            axis_dict['scale_type'] = axis['scale_type']

                            # Collecting ranges; placing them in lists to
                            # improve readability later on.
                            ranges = []
                            for _range in axis['ranges']:
                                _max, _min = None, None
                                if 'max' in _range:
                                    _max = _range['max']
                                if 'min' in _range:
                                    _min = _range['min']
                                ranges.append([_min, _max])
                            axis_dict['ranges'] = ranges

                            # Now that 'axis_dict' contains all the appropriate
                            # information for the current axis, append it to the 'axes' list.
                            # There will end up being two axes in the 'axes' list, since the
                            # histograms are 2D.
                            axes.append(axis_dict)

                        # Add the 'axes' list, containing both axes, to result.
                        # At this point, you will see that the name of the key is the string
                        # form of our axes list (str(axes)). This is there so that histograms
                        # with different axis configs will not be combined.
                        # The key names are modified into something more readable ('config_x')
                        # down below.
                        result[str(axes)][histogram]['axes'] = axes

                        # Collect current values and make sure they are in
                        # integer form.
                        values = []
                        for value_list in dump['osd'][histogram]['values']:
                            values.append([int(v) for v in value_list])

                        # Aggregate values. If 'values' have already been initialized,
                        # we can safely add.
                        if 'values' in result[str(axes)][histogram]:
                            for i in range(len(values)):
                                for j in range(len(values[i])):
                                    values[i][j] += result[str(axes)][histogram]['values'][i][j]

                        # Add the values to result.
                        result[str(axes)][histogram]['values'] = values

                        # Update num_combined_osds
                        if 'num_combined_osds' not in result[str(axes)][histogram]:
                            result[str(axes)][histogram]['num_combined_osds'] = 1
                        else:
                            result[str(axes)][histogram]['num_combined_osds'] += 1

                # Sometimes, json errors occur if you give it an empty string.
                # I am also putting in a catch for a KeyError since it could
                # happen where the code is assuming that a key exists in the
                # schema when it doesn't. In either case, we'll handle that
                # by returning an empty dict.
                except (json.decoder.JSONDecodeError, KeyError) as e:
                    self.log.debug("Error caught: {}".format(e))
                    return list()

        return list(result.values())
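
    # Aggregation rule of get_osd_histograms, illustrated with made-up values:
    # two OSDs reporting 2x2 histograms with identical axis configs are merged
    # element-wise under the key str(axes):
    #
    #     osd.0 values: [[1, 2], [3, 4]]
    #     osd.1 values: [[5, 6], [7, 8]]
    #     merged:       [[6, 8], [10, 12]], num_combined_osds = 2
    #
    # Histograms whose axis configs differ stay in separate entries, so
    # incompatible grids are never summed.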

    def get_io_rate(self) -> dict:
        return self.get('io_rate')

    def gather_crashinfo(self) -> List[Dict[str, str]]:
        crashlist: List[Dict[str, str]] = list()
        errno, crashids, err = self.remote('crash', 'ls')
        if errno:
            return crashlist
        for crashid in crashids.split():
            errno, crashinfo, err = self.remote('crash', 'do_info', crashid)
            if errno:
                continue
            c = json.loads(crashinfo)

            # redact hostname
            del c['utsname_hostname']

            # entity_name might have more than one '.', beware
            (etype, eid) = c.get('entity_name', '').split('.', 1)
            m = hashlib.sha1()
            assert self.salt
            m.update(self.salt.encode('utf-8'))
            m.update(eid.encode('utf-8'))
            m.update(self.salt.encode('utf-8'))
            c['entity_name'] = etype + '.' + m.hexdigest()

            # redact final line of python tracebacks, as the exception
            # payload may contain identifying information
            if 'mgr_module' in c:
                c['backtrace'][-1] = '<redacted>'

            crashlist.append(c)
        return crashlist
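
    # The anonymization above, illustrated: with per-cluster salt s, an entity
    # name such as 'osd.ceph-node-3' is reported as
    # 'osd.' + sha1(s + 'ceph-node-3' + s).hexdigest(). The same id therefore
    # always maps to the same digest within one cluster (crash reports stay
    # correlatable over time), while the random salt prevents correlating
    # entities across clusters.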

    def gather_perf_counters(self) -> Dict[str, dict]:
        # Extract perf counter data with get_all_perf_counters(), a method
        # from mgr/mgr_module.py. This method returns a nested dictionary that
        # looks a lot like perf schema, except with some additional fields.
        #
        # Example of output, a snapshot of a mon daemon:
        #   "mon.b": {
        #       "bluestore.kv_flush_lat": {
        #           "count": 2431,
        #           "description": "Average kv_thread flush latency",
        #           "nick": "fl_l",
        #           "priority": 8,
        #           "type": 5,
        #           "units": 1,
        #           "value": 88814109
        #       },
        #   },
        all_perf_counters = self.get_all_perf_counters()

        # Initialize 'result' dict
        result: Dict[str, dict] = defaultdict(lambda: defaultdict(
            lambda: defaultdict(lambda: defaultdict(int))))

        # Condense metrics among like daemons (i.e. 'osd' = 'osd.0' +
        # 'osd.1' + 'osd.2'), and update the 'result' dict.
        for daemon in all_perf_counters:
            daemon_type = daemon[0:3] # i.e. 'mds', 'osd', 'rgw'
            for collection in all_perf_counters[daemon]:

                # Split the collection to avoid redundancy in final report; i.e.:
                #   bluestore.kv_flush_lat, bluestore.kv_final_lat --> 
                #   bluestore: kv_flush_lat, kv_final_lat
                col_0, col_1 = collection.split('.')

                # Debug log for empty keys. This initially was a problem for prioritycache
                # perf counters, where the col_0 was empty for certain mon counters:
                #
                # "mon.a": {                  instead of    "mon.a": {
                #      "": {                                     "prioritycache": {
                #        "cache_bytes": {...},                          "cache_bytes": {...},
                #
                # This log is here to detect any future instances of a similar issue.
                if (daemon == "") or (col_0 == "") or (col_1 == ""):
                    self.log.debug("Instance of an empty key: {}{}".format(daemon, collection))

                # Not every rgw daemon has the same schema. Specifically, each rgw daemon
                # has a uniquely-named collection that starts off identically (i.e.
                # "objecter-0x...") then diverges (i.e. "...55f4e778e140.op_rmw").
                # This bit of code combines these unique counters all under one rgw instance.
                # Without this check, the schema would remain separated out in the final report.
                if col_0[0:11] == "objecter-0x":
                    col_0 = "objecter-0x"

                # Check that the value can be incremented. In some cases,
                # the files are of type 'pair' (real-integer-pair, integer-integer pair).
                # In those cases, the value is a dictionary, and not a number.
                #   i.e. throttle-msgr_dispatch_throttler-hbserver["wait"]
                if isinstance(all_perf_counters[daemon][collection]['value'], numbers.Number):
                    result[daemon_type][col_0][col_1]['value'] += \
                            all_perf_counters[daemon][collection]['value']
                
                # Check that 'count' exists, as not all counters have a count field. 
                if 'count' in all_perf_counters[daemon][collection]:
                    result[daemon_type][col_0][col_1]['count'] += \
                            all_perf_counters[daemon][collection]['count']

        return result

    def get_active_channels(self) -> List[str]:
        r = []
        if self.channel_basic:
            r.append('basic')
        if self.channel_crash:
            r.append('crash')
        if self.channel_device:
            r.append('device')
        if self.channel_ident:
            r.append('ident')
        if self.channel_perf:
            r.append('perf')
        return r

    def gather_device_report(self) -> Dict[str, Dict[str, Dict[str, str]]]:
        try:
            time_format = self.remote('devicehealth', 'get_time_format')
        except Exception:
            return {}
        cutoff = datetime.utcnow() - timedelta(hours=self.interval * 2)
        min_sample = cutoff.strftime(time_format)

        devices = self.get('devices')['devices']

        # anon-host-id -> anon-devid -> { timestamp -> record }
        res: Dict[str, Dict[str, Dict[str, str]]] = {}
        for d in devices:
            devid = d['devid']
            try:
                # this is a map of stamp -> {device info}
                m = self.remote('devicehealth', 'get_recent_device_metrics',
                                devid, min_sample)
            except Exception:
                continue

            # anonymize host id
            try:
                host = d['location'][0]['host']
            except KeyError:
                continue
            anon_host = self.get_store('host-id/%s' % host)
            if not anon_host:
                anon_host = str(uuid.uuid1())
                self.set_store('host-id/%s' % host, anon_host)
            serial = None
            for dev, rep in m.items():
                rep['host_id'] = anon_host
                if serial is None and 'serial_number' in rep:
                    serial = rep['serial_number']

            # anonymize device id
            anon_devid = self.get_store('devid-id/%s' % devid)
            if not anon_devid:
                # ideally devid is 'vendor_model_serial',
                # but can also be 'model_serial', 'serial'
                if '_' in devid:
                    anon_devid = f"{devid.rsplit('_', 1)[0]}_{uuid.uuid1()}"
                else:
                    anon_devid = str(uuid.uuid1())
                self.set_store('devid-id/%s' % devid, anon_devid)
            self.log.info('devid %s / %s, host %s / %s' % (devid, anon_devid,
                                                           host, anon_host))

            # anonymize the smartctl report itself
            if serial:
                m_str = json.dumps(m)
                m = json.loads(m_str.replace(serial, 'deleted'))

            if anon_host not in res:
                res[anon_host] = {}
            res[anon_host][anon_devid] = m
        return res

    def get_latest(self, daemon_type: str, daemon_name: str, stat: str) -> int:
        data = self.get_counter(daemon_type, daemon_name, stat)[stat]
        if data:
            return data[-1][1]
        else:
            return 0

    def compile_report(self, channels: Optional[List[str]] = None) -> Dict[str, Any]:
        if not channels:
            channels = self.get_active_channels()
        report = {
            'leaderboard': self.leaderboard,
            'report_version': 1,
            'report_timestamp': datetime.utcnow().isoformat(),
            'report_id': self.report_id,
            'channels': channels,
            'channels_available': ALL_CHANNELS,
            'license': LICENSE,
        }

        if 'ident' in channels:
            for option in ['description', 'contact', 'organization']:
                report[option] = getattr(self, option)

        if 'basic' in channels:
            mon_map = self.get('mon_map')
            osd_map = self.get('osd_map')
            service_map = self.get('service_map')
            fs_map = self.get('fs_map')
            df = self.get('df')

            report['created'] = mon_map['created']

            # mons
            v1_mons = 0
            v2_mons = 0
            ipv4_mons = 0
            ipv6_mons = 0
            for mon in mon_map['mons']:
                for a in mon['public_addrs']['addrvec']:
                    if a['type'] == 'v2':
                        v2_mons += 1
                    elif a['type'] == 'v1':
                        v1_mons += 1
                    if a['addr'].startswith('['):
                        ipv6_mons += 1
                    else:
                        ipv4_mons += 1
            report['mon'] = {
                'count': len(mon_map['mons']),
                'features': mon_map['features'],
                'min_mon_release': mon_map['min_mon_release'],
                'v1_addr_mons': v1_mons,
                'v2_addr_mons': v2_mons,
                'ipv4_addr_mons': ipv4_mons,
                'ipv6_addr_mons': ipv6_mons,
            }

            report['config'] = self.gather_configs()

            # pools

            rbd_num_pools = 0
            rbd_num_images_by_pool = []
            rbd_mirroring_by_pool = []
            num_pg = 0
            report['pools'] = list()
            for pool in osd_map['pools']:
                num_pg += pool['pg_num']
                ec_profile = {}
                if pool['erasure_code_profile']:
                    orig = osd_map['erasure_code_profiles'].get(
                        pool['erasure_code_profile'], {})
                    ec_profile = {
                        k: orig[k] for k in orig.keys()
                        if k in ['k', 'm', 'plugin', 'technique',
                                 'crush-failure-domain', 'l']
                    }
                cast(List[Dict[str, Any]], report['pools']).append(
                    {
                        'pool': pool['pool'],
                        'pg_num': pool['pg_num'],
                        'pgp_num': pool['pg_placement_num'],
                        'size': pool['size'],
                        'min_size': pool['min_size'],
                        'pg_autoscale_mode': pool['pg_autoscale_mode'],
                        'target_max_bytes': pool['target_max_bytes'],
                        'target_max_objects': pool['target_max_objects'],
                        'type': ['', 'replicated', '', 'erasure'][pool['type']],
                        'erasure_code_profile': ec_profile,
                        'cache_mode': pool['cache_mode'],
                    }
                )
                if 'rbd' in pool['application_metadata']:
                    rbd_num_pools += 1
                    ioctx = self.rados.open_ioctx(pool['pool_name'])
                    rbd_num_images_by_pool.append(
                        sum(1 for _ in rbd.RBD().list2(ioctx)))
                    rbd_mirroring_by_pool.append(
                        rbd.RBD().mirror_mode_get(ioctx) != rbd.RBD_MIRROR_MODE_DISABLED)
            report['rbd'] = {
                'num_pools': rbd_num_pools,
                'num_images_by_pool': rbd_num_images_by_pool,
                'mirroring_by_pool': rbd_mirroring_by_pool}

            # osds
            cluster_network = False
            for osd in osd_map['osds']:
                if osd['up'] and not cluster_network:
                    front_ip = osd['public_addrs']['addrvec'][0]['addr'].split(':')[0]
                    back_ip = osd['cluster_addrs']['addrvec'][0]['addr'].split(':')[0]
                    if front_ip != back_ip:
                        cluster_network = True
            report['osd'] = {
                'count': len(osd_map['osds']),
                'require_osd_release': osd_map['require_osd_release'],
                'require_min_compat_client': osd_map['require_min_compat_client'],
                'cluster_network': cluster_network,
            }

            # crush
            report['crush'] = self.gather_crush_info()

            # cephfs
            report['fs'] = {
                'count': len(fs_map['filesystems']),
                'feature_flags': fs_map['feature_flags'],
                'num_standby_mds': len(fs_map['standbys']),
                'filesystems': [],
            }
            num_mds = len(fs_map['standbys'])
            for fsm in fs_map['filesystems']:
                fs = fsm['mdsmap']
                num_sessions = 0
                cached_ino = 0
                cached_dn = 0
                cached_cap = 0
                subtrees = 0
                rfiles = 0
                rbytes = 0
                rsnaps = 0
                for gid, mds in fs['info'].items():
                    num_sessions += self.get_latest('mds', mds['name'],
                                                    'mds_sessions.session_count')
                    cached_ino += self.get_latest('mds', mds['name'],
                                                  'mds_mem.ino')
                    cached_dn += self.get_latest('mds', mds['name'],
                                                 'mds_mem.dn')
                    cached_cap += self.get_latest('mds', mds['name'],
                                                  'mds_mem.cap')
                    subtrees += self.get_latest('mds', mds['name'],
                                                'mds.subtrees')
                    if mds['rank'] == 0:
                        rfiles = self.get_latest('mds', mds['name'],
                                                 'mds.root_rfiles')
                        rbytes = self.get_latest('mds', mds['name'],
                                                 'mds.root_rbytes')
                        rsnaps = self.get_latest('mds', mds['name'],
                                                 'mds.root_rsnaps')
                report['fs']['filesystems'].append({  # type: ignore
                    'max_mds': fs['max_mds'],
                    'ever_allowed_features': fs['ever_allowed_features'],
                    'explicitly_allowed_features': fs['explicitly_allowed_features'],
                    'num_in': len(fs['in']),
                    'num_up': len(fs['up']),
                    'num_standby_replay': len(
                        [mds for gid, mds in fs['info'].items()
                         if mds['state'] == 'up:standby-replay']),
                    'num_mds': len(fs['info']),
                    'num_sessions': num_sessions,
                    'cached_inos': cached_ino,
                    'cached_dns': cached_dn,
                    'cached_caps': cached_cap,
                    'cached_subtrees': subtrees,
                    'balancer_enabled': len(fs['balancer']) > 0,
                    'num_data_pools': len(fs['data_pools']),
                    'standby_count_wanted': fs['standby_count_wanted'],
                    'approx_ctime': fs['created'][0:7],
                    'files': rfiles,
                    'bytes': rbytes,
                    'snaps': rsnaps,
                })
                num_mds += len(fs['info'])
            report['fs']['total_num_mds'] = num_mds  # type: ignore

            # daemons
            report['metadata'] = dict(osd=self.gather_osd_metadata(osd_map),
                                      mon=self.gather_mon_metadata(mon_map))

            # host counts
            servers = self.list_servers()
            self.log.debug('servers %s' % servers)
            hosts = {
                'num': len([h for h in servers if h['hostname']]),
            }
            for t in ['mon', 'mds', 'osd', 'mgr']:
                nr_services = sum(1 for host in servers if
                                  any(service for service in cast(List[ServiceInfoT],
                                                                  host['services'])
                                      if service['type'] == t))
                hosts['num_with_' + t] = nr_services
            report['hosts'] = hosts

            report['usage'] = {
                'pools': len(df['pools']),
                'pg_num': num_pg,
                'total_used_bytes': df['stats']['total_used_bytes'],
                'total_bytes': df['stats']['total_bytes'],
                'total_avail_bytes': df['stats']['total_avail_bytes']
            }

            services: DefaultDict[str, int] = defaultdict(int)
            for key, value in service_map['services'].items():
                services[key] += 1
                if key == 'rgw':
                    rgw = {}
                    zones = set()
                    zonegroups = set()
                    frontends = set()
                    count = 0
                    d = value.get('daemons', dict())
                    for k, v in d.items():
                        if k == 'summary' and v:
                            rgw[k] = v
                        elif isinstance(v, dict) and 'metadata' in v:
                            count += 1
                            zones.add(v['metadata']['zone_id'])
                            zonegroups.add(v['metadata']['zonegroup_id'])
                            frontends.add(v['metadata']['frontend_type#0'])

                            # we could actually iterate over all the keys of
                            # the dict and check for how many frontends there
                            # are, but it is unlikely that one would be running
                            # more than 2 supported ones
                            f2 = v['metadata'].get('frontend_type#1', None)
                            if f2:
                                frontends.add(f2)

                    rgw['count'] = count
                    rgw['zones'] = len(zones)
                    rgw['zonegroups'] = len(zonegroups)
                    rgw['frontends'] = list(frontends)  # sets aren't json-serializable
                    report['rgw'] = rgw
            report['services'] = services

            try:
                report['balancer'] = self.remote('balancer', 'gather_telemetry')
            except ImportError:
                report['balancer'] = {
                    'active': False
                }

        if 'crash' in channels:
            report['crashes'] = self.gather_crashinfo()

        if 'perf' in channels:
            report['perf_counters'] = self.gather_perf_counters()
            report['stat_sum_per_pool'] = self.get_stat_sum_per_pool()
            report['io_rate'] = self.get_io_rate()
            report['osd_perf_histograms'] = self.get_osd_histograms()

        # NOTE: We do not include the 'device' channel in this report; it is
        # sent to a different endpoint.

        return report

    def _try_post(self, what: str, url: str, report: Dict[str, Dict[str, str]]) -> Optional[str]:
        self.log.info('Sending %s to: %s' % (what, url))
        proxies = dict()
        if self.proxy:
            self.log.info('Send using HTTP(S) proxy: %s', self.proxy)
            proxies['http'] = self.proxy
            proxies['https'] = self.proxy
        try:
            resp = requests.put(url=url, json=report, proxies=proxies)
            resp.raise_for_status()
        except Exception as e:
            fail_reason = 'Failed to send %s to %s: %s' % (what, url, str(e))
            self.log.error(fail_reason)
            return fail_reason
        return None

    class EndPoint(enum.Enum):
        ceph = 'ceph'
        device = 'device'

    def send(self,
             report: Dict[str, Dict[str, str]],
             endpoint: Optional[List[EndPoint]] = None) -> Tuple[int, str, str]:
        if not endpoint:
            endpoint = [self.EndPoint.ceph, self.EndPoint.device]
        failed = []
        success = []
        self.log.debug('Send endpoints %s' % endpoint)
        for e in endpoint:
            if e == self.EndPoint.ceph:
                fail_reason = self._try_post('ceph report', self.url, report)
                if fail_reason:
                    failed.append(fail_reason)
                else:
                    now = int(time.time())
                    self.last_upload = now
                    self.set_store('last_upload', str(now))
                    success.append('Ceph report sent to {0}'.format(self.url))
                    self.log.info('Sent report to {0}'.format(self.url))
            elif e == self.EndPoint.device:
                if 'device' in self.get_active_channels():
                    devices = self.gather_device_report()
                    assert devices
                    num_devs = 0
                    num_hosts = 0
                    for host, ls in devices.items():
                        self.log.debug('host %s devices %s' % (host, ls))
                        if not len(ls):
                            continue
                        fail_reason = self._try_post('devices', self.device_url,
                                                     ls)
                        if fail_reason:
                            failed.append(fail_reason)
                        else:
                            num_devs += len(ls)
                            num_hosts += 1
                    if num_devs:
                        success.append('Reported %d devices across %d hosts' % (
                            num_devs, num_hosts))
        if failed:
            return 1, '', '\n'.join(success + failed)
        return 0, '', '\n'.join(success)

    @CLIReadCommand('telemetry status')
    def status(self) -> Tuple[int, str, str]:
        '''
        Show current configuration
        '''
        r = {}
        for opt in self.MODULE_OPTIONS:
            r[opt['name']] = getattr(self, opt['name'])
        r['last_upload'] = (time.ctime(self.last_upload)
                            if self.last_upload else self.last_upload)
        return 0, json.dumps(r, indent=4, sort_keys=True), ''

    @CLICommand('telemetry on')
    def on(self, license: Optional[str] = None) -> Tuple[int, str, str]:
        '''
        Enable telemetry reports from this cluster
        '''
        if license != LICENSE:
            return -errno.EPERM, '', f'''Telemetry data is licensed under the {LICENSE_NAME} ({LICENSE_URL}).
To enable, add '--license {LICENSE}' to the 'ceph telemetry on' command.'''
        else:
            self.set_module_option('enabled', True)
            self.set_module_option('last_opt_revision', REVISION)
            return 0, '', ''

    @CLICommand('telemetry off')
    def off(self) -> Tuple[int, str, str]:
        '''
        Disable telemetry reports from this cluster
        '''
        self.set_module_option('enabled', False)
        self.set_module_option('last_opt_revision', 1)
        return 0, '', ''

    @CLICommand('telemetry send')
    def do_send(self,
                endpoint: Optional[List[EndPoint]] = None,
                license: Optional[str] = None) -> Tuple[int, str, str]:
        if self.last_opt_revision < LAST_REVISION_RE_OPT_IN and license != LICENSE:
            self.log.debug(('A telemetry send attempt while opted-out. '
                            'Asking for license agreement'))
            return -errno.EPERM, '', f'''Telemetry data is licensed under the {LICENSE_NAME} ({LICENSE_URL}).
To manually send telemetry data, add '--license {LICENSE}' to the 'ceph telemetry send' command.
Please consider enabling the telemetry module with 'ceph telemetry on'.'''
        else:
            self.last_report = self.compile_report()
            return self.send(self.last_report, endpoint)

    @CLIReadCommand('telemetry show')
    def show(self, channels: Optional[List[str]] = None) -> Tuple[int, str, str]:
        '''
        Show report of all channels
        '''
        report = self.get_report(channels=channels)

        # Formatting the perf histograms so they are human-readable. This will change the
        # ranges and values, which are currently in list form, into strings so that
        # they are displayed horizontally instead of vertically.
        try:
            for config in report['osd_perf_histograms']:
                for histogram in config:
                    # Adjust ranges by converting lists into strings
                    for axis in config[histogram]['axes']:
                        for i in range(0, len(axis['ranges'])):
                            axis['ranges'][i] = str(axis['ranges'][i])
                    # Adjust values by converting lists into strings
                    for i in range(0, len(config[histogram]['values'])):
                        config[histogram]['values'][i] = str(config[histogram]['values'][i])
        except KeyError:
            # If the perf channel is not enabled, there should be a KeyError since
            # 'osd_perf_histograms' would not be present in the report. In that case,
            # the show function should pass as usual without trying to format the
            # histograms.
            pass

        report = json.dumps(report, indent=4, sort_keys=True)
        if self.channel_device:
            report += '''

Device report is generated separately. To see it run 'ceph telemetry show-device'.'''
        return 0, report, ''

    @CLIReadCommand('telemetry show-device')
    def show_device(self) -> Tuple[int, str, str]:
        return 0, json.dumps(self.get_report('device'), indent=4, sort_keys=True), ''

    @CLIReadCommand('telemetry show-all')
    def show_all(self) -> Tuple[int, str, str]:
        return 0, json.dumps(self.get_report('all'), indent=4, sort_keys=True), ''

    def get_report(self,
                   report_type: str = 'default',
                   channels: Optional[List[str]] = None) -> Dict[str, Any]:
        if report_type == 'default':
            return self.compile_report(channels=channels)
        elif report_type == 'device':
            return self.gather_device_report()
        elif report_type == 'all':
            return {'report': self.compile_report(channels=channels),
                    'device_report': self.gather_device_report()}
        return {}

    def self_test(self) -> None:
        report = self.compile_report()
        if len(report) == 0:
            raise RuntimeError('Report is empty')

        if 'report_id' not in report:
            raise RuntimeError('report_id not found in report')

    def shutdown(self) -> None:
        self.run = False
        self.event.set()

    def refresh_health_checks(self) -> None:
        health_checks = {}
        if self.enabled and self.last_opt_revision < LAST_REVISION_RE_OPT_IN:
            health_checks['TELEMETRY_CHANGED'] = {
                'severity': 'warning',
                'summary': 'Telemetry requires re-opt-in',
                'detail': [
                    'telemetry report includes new information; must re-opt-in (or out)'
                ]
            }
        self.set_health_checks(health_checks)

    def serve(self) -> None:
        self.load()
        self.config_notify()
        self.run = True

        self.log.debug('Waiting for mgr to warm up')
        self.event.wait(10)

        while self.run:
            self.event.clear()

            self.refresh_health_checks()

            if self.last_opt_revision < LAST_REVISION_RE_OPT_IN:
                self.log.debug('Not sending report until user re-opts-in')
                self.event.wait(1800)
                continue
            if not self.enabled:
                self.log.debug('Not sending report until configured to do so')
                self.event.wait(1800)
                continue

            now = int(time.time())
            if not self.last_upload or \
               (now - self.last_upload) > self.interval * 3600:
                self.log.info('Compiling and sending report to %s',
                              self.url)

                try:
                    self.last_report = self.compile_report()
                except Exception:
                    self.log.exception('Exception while compiling report:')

                self.send(self.last_report)
            else:
                self.log.debug('Interval for sending new report has not expired')

            sleep = 3600
            self.log.debug('Sleeping for %d seconds', sleep)
            self.event.wait(sleep)

    @staticmethod
    def can_run() -> Tuple[bool, str]:
        return True, ''
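
serve() above is the usual mgr-module scheduling idiom: Event.wait(timeout) doubles as an interruptible sleep, and config_notify() or shutdown() call event.set() to wake the loop immediately instead of waiting out the interval. A stripped-down sketch of the same loop outside of Ceph; the Reporter name and the intervals are made up:

import threading
import time


class Reporter:
    def __init__(self, interval=5):
        self.event = threading.Event()
        self.run = False
        self.interval = interval

    def serve(self):
        self.run = True
        while self.run:
            self.event.clear()
            print(time.strftime('%X'), 'doing periodic work')
            # Sleeps up to `interval` seconds, but returns immediately
            # if shutdown() or a config change sets the event.
            self.event.wait(self.interval)

    def shutdown(self):
        self.run = False
        self.event.set()     # wake serve() so it exits promptly


r = Reporter(interval=5)
t = threading.Thread(target=r.serve)
t.start()
time.sleep(12)
r.shutdown()
t.join()
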
Example #57
class Cuckoo(object):
    """ Parent class, defines interface to Cuckoo. """
    def __init__(self, job_queue):
        self.job_queue = job_queue
        self.shutdown_requested = Event()
        self.shutdown_requested.clear()
        self.running_jobs = {}
        # reentrant because we're doing nested calls within critical sections
        self.running_jobs_lock = RLock()

    def register_running_job(self, job_id, sample):
        """ Register a job as running. Detect if another sample has already
        been registered with the same job ID which obviously must never happen
        because it corrupts our internal housekeeping. Guarded by a lock
        because multiple worker threads will call this routine and check for
        collision and update of job log might otherwise race each other.

        @param job_id: ID of the job to register as running.
        @type job_id: int
        @param sample: Sample object to associate with this job ID
        @type sample: Sample

        @returns: None
        @raises: CuckooSubmitFailedException on job id collision """
        with self.running_jobs_lock:
            if (job_id in self.running_jobs
                    and self.running_jobs[job_id] is not sample):
                raise CuckooSubmitFailedException(
                    'A job with ID %d is already registered as running '
                    'for sample %s' % (job_id, self.running_jobs[job_id]))

            self.running_jobs[job_id] = CuckooJob(sample)

    def deregister_running_job(self, job_id):
        """ Deregister a running job by job id.

        @returns: Sample object of the job or None if job not found. """
        with self.running_jobs_lock:
            job = self.running_jobs.pop(job_id, None)
            if job is not None:
                return job.sample

        return None

    def deregister_running_job_if_too_old(self, job_id, max_age):
        """ Check if a job has gotten too old and remove it from the list of
        running jobs if so.

        @returns: Sample object of the job or None if job not found. """
        with self.running_jobs_lock:
            # use get() so an unknown job ID yields None instead of a
            # KeyError, matching the documented "None if job not found"
            job = self.running_jobs.get(job_id)
            if job is not None and job.is_older_than(max_age):
                return self.deregister_running_job(job_id)

        return None

    def resubmit_with_report(self, job_id):
        """ Resubmit a sample to the job queue after the report became
        available. Retrieves the report from Cuckoo.

        @param job_id: ID of job which has finished.
        @type job_id: int

        @returns: None """
        logger.debug("Analysis done for task #%d" % job_id)

        sample = self.deregister_running_job(job_id)
        if sample is None:
            logger.debug('No sample found for job ID %d', job_id)
            return None

        logger.debug('Requesting Cuckoo report for sample %s', sample)
        report = self.get_report(job_id)
        if report is None:
            # mark analysis as failed if we could not get the report e.g.
            # because it was corrupted or the API connection failed.
            sample.mark_cuckoo_failure()
        else:
            reportobj = CuckooReport(report)
            sample.register_cuckoo_report(reportobj)

        self.job_queue.submit(sample, self.__class__)
        return None

    def resubmit_as_failed_if_too_old(self, job_id, max_age):
        """ Resubmit a sample to the job queue with a failure report if the
        Cuckoo job has been running for too long.

        @param job_id: ID of job to check.
        @type job_id: int
        @param max_age: maximum job age in seconds
        @type max_age: int
        """
        sample = self.deregister_running_job_if_too_old(job_id, max_age)
        if sample is not None:
            logger.warning(
                "Dropped job %d because it has been running for "
                "too long", job_id)
            sample.mark_cuckoo_failure()
            self.job_queue.submit(sample, self.__class__)

    def shut_down(self):
        """ Request the module to shut down. """
        self.shutdown_requested.set()

    def reap_children(self):
        pass

    def get_report(self, job_id):
        """ Extract the report of a finished analysis from Cuckoo. To be
        overridden by derived classes for actual implementation. """
        raise NotImplementedError
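
A hedged sketch of what a concrete subclass might look like, fetching the report over a REST endpoint. The endpoint layout, default port, and the use of requests are assumptions for illustration, not taken from this listing:

import logging

import requests

logger = logging.getLogger(__name__)


class CuckooApi(Cuckoo):
    """ Illustrative subclass: retrieves reports via an assumed REST API. """
    def __init__(self, job_queue, url="http://localhost:8090"):
        super().__init__(job_queue)
        self.url = url

    def get_report(self, job_id):
        # the /tasks/report/<id> path is an assumption; adjust to your setup
        try:
            response = requests.get(
                "%s/tasks/report/%d" % (self.url, job_id), timeout=30)
            response.raise_for_status()
            return response.json()
        except (requests.RequestException, ValueError):
            logger.exception("Could not fetch report for job %d", job_id)
            return None

Returning None on any fetch or decode failure matches the contract used by resubmit_with_report() above, which marks the sample as failed in that case.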
Beispiel #58
0
def main():
    parser = argparse.ArgumentParser(description='World Models ' + ID)
    parser.add_argument('--data_dir',
                        '-d',
                        default="/data/wm",
                        help='The base data/output directory')
    parser.add_argument(
        '--game', default='CarRacing-v0',
        help='Game to use')  # https://gym.openai.com/envs/CarRacing-v0/
    parser.add_argument('--experiment_name',
                        default='experiment_1',
                        help='To isolate its files from others')
    parser.add_argument('--model',
                        '-m',
                        default='',
                        help='Initialize the model from given file')
    parser.add_argument('--no_resume',
                        action='store_true',
                        help="Don't auto resume from the latest snapshot")
    parser.add_argument(
        '--resume_from',
        '-r',
        default='',
        help='Resume the optimization from a specific snapshot')
    parser.add_argument('--hidden_dim',
                        default=256,
                        type=int,
                        help='LSTM hidden units')
    parser.add_argument('--z_dim',
                        '-z',
                        default=32,
                        type=int,
                        help='dimension of encoded vector')
    parser.add_argument('--mixtures',
                        default=5,
                        type=int,
                        help='number of gaussian mixtures for MDN')
    parser.add_argument('--lambda_',
                        "-l",
                        default=7,
                        type=int,
                        help='Population size for CMA-ES')
    parser.add_argument(
        '--mu',
        default=0.5,
        type=float,
        help='Keep this percent of fittest mutations for CMA-ES')
    parser.add_argument(
        '--trials',
        default=3,
        type=int,
        help=
        'The number of trials per mutation for CMA-ES, to average fitness score over'
    )
    parser.add_argument('--target_cumulative_reward',
                        default=900,
                        type=int,
                        help='Target cumulative reward')
    parser.add_argument('--frame_resize',
                        default=64,
                        type=int,
                        help='h x w resize of each observation frame')
    parser.add_argument('--temperature',
                        '-t',
                        default=1.0,
                        type=float,
                        help='Temperature (tau) for MDN-RNN (model)')
    parser.add_argument('--snapshot_interval',
                        '-s',
                        default=5,
                        type=int,
                        help='snapshot every x generations of evolution')
    parser.add_argument(
        '--cluster_mode',
        action='store_true',
        help=
        'If in a distributed cpu cluster. Set CLUSTER_ variables accordingly.')
    parser.add_argument(
        '--test',
        action='store_true',
        help=
        'Generate a rollout gif only (must have access to saved snapshot or model)'
    )
    parser.add_argument('--gpu',
                        '-g',
                        default=-1,
                        type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--gpus',
                        default="",
                        help='A list of gpus to use, i.e. "0,1,2,3"')
    parser.add_argument(
        '--curriculum',
        default="",
        help='initial,step e.g. 50,5 starts at 50 steps and adds 5 steps')
    parser.add_argument('--predict_done',
                        action='store_true',
                        help='Whether MDN-RNN should also predict done state')
    parser.add_argument('--done_threshold',
                        default=0.5,
                        type=float,
                        help='What done probability really means done')
    parser.add_argument(
        '--weights_type',
        default=1,
        type=int,
        help="1=action_dim*(z_dim+hidden_dim), 2=z_dim+2*hidden_dim")
    parser.add_argument('--in_dream',
                        action='store_true',
                        help='Whether to train in dream, or real environment')
    parser.add_argument('--dream_max_len',
                        default=2100,
                        type=int,
                        help="Maximum timesteps for dream to avoid runaway")
    parser.add_argument(
        '--cores',
        default=0,
        type=int,
        help='# CPU cores for main CMA-ES loop in non-cluster_mode. 0=all cores'
    )
    parser.add_argument(
        '--initial_z_size',
        default=10000,
        type=int,
        help="How many real initial frames to load for dream training")
    parser.add_argument(
        '--initial_z_noise',
        default=0.,
        type=float,
        help="Gaussian noise std for initial z for dream training")
    parser.add_argument(
        '--cluster_max_wait',
        default=5400,
        type=int,
        help="Move on after this many seconds of no response from worker(s)")

    args = parser.parse_args()
    log(ID, "args =\n " + str(vars(args)).replace(",", ",\n "))

    hostname = socket.gethostname().split(".")[0]
    if args.gpus:
        args.gpus = [int(item) for item in args.gpus.split(',')]
    if args.curriculum:
        curriculum_start = int(args.curriculum.split(',')[0])
        curriculum_step = int(args.curriculum.split(',')[1])

    output_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                              ID)
    mkdir(output_dir)
    model_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                             'model')
    vision_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                              'vision')
    random_rollouts_dir = os.path.join(args.data_dir, args.game,
                                       args.experiment_name, 'random_rollouts')

    model = MDN_RNN(args.hidden_dim, args.z_dim, args.mixtures,
                    args.predict_done)
    chainer.serializers.load_npz(os.path.join(model_dir, "model.model"), model)
    vision = CVAE(args.z_dim)
    chainer.serializers.load_npz(os.path.join(vision_dir, "vision.model"),
                                 vision)

    global initial_z_t
    if args.in_dream:
        log(ID,
            "Loading random rollouts for initial frames for dream training")
        initial_z_t = ModelDataset(dir=random_rollouts_dir,
                                   load_batch_size=args.initial_z_size,
                                   verbose=False)

    if args.game in DOOM_GAMES:
        env = ViZDoomWrapper(args.game)
    else:
        env = gym.make(args.game)
    action_dim = len(env.action_space.low)
    args.action_dim = action_dim
    env = None

    auto_resume_file = None
    if not args.cluster_mode or (args.cluster_mode
                                 and hostname == CLUSTER_DISPATCHER):
        max_iter = 0
        files = os.listdir(output_dir)
        for file in files:
            if re.match(r'^snapshot_iter_', file):
                # avoid shadowing the builtin iter()
                snapshot_iter = int(re.search(r'\d+', file).group())
                if snapshot_iter > max_iter:
                    max_iter = snapshot_iter
        if max_iter > 0:
            auto_resume_file = os.path.join(
                output_dir, "snapshot_iter_{}.npz".format(max_iter))

    resume = None
    if args.model:
        if args.model == 'default':
            args.model = os.path.join(output_dir, ID + ".model")
        log(ID, "Loading saved model from: " + args.model)
        resume = args.model
    elif args.resume_from:
        log(ID, "Resuming manually from snapshot: " + args.resume_from)
        resume = args.resume_from
    elif not args.no_resume and auto_resume_file is not None:
        log(ID, "Auto resuming from last snapshot: " + auto_resume_file)
        resume = auto_resume_file

    if resume is not None:
        npz = np.load(resume)
        pc = npz['pc']
        ps = npz['ps']
        B = npz['B']
        D = npz['D']
        C = npz['C']
        invsqrtC = npz['invsqrtC']
        eigeneval = npz['eigeneval']
        xmean = npz['xmean']
        sigma = npz['sigma']
        counteval = npz['counteval']
        generation = npz['generation'] + 1
        cumulative_rewards_over_generations = npz[
            'cumulative_rewards_over_generations']
        if args.curriculum:
            if 'max_timesteps' in npz and npz['max_timesteps'] is not None:
                max_timesteps = npz['max_timesteps']
            else:
                max_timesteps = curriculum_start
            last_highest_avg_cumulative_reward = max(
                cumulative_rewards_over_generations.mean(axis=1))
        else:
            max_timesteps = None
        npz.close()

    log(ID, "Starting")

    if args.cluster_mode and hostname != CLUSTER_DISPATCHER and not args.test:
        log(ID, "Starting cluster worker")
        WorkerServer(CLUSTER_WORKER_PORT, args, vision, model)
    elif not args.test:
        if args.cluster_mode:
            global cluster_cumulative_rewards
            cluster_event = Event()

            log(ID, "Starting cluster dispatcher")
            dispatcher_thread = Thread(target=DispatcherServer,
                                       args=(CLUSTER_DISPATCHER_PORT, args,
                                             cluster_event))
            dispatcher_thread.start()

            # Make the dispatcher a worker too
            log(ID, "Starting cluster worker")
            worker_thread = Thread(target=WorkerServer,
                                   args=(CLUSTER_WORKER_PORT, args, vision,
                                         model))
            worker_thread.start()

        if args.weights_type == 1:
            N = action_dim * (args.z_dim + args.hidden_dim) + action_dim
        elif args.weights_type == 2:
            N = args.z_dim + 2 * args.hidden_dim

        stopeval = 1e3 * N**2
        stopfitness = args.target_cumulative_reward

        lambda_ = args.lambda_  # 4+int(3*np.log(N))
        mu = int(lambda_ * args.mu)  # //2
        weights = np.log(mu + 1 / 2) - np.log(np.asarray(range(
            1, mu + 1))).astype(np.float32)
        weights = weights / np.sum(weights)
        mueff = (np.sum(weights)**2) / np.sum(weights**2)

        cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)
        cs = (mueff + 2) / (N + mueff + 5)
        c1 = 2 / ((N + 1.3)**2 + mueff)
        cmu = min(1 - c1, 2 * (mueff - 2 + 1 / mueff) / ((N + 2)**2 + mueff))
        damps = 1 + 2 * max(0, ((mueff - 1) / (N + 1))**0.5 - 1) + cs
        chiN = N**0.5 * (1 - 1 / (4 * N) + 1 / (21 * N**2))

        if resume is None:
            pc = np.zeros(N).astype(np.float32)
            ps = np.zeros(N).astype(np.float32)
            B = np.eye(N, N).astype(np.float32)
            D = np.ones(N).astype(np.float32)
            # use matrix products, not elementwise '*': C = B diag(D^2) B^T
            C = B.dot(np.diag(D**2)).dot(B.T)
            invsqrtC = B.dot(np.diag(D**-1)).dot(B.T)
            eigeneval = 0
            xmean = np.random.randn(N).astype(np.float32)
            sigma = 0.3
            counteval = 0
            generation = 1
            cumulative_rewards_over_generations = None
            if args.curriculum:
                max_timesteps = curriculum_start
                last_highest_avg_cumulative_reward = None
            else:
                max_timesteps = None

        solution_found = False
        while counteval < stopeval:
            log(ID, "> Starting evolution generation #" + str(generation))

            arfitness = np.zeros(lambda_).astype(np.float32)
            arx = np.zeros((lambda_, N)).astype(np.float32)
            for k in range(lambda_):
                arx[k] = xmean + sigma * B.dot(
                    D * np.random.randn(N).astype(np.float32))
                counteval += 1

            if not args.cluster_mode:
                if args.cores == 0:
                    cores = cpu_count()
                else:
                    cores = args.cores
                pool = Pool(cores)
                worker_arg_tuples = []
                for k in range(lambda_):
                    worker_arg_tuples.append(
                        (generation, k, args, vision, model, arx[k],
                         max_timesteps, False))
                cumulative_rewards = pool.map(rollout_worker,
                                              worker_arg_tuples)
                pool.close()
                pool.join()
                for k, cumulative_reward in enumerate(cumulative_rewards):
                    arfitness[k] = cumulative_reward
            else:
                arx_splits = np.array_split(arx, len(CLUSTER_WORKERS))
                indices = np.array_split(np.arange(lambda_),
                                         len(CLUSTER_WORKERS))
                cluster_cumulative_rewards = {}
                for i, chunked_mutations in enumerate(arx_splits):
                    log(
                        ID, "> Dispatching " + str(len(chunked_mutations)) +
                        " mutations to " + CLUSTER_WORKERS[i])
                    compressed_array = BytesIO()
                    np.savez_compressed(compressed_array,
                                        chunked_mutations=chunked_mutations,
                                        indices=indices[i],
                                        generation=generation,
                                        max_timesteps=max_timesteps)
                    compressed_array.seek(0)
                    out = compressed_array.read()

                    succeeded = False
                    for retries in range(3):
                        try:
                            sock = socket.socket(socket.AF_INET,
                                                 socket.SOCK_STREAM)
                            sock.settimeout(10)
                            sock.connect(
                                (CLUSTER_WORKERS[i], CLUSTER_WORKER_PORT))
                            sock.sendall(out)
                            sock.sendall(b"\r\n")
                            data = sock.recv(1024).decode("utf-8")
                            sock.close()
                            if data == "OK":
                                succeeded = True
                                break
                        except Exception as e:
                            log(ID, e)
                        log(
                            ID, "Unable to dispatch mutations to " +
                            CLUSTER_WORKERS[i] +
                            ". Retrying after sleeping for 30s")
                        time.sleep(30)
                    if not succeeded:
                        log(
                            ID, "Unable to dispatch mutations to " +
                            CLUSTER_WORKERS[i] + "!")
                log(
                    ID,
                    "> Dispatched all mutations to cluster. Waiting for results."
                )
                cluster_event.clear()
                cluster_event.wait(
                    args.cluster_max_wait
                )  # Cut our losses if some results never get returned
                for k in range(lambda_):
                    if k in cluster_cumulative_rewards:
                        arfitness[k] = cluster_cumulative_rewards[k]
                    else:
                        arfitness[k] = 0.

            if cumulative_rewards_over_generations is None:
                cumulative_rewards_over_generations = np.expand_dims(
                    arfitness, 0)
            else:
                cumulative_rewards_over_generations = np.concatenate(
                    (cumulative_rewards_over_generations,
                     np.expand_dims(arfitness, 0)),
                    axis=0)

            arindex = np.argsort(-arfitness)
            # arfitness = arfitness[arindex]

            xold = xmean
            xmean = weights.dot(arx[arindex[0:mu]])

            avg_cumulative_reward = np.mean(arfitness)

            log(
                ID,
                "> Finished evolution generation #{}, average cumulative reward = {:.2f}"
                .format(generation, avg_cumulative_reward))

            if generation > 1 and args.curriculum:
                if last_highest_avg_cumulative_reward is None:
                    last_highest_avg_cumulative_reward = np.mean(
                        cumulative_rewards_over_generations[-2])
                log(
                    ID,
                    "> Highest average cumulative reward from previous generations = {:.2f}"
                    .format(last_highest_avg_cumulative_reward))
                if avg_cumulative_reward > (
                        last_highest_avg_cumulative_reward *
                        0.99):  # let it pass if within 1% of the old average
                    max_timesteps += curriculum_step
                    log(
                        ID,
                        "> Average cumulative reward increased. Increasing max timesteps to "
                        + str(max_timesteps))
                    last_highest_avg_cumulative_reward = None
                else:
                    log(
                        ID,
                        "> Average cumulative reward did not increase. Keeping max timesteps at "
                        + str(max_timesteps))

            # We average over the whole population, but breaking here means
            # only the top x% of mutations contribute to the final xmean
            if avg_cumulative_reward >= stopfitness:
                solution_found = True
                break

            ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * invsqrtC.dot(
                (xmean - xold) / sigma)
            hsig = np.linalg.norm(ps) / np.sqrt(1 - (1 - cs)**(
                2 * counteval / lambda_)) / chiN < 1.4 + 2 / (N + 1)
            pc = (1 - cc) * pc + hsig * np.sqrt(cc * (2 - cc) * mueff) * (
                (xmean - xold) / sigma)
            artmp = (1 / sigma) * (arx[arindex[0:mu]] - xold)
            # the rank-one term needs the outer product pc pc^T; pc.dot(pc.T)
            # on a 1-D array collapses to a scalar (see the standalone CMA-ES
            # sketch after this listing)
            C = (1 - c1 - cmu) * C + c1 * (np.outer(pc, pc) +
                                           (1 - hsig) * cc * (2 - cc) * C) + \
                cmu * artmp.T.dot(np.diag(weights)).dot(artmp)
            sigma = sigma * np.exp(
                (cs / damps) * (np.linalg.norm(ps) / chiN - 1))

            if counteval - eigeneval > lambda_ / (c1 + cmu) / N / 10:
                eigeneval = counteval
                C = np.triu(C) + np.triu(C, 1).T
                D, B = np.linalg.eig(C)
                D = np.sqrt(D)
                invsqrtC = B.dot(np.diag(D**-1).dot(B.T))

            if generation % args.snapshot_interval == 0:
                snapshot_file = os.path.join(
                    output_dir, "snapshot_iter_" + str(generation) + ".npz")
                log(ID, "> Saving snapshot to " + str(snapshot_file))
                np.savez_compressed(snapshot_file,
                                    pc=pc,
                                    ps=ps,
                                    B=B,
                                    D=D,
                                    C=C,
                                    invsqrtC=invsqrtC,
                                    eigeneval=eigeneval,
                                    xmean=xmean,
                                    sigma=sigma,
                                    counteval=counteval,
                                    generation=generation,
                                    cumulative_rewards_over_generations=
                                    cumulative_rewards_over_generations,
                                    max_timesteps=max_timesteps)

            generation += 1

        if solution_found:
            log(ID, "Evolution Complete!")
            log(
                ID, "Solution found at generation #" + str(generation) +
                ", with average cumulative reward = " +
                str(avg_cumulative_reward) + " over " +
                str(args.lambda_ * args.trials) + " rollouts")
        else:
            log(ID, "Solution not found")

        controller_model_file = os.path.join(output_dir, ID + ".model")
        if os.path.exists(controller_model_file):
            os.remove(controller_model_file)
        log(ID, "Saving model to: " + controller_model_file)
        np.savez_compressed(controller_model_file,
                            pc=pc,
                            ps=ps,
                            B=B,
                            D=D,
                            C=C,
                            invsqrtC=invsqrtC,
                            eigeneval=eigeneval,
                            xmean=xmean,
                            sigma=sigma,
                            counteval=counteval,
                            generation=generation,
                            cumulative_rewards_over_generations=
                            cumulative_rewards_over_generations,
                            max_timesteps=max_timesteps)
        os.rename(os.path.join(output_dir, ID + ".model.npz"),
                  controller_model_file)

    # xmean = np.random.randn(action_dim * (args.z_dim + args.hidden_dim) + action_dim).astype(np.float32)
    # xmean = np.random.randn(args.z_dim + 2 * args.hidden_dim).astype(np.float32)
    parameters = xmean

    if args.in_dream:
        log(ID,
            "Generating a rollout gif with the controller model in a dream")
        W_c, b_c = transform_to_weights(args, parameters)
        cumulative_reward, frames = rollout(
            (0, 0, 0, args, vision.to_cpu(), model.to_cpu(), None, W_c, b_c,
             None, True))
        imageio.mimsave(os.path.join(output_dir, 'dream_rollout.gif'),
                        frames,
                        fps=20)
        log(ID, "Final cumulative reward in dream: " + str(cumulative_reward))
        args.in_dream = False

    log(
        ID,
        "Generating a rollout gif with the controller model in the environment"
    )
    W_c, b_c = transform_to_weights(args, parameters)
    cumulative_reward, frames = rollout(
        (0, 0, 0, args, vision.to_cpu(), model.to_cpu(), None, W_c, b_c, None,
         True))
    imageio.mimsave(os.path.join(output_dir, 'env_rollout.gif'),
                    frames,
                    fps=20)
    log(ID,
        "Final cumulative reward in environment: " + str(cumulative_reward))

    log(ID, "Done")
Beispiel #59
0
class SoundManager:
    """
    Object which configures the sound management on the robot
    """
    def __init__(self):
        # - Init
        self.__sound_player = SoundPlayer()
        self.__sound_database = SoundDatabase()
        self.__text_to_speech = NiryoTextToSpeech(self, self.__sound_database)

        self.__sound_thread = Thread()
        self.sound_end_event = Event()
        self.sound_end_event.clear()

        self.play_sound(self.__sound_database.wake_up_sound)

        self.__rpi_overheating = False
        self.__overheat_timer = None
        self.__error_sound_delay = rospy.get_param("~error_sound_delay")

        # - Subscribers
        self.__robot_status = RobotStatus.BOOTING
        self.__logs_status = RobotStatus.NONE
        rospy.Subscriber('/niryo_robot_status/robot_status', RobotStatus,
                         self.__callback_sub_robot_status)

        rospy.Subscriber('/niryo_studio_connection', Empty,
                         self.__callback_niryo_studio)

        # - Services
        rospy.Service('/niryo_robot_sound/play', PlaySound,
                      self.__callback_play_sound_user)

        # Set a bool to mention this node is initialized
        rospy.set_param('~initialized', True)
        rospy.loginfo("Sound Interface - Started")

    # - Callbacks
    def __callback_sub_robot_status(self, msg):
        if self.__robot_status == RobotStatus.SHUTDOWN:
            return
        elif msg.robot_status == RobotStatus.SHUTDOWN:
            self.__robot_status = msg.robot_status
            rospy.sleep(1.5)  # avoid ctrl+c
            self.play_shutdown_sound()
            self.sound_end_event.set()
            return

        if msg.rpi_overheating != self.__rpi_overheating:
            self.__rpi_overheating = msg.rpi_overheating
            if self.__rpi_overheating:
                sound = self.__sound_database.error_sound
                self.play_sound(sound)
                self.__overheat_timer = rospy.Timer(
                    rospy.Duration(self.__error_sound_delay),
                    self.__error_sound_callback)
            elif self.__overheat_timer is not None:
                self.__overheat_timer.shutdown()
                self.__overheat_timer = None

        if self.__robot_status != msg.robot_status:
            last_status = self.__robot_status
            self.__robot_status = msg.robot_status

            if last_status in [RobotStatus.RUNNING_AUTONOMOUS, RobotStatus.LEARNING_MODE_AUTONOMOUS] \
                    and self.__robot_status not in [RobotStatus.RUNNING_AUTONOMOUS,
                                                    RobotStatus.LEARNING_MODE_AUTONOMOUS]:
                self.__sound_player.stop()

            if last_status == RobotStatus.BOOTING and self.__robot_status != RobotStatus.BOOTING:
                self.__sound_player.stop_w_fade_out()
                self.play_sound(self.__sound_database.robot_ready_sound)
            elif self.__robot_status in [
                    RobotStatus.FATAL_ERROR, RobotStatus.MOTOR_ERROR
            ]:
                self.play_sound(self.__sound_database.error_sound)
            elif last_status != RobotStatus.CALIBRATION_IN_PROGRESS and \
                    msg.robot_status == RobotStatus.CALIBRATION_IN_PROGRESS:
                self.play_sound(self.__sound_database.calibration_sound)

        if self.__logs_status != msg.logs_status:
            self.__logs_status = msg.logs_status
            if self.__logs_status in [
                    RobotStatus.ERROR, RobotStatus.FATAL_ERROR
            ]:
                self.play_sound(self.__sound_database.error_sound)

    def __callback_play_sound_user(self, msg):
        sound_name = msg.sound_name
        return self.play_user_sound(sound_name, msg.start_time_sec,
                                    msg.end_time_sec, msg.wait_end)

    def play_user_sound(self,
                        sound_name,
                        start_time_sec=0,
                        end_time_sec=0,
                        wait_end=True):
        sound = self.__sound_database(sound_name)
        if sound is None:
            return CommandStatus.SOUND_FILE_NOT_FOUND, "{} sound not found".format(
                sound_name)

        self.play_sound(sound, start_time_sec, end_time_sec, wait=wait_end)

        if sound.preempted:
            return CommandStatus.SUCCESS, "{} sound preempted".format(
                sound_name)
        return CommandStatus.SUCCESS, "{} sound played with success".format(
            sound_name)

    def __error_sound_callback(self, _):
        if self.__rpi_overheating:
            sound = self.__sound_database.error_sound
            self.play_sound(sound)
        elif self.__overheat_timer is not None:
            self.__overheat_timer.shutdown()
            self.__overheat_timer = None

    def __callback_niryo_studio(self, _):
        if not self.__sound_player.is_busy():
            sound = self.__sound_database.connection_sound
            self.play_sound(sound)

    def play_sound(self, sound, start_time=0, end_time=0, wait=False):
        if self.__sound_thread.is_alive():
            self.__sound_player.stop()
            self.__sound_thread.join()

        self.__sound_thread = Thread(target=self.__sound_player.play_sound,
                                     args=(sound, start_time, end_time))
        self.__sound_thread.start()

        if wait:
            while not rospy.is_shutdown() and self.__sound_thread.is_alive():
                self.__sound_thread.join(timeout=0.1)

    def play_shutdown_sound(self):
        rospy.loginfo("Play shutdown sound")

        if self.__overheat_timer is not None:
            self.__overheat_timer.shutdown()
            self.__overheat_timer = None

        if self.__sound_thread.is_alive():
            self.__sound_player.stop()
            self.__sound_thread.join()

        sound = self.__sound_database.sleep_sound
        self.__sound_player.play_sound(sound)
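
play_sound() above replaces its worker Thread on every call because a Python Thread object can only be started once. A small hedged sketch of that restart pattern in isolation; the OneShotWorker name and the time.sleep() stand-in task are illustrative only:

import threading
import time


class OneShotWorker:
    """Python threads are not restartable, so keep a fresh Thread per task."""

    def __init__(self):
        self._thread = threading.Thread()  # dummy placeholder, never started

    def run_task(self, seconds):
        if self._thread.is_alive():
            # the real code first signals the player to stop; here we can
            # only wait for the previous task to finish
            self._thread.join()
        self._thread = threading.Thread(target=time.sleep, args=(seconds,))
        self._thread.start()

    def wait(self, poll=0.1):
        # join with a timeout so the caller stays responsive to shutdown
        while self._thread.is_alive():
            self._thread.join(timeout=poll)

Joining with a short timeout in a loop, as play_sound() does, keeps the waiting thread able to observe rospy.is_shutdown() between polls.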
Beispiel #60
0
class IndicatorSysmonitor(object):
    SENSORS_DISABLED = False

    def __init__(self):
        self._preferences_dialog = None
        self._help_dialog = None

        #fn, self.tindicator = tempfile.mkstemp(suffix=".svg")

        #with open(self.tindicator, "w") as f:
        #    svg = '<?xml version="1.0" encoding="UTF-8" \
        #                standalone="no"?><svg id="empty" xmlns="http://www.w3.org/2000/svg" \
        #                height="22" width="1" version="1.0" \
        #                xmlns:xlink="http://www.w3.org/1999/xlink"></svg>'
        #    f.write(svg)
        #    f.close()

        #self.ind = appindicator.Indicator.new("indicator-sysmonitor", self.tindicator, \
        #                                      appindicator.IndicatorCategory.SYSTEM_SERVICES)
        self.ind = Gtk.Button.new()
        #self.ind.set_ordering_index(0)

        #self.ind.set_status(appindicator.IndicatorStatus.ACTIVE)
        self.ind.set_label("Init...")

        self._create_menu()

        self.alive = Event()
        self.alive.set()

        self.sensor_mgr = SensorManager()
        self.load_settings()

    def _create_menu(self):
        """Creates the main menu and shows it."""
        # create menu {{{
        menu = Gtk.Menu()
        # add System Monitor menu item
        full_sysmon = Gtk.MenuItem(_('System Monitor'))
        full_sysmon.connect('activate', self.on_full_sysmon_activated)
        menu.add(full_sysmon)
        menu.add(Gtk.SeparatorMenuItem())

        # add preferences menu item
        pref_menu = Gtk.MenuItem(_('Preferences'))
        pref_menu.connect('activate', self.on_preferences_activated)
        menu.add(pref_menu)

        # add help menu item
        help_menu = Gtk.MenuItem(_('Help'))
        help_menu.connect('activate', self._on_help)
        menu.add(help_menu)

        # add quit menu item (currently disabled)
        #exit_menu = Gtk.MenuItem(_('Quit'))
        #exit_menu.connect('activate', self.on_exit)
        #menu.add(exit_menu)

        menu.show_all()

        self.popup = menu
        self.ind.connect('clicked', self.popup_menu)
        logging.info("Menu shown")
        # }}} menu done!

    def popup_menu(self, *args):
        self.popup.popup(None, None, None, None, 0,
                         Gtk.get_current_event_time())

    def update_indicator_guide(self):

        guide = self.sensor_mgr.get_guide()

        #self.ind.set_property("label-guide", guide)

    def update(self, data):
        # data is the dict of all sensors and their values
        # { name, label }

        # look through data and find out if there are any icons to be set
        for sensor in data:
            test_str = data[sensor].lower()
            if "use_icon" in test_str:
                path = data[sensor].split(":")[1]
                logging.debug(path)
                # set_icon_full() belongs to the appindicator backend; the
                # Gtk.Button stand-in used above has no such method, so guard
                if hasattr(self.ind, "set_icon_full"):
                    self.ind.set_icon_full(path, "")
                # now strip the icon output from data so that it is not displayed
                remaining = test_str.split("use_icon")[0].strip()
                if not remaining:
                    remaining = " "

                data[sensor] = remaining

            if "clear_icon" in test_str:
                self.ind.set_icon_full(self.tindicator, "")

                remaining = test_str.split("clear_icon")[0].strip()
                if not remaining:
                    remaining = " "

                data[sensor] = remaining

        label = self.sensor_mgr.get_label(data)

        #Gdk.threads_enter()
        if label and self.ind:
            self.ind.set_label(label.strip())
        #Gdk.threads_leave()

        #self.ind.set_title(label.strip())

    def load_settings(self):

        self.sensor_mgr.load_settings()
        self.sensor_mgr.initiate_fetcher(self)
        self.update_indicator_guide()

    # @staticmethod
    def save_settings(self):
        self.sensor_mgr.save_settings()

    # actions raised from menu
    def on_preferences_activated(self, event=None):
        """Raises the preferences dialog. If it's already open, it's
        focused"""
        if self._preferences_dialog is not None:
            self._preferences_dialog.present()
            return

        self._preferences_dialog = Preferences(self)
        self._preferences_dialog.run()
        self._preferences_dialog = None

    def on_full_sysmon_activated(self, event=None):
        os.system('gnome-system-monitor &')

    def on_exit(self, event=None, data=None):
        """Action called when the main program is closed."""
        # clean up the temporary indicator icon if it was ever created
        if hasattr(self, "tindicator"):
            os.remove(self.tindicator)
        # close the open dialogs
        if self._help_dialog is not None:
            self._help_dialog.destroy()

        if self._preferences_dialog is not None:
            self._preferences_dialog.destroy()

        logging.info("Terminated")
        self.alive.clear()  # DM: why bother with Event() ???

        try:
            Gtk.main_quit()
        except RuntimeError:
            pass

    def _on_help(self, event=None, data=None):
        """Raise a dialog with info about the app."""
        if self._help_dialog is not None:
            self._help_dialog.present()
            return

        self._help_dialog = Gtk.MessageDialog(
            None, Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.INFO,
            Gtk.ButtonsType.OK, None)

        self._help_dialog.set_title(_("Help"))
        self._help_dialog.set_markup(HELP_MSG)
        self._help_dialog.run()
        self._help_dialog.destroy()
        self._help_dialog = None
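
The alive Event above acts as a thread-safe run flag: a background fetcher keeps polling sensors while alive is set and exits once on_exit() clears it. SensorManager's internals are not shown in this listing, so the fetcher below is an assumption sketched from that contract; all names are illustrative:

import threading
import time


def fetcher(parent, interval=1.0):
    """Poll sensors while the parent's alive flag stays set (illustrative)."""
    while parent.alive.is_set():
        data = {"cpu": "42%"}  # stand-in for real sensor readings
        parent.update(data)
        time.sleep(interval)


class Parent:
    def __init__(self):
        self.alive = threading.Event()
        self.alive.set()

    def update(self, data):
        print(data)


if __name__ == "__main__":
    parent = Parent()
    thread = threading.Thread(target=fetcher, args=(parent,), daemon=True)
    thread.start()
    time.sleep(3)
    parent.alive.clear()  # the fetcher loop exits on its next check
    thread.join()

Clearing the Event rather than killing the thread lets the fetcher finish its current iteration cleanly, which is the same shutdown style the indicator uses.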