Example #1
File: world.py Project: bblack/pirc
class World:

  def __init__(self):
    self.connections = []
    self.connection_added = Event()
    self.channel_opened = Event()

  def get_connections(self):
    ret = []
    for c in self.connections:
      ret.append(c)
    return ret

  def remove_connection(self, connection):
    if connection.is_connected:
      raise "I don't think so, tim"
    self.connections.remove(connection)

  def new_connection(self):
    conn = Connection()
    self.connections.append(conn)

    conn.channel_opened += self.handle_channel_opened

    self.connection_added.fire(self, conn)
    return conn

  def shut_it_down(self):
    for c in self.connections:
      c.shut_it_down()

  def handle_channel_opened(self, channel):
    self.channel_opened.fire(self, channel)
Example #2
    def test_a_listener_is_passed_right_parameters(self):
        listener = Mock()
        event = Event()
        event.connect(listener)
        event.fire(5, shape="square")
        listener.assert_called_with(5, shape="square")
Example #3
    def test_a_listener_is_notified_when_an_event_is_raised(self):
        listener = Mock()
        event = Event()
        event.connect(listener)
        event.fire()
        self.assertTrue(listener.called)
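Note: these examples use an Event class whose definition is not shown. The sketch below is only a guess at the minimal interface Examples #1 through #3 assume (connect() registers a listener, "+=" is used as a registration shorthand in Example #1, and fire() forwards its arguments to every listener); it is not the actual pirc implementation.

class Event(object):
    def __init__(self):
        self._listeners = []

    def connect(self, listener):
        self._listeners.append(listener)

    # Example #1 registers handlers with "+=", so treat it as an alias of connect().
    def __iadd__(self, listener):
        self.connect(listener)
        return self

    def fire(self, *args, **kwargs):
        # iterate over a snapshot so a listener can safely connect/remove others
        for listener in list(self._listeners):
            listener(*args, **kwargs)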
Example #4
    def test_Event_SubscribeAndFireWithDupplicateKeyword_RaisesException(self):
        mockListener = mock.Mock()
        event = Event()
        event.subscribe(mockListener, kword1="value1")

        with self.assertRaises(DuplicateKeywordArgument):
            event.fire(kword1="value2")
Example #5
    def test_Event_SubscribeWithArgsAndKwargsFireWithArgsAndKwargs_ListenerIsCalledWithMergedArguments(self):
        mockListener = mock.Mock()
        event = Event()
        event.subscribe(mockListener, "arg1", "arg2", kword1="value1", kword2="value2")

        event.fire("arg3", "arg4", kword3="value3", kword4="value4")

        mockListener.assert_called_with('arg1', 'arg2', 'arg3', 'arg4', kword4='value4', kword3='value3', kword2='value2', kword1='value1')
Example #6
    def test_Event_FireUsingDifferentCallMethod_CallMethodIsCalled(self):
        mock_call_method = mock.Mock()
        event = Event(call_method=mock_call_method)
        event.subscribe("fct1", "arg1")
    
        event.fire("arg2")

        mock_call_method.assert_called_with("fct1", "arg1", "arg2")
Example #7
    def test_Event_UnsubscribeItselfInACallBack_CallbackIsUnsubscribed(self):
        def unsubscriber_callback(event):
            event.unsubscribe(unsubscriber_callback)
        event = Event()
        event.subscribe(unsubscriber_callback, event=event)
    
        event.fire()

        self.assertFalse(event.is_subscribed(unsubscriber_callback))
Example #8
    def test_Event_SubscribeInACallBack_CallbackIsSubscribed(self):
        def subscriber_callback(event, callback2):
            event.subscribe(callback2)
        event = Event()
        event.subscribe(subscriber_callback, event=event, callback2="callback1")
    
        event.fire()

        self.assertTrue(event.is_subscribed("callback1"))
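Note: Examples #4 through #8 exercise a richer, subscribe-style Event. The sketch below is an illustration that satisfies exactly those tests, not the library's real code: subscribe() binds positional and keyword arguments, fire() appends its own arguments and merges keywords (raising DuplicateKeywordArgument on a clash), an optional call_method replaces the default invocation, and dispatch iterates over a snapshot so callbacks may subscribe or unsubscribe while the event is firing.

class DuplicateKeywordArgument(Exception):
    pass


class Event(object):
    def __init__(self, call_method=None):
        # call_method(listener, *args, **kwargs) performs the actual invocation;
        # by default the listener is simply called (Example #6 swaps this out).
        self._call_method = call_method or (lambda listener, *a, **kw: listener(*a, **kw))
        self._subscriptions = []  # list of (listener, bound_args, bound_kwargs)

    def subscribe(self, listener, *args, **kwargs):
        self._subscriptions.append((listener, args, kwargs))

    def unsubscribe(self, listener):
        self._subscriptions = [s for s in self._subscriptions if s[0] != listener]

    def is_subscribed(self, listener):
        return any(s[0] == listener for s in self._subscriptions)

    def fire(self, *args, **kwargs):
        # snapshot the subscriptions so callbacks can (un)subscribe during dispatch
        # (Examples #7 and #8 rely on this)
        for listener, bound_args, bound_kwargs in list(self._subscriptions):
            clash = set(bound_kwargs) & set(kwargs)
            if clash:
                raise DuplicateKeywordArgument(", ".join(sorted(clash)))
            merged = dict(bound_kwargs)
            merged.update(kwargs)
            self._call_method(listener, *(bound_args + args), **merged)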
Example #9
class TagReader:
    def __init__(self, connection_string):
        self.clf = nfc.ContactlessFrontend(connection_string)
        self.tag_discovered = Event()
        start_new_thread(self.loop, ())

    def loop(self):
        while True:
            self.clf.connect(rdwr={'on-connect': self.on_connect})

    def on_connect(self, tag):
        self.tag_discovered.fire(tag.identifier.encode('hex'))
        return True
Example #11
class Death(MixinBase):
    """
    Mixin that keeps track of an actor's death status. Also contains
    an event allowing other objects to hook the actor's death.
    """
    def __init__(self, *a, **kwa):
        self.age = 0
        self._dead = False
        self.on_death = Event('on_death')
        self.on_death.hook(self.sim.handle_death)

    def end_step(self):
        if self._dead:
            self.on_death.fire(self)
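Note: this project uses yet another Event flavor, constructed with a name and attached to with hook() rather than connect() or subscribe(). A minimal sketch consistent with just the calls visible above (the surrounding simulation framework is not shown and the real class may do more):

class Event(object):
    def __init__(self, name):
        self.name = name
        self._hooks = []

    def hook(self, callback):
        self._hooks.append(callback)

    def fire(self, *args, **kwargs):
        for callback in list(self._hooks):
            callback(*args, **kwargs)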
Example #12
class Channel:
  def __init__(self, connection, name):
    self.connection = connection
    self.name = name
    self.getting_names = False
    self.nicks = set()

    self.nick_added = Event()
    self.nick_removed = Event()
    self.nick_changed = Event()
    self.msg_received = Event()
    self.someone_kicked = Event() # Someone, anyone, was kicked from the channel
    self.someone_joined = Event()
    self.someone_parted = Event()
    self.someone_quit = Event()
    self.left = Event() # The pirc user is no longer in the channel

    self.connection.received += self.catch_channel_shit
    self.connection.received += self.catch_joins
    self.connection.received += self.catch_parts
    self.connection.received += self.catch_kicks
    self.connection.received += self.catch_privmsgs
    self.connection.closed += self.catch_connection_closed

    self.connection.nick_changed += self.catch_nick_changed

  def _leave(self):
    for nick in self.nicks.copy():
      self._remove_nick(nick)
    self.left.fire(self)

  def catch_connection_closed(self, msg):
    self._leave()

  def catch_nick_changed(self, (old_nick, new_nick)):
    if old_nick in self.nicks:
      self.nicks.remove(old_nick)
      self.nicks.add(new_nick)
      self.nick_changed.fire(self, old_nick, new_nick)
Example #13
class PrayerTimesNotifier(object):
    def __init__(self, location, alert_on):
        self.praytime = Praytime(location)
        self.waitingfor = getNextPrayer(self.praytime)
        prayername = getPreviousPrayerName(self.waitingfor[0])
        self.now = (prayername, getattr(self.praytime, prayername))
        self.running = False
        self.alarm = Alarm()
        self._ontime = Event()
        self.alert_on = alert_on

    @property
    def onTime(self):
        return self._ontime

    def start(self):
        """
        Start notifying on prayers times
        """
        if self.waitingfor == self.now or not self.running:
            self.waitingfor = getNextPrayer(self.praytime, self.now[0])
            logging.info("Adding alarm %s", self.waitingfor)
            self.alarm.addAlarm(self.waitingfor[1], self._notify, self.waitingfor[0], self.waitingfor[1])
        self.running = True
        return True

    def _notify(self, *args):
        self.now = self.waitingfor
        if self.waitingfor[0] in self.alert_on:
            self._ontime.fire(*args)
        self.start()

    def __str__(self):
        return str(self.praytime)

    def __repr__(self):
        return str(self)
Example #14
class InterfaceWrapper:
    """
    The main class used by the startup to handle the interfacing.
    """

    def __init__(self, logger, DEMO_MODE=False):
        self.DEMO_MODE = DEMO_MODE
        gpio.setwarnings(False)
        gpio.setmode(gpio.BCM)
        self.logger = logger
        self.gpio = GpioLines()
        self.gpio.reg.output()
        self.gpio.io.output()
        self.digit_received = Event()
        """The digit received event, fired when the class detects that a digit has been pressed"""
        self._run_loop = True
        self._current_digit = 0
        self._digit_down = False
        self._done_line_write = False
        # the following store the value to apply to the given output on the
        # next pass
        self._led_green_apply = 0
        self._led_red_apply = 0
        self._buzzer_apply = 0

        self.logger.log("interface wrapper init complete")

    def main_loop(self):
        """
        Starts the main loop of the interface; the loop is wrapped in an exception handler that catches both 'Exception' and 'KeyboardInterrupt'.
        """
        self.logger.log("Main loop started")
        try:
            while self._run_loop:
                # write phase
                if self.DEMO_MODE:
                    input("> do write phase <")
                self._start_write()
                self._do_write_phase()
                # read phase
                if self.DEMO_MODE:
                    input("> do read phase <")
                self._start_read()
                self._do_read_phase()
        except Exception as ex:
            self.logger.loge(ex)
        except KeyboardInterrupt:
            self.logger.log("Beginning shutdown")

    def _inc_current_digit(self):
        self._current_digit += 1
        self._current_digit %= DPOS_NDIGITS

    def _dec_current_digit(self):
        self._current_digit -= 1
        self._current_digit %= DPOS_NDIGITS

    def _start_read(self):
        self.gpio.d_input()
        self.logger.logt("data lines set to input")
        GpioWrapper.wait()
        self.gpio.io.low()
        self.logger.logt("io line high")
        self.logger.logt("gpio lines configured to read")

    def _do_read_phase(self):
        if not self._done_line_write:
            self.logger.logt(
                "skipped reading rows due to previous write not being to column")
            return
        if self._digit_down:
            if self.gpio.d_all_high():
                self._digit_down = False
                self._inc_current_digit()
                self.logger.logt(
                    "digit no longer pressed, waiting for next input")
            else:
                self.logger.logt("digit still pressed")
        else:
            states = self.gpio.d_states()
            self.logger.logt("state readings: {}", states)
            active = None
            for s in range(len(states)):
                if not states[s]:
                    active = s
                    break
            if active is not None:
                self._digit_down = True
                self._dec_current_digit()
                dgt = DIGIT_CONVERT[self._current_digit][active]
                self.logger.logt("converted digit: {}", dgt)
                self.digit_received.fire(dgt)

    def _start_write(self):
        self.gpio.io.high()
        self.logger.logt("io line low")
        self.gpio.d_output()
        self.logger.logt("data lines set to output")
        self.logger.logt("gpio lines configured to write")

    def _do_write_phase(self):
        low_line = None
        self._done_line_write = False
        if self._led_green_apply:
            low_line = DPOS_GREEN_LED
            self._led_green_apply -= 1
            self.logger.logt("low line set to green LED")
        elif self._led_red_apply:
            low_line = DPOS_RED_LED
            self._led_red_apply -= 1
            self.logger.logt("low line set to red LED")
        elif self._buzzer_apply:
            low_line = DPOS_BUZZER
            self._buzzer_apply -= 1
            self.logger.logt("low line set to buzzer")
        else:  # apply keypad input
            low_line = DPOS_DIGIT[self._current_digit]
            if not self._digit_down:
                self._inc_current_digit()
            self._done_line_write = True
            self.logger.logt("low line set to next keypad row")
        # bin_out = bin(low_line)[2:].zfill(3)
        # self.logger.logt("gpio states: {}", bin_out)
        # self.gpio.d_set_states([bool(int(d)) for d in reversed(bin_out)])
        self.gpio.d_set_states(DPOS_CONVERT[low_line])
        self.gpio.reg.high()
        self.gpio.reg.low()
        #self.gpio.d_set_states([True, True, True])
        self.logger.logt("pulsed register line")

    def cleanup(self):
        """
        Cleans up any variables before exit.
        """
        self._run_loop = False
        gpio.cleanup()

    def flash_green_led(self):
        """
        Causes the green LED to flash once (if mid-flash, it will either do nothing or just reset the duration left on the flash, depending on how the hardware works).
        """
        self._led_green_apply = 2
        self.logger.log("green LED set to flash on next pass")

    def flash_red_led(self):
        """
        Causes the red LED to flash once (if mid-flash, it will either do nothing or just reset the duration left on the flash, depending on how the hardware works).
        """
        self._led_red_apply = 2
        self.logger.log("red LED set to flash on next pass")

    def beep_buzzer(self):
        """
        Causes the buzzer to go off once (if it is already buzzing, it will either do nothing or just reset the duration left on the buzz, depending on how the hardware works).
        """
        self._buzzer_apply = 2
        self.logger.log("buzzer set to activate on next pass")
Example #15
class DB(object):
  def __init__(self, settings):
    self.settings = settings
    self.needs_indexing = Event() # fired when the database gets dirtied and needs syncing
    self._pending_indexer = None # non-None if a DBIndexer is running
    self._cur_shard_manager = None # the current DBShardManager object. This has all the DBIndexShards.
    self._cur_query_cache = None

    self._dir_cache = DirCache() # thread only state

    # if we are currently looking for changed dirs, this is the iterator
    # directories remaining to be checked
    self._pending_up_to_date_generator = None

    self.settings.register('dirs', list, [], self._on_settings_dirs_changed)
    self._on_settings_dirs_changed(None, self.settings.dirs)

    self.settings.register('ignores', list, [], self._on_settings_ignores_changed)
    if self.settings.ignores == []:
      self.settings.ignores = DEFAULT_IGNORES

    self._on_settings_ignores_changed(None, self.settings.ignores)

    self.settings.register('token', str, "", self._on_settings_token_changed)
    self._on_settings_token_changed(None, self.settings.token)

  def close(self):
    if self._cur_shard_manager:
      self._cur_shard_manager.close()
      self._cur_shard_manager = None

  ###########################################################################

  def _on_settings_dirs_changed(self, old, new):
    self._dirs = map(lambda d: DBDir(d), new)
    self._set_dirty()

  @property
  def dirs(self):
    return list(self._dirs)

  def add_dir(self, d):
    real_d = os.path.realpath(d)

    cur = list(self.settings.dirs)
    if real_d in cur:
      raise DBException("Directory %s exists already as %s" % (d, real_d))

    # commit change
    cur.append(real_d)
    self.settings.dirs = cur  # triggers _on_settings_dirs_changed
    return self.dirs[-1]

  def delete_dir(self, d):
    if type(d) != DBDir:
      raise Exception("Expected DBDir")
    cur = list(self.settings.dirs)
    if d.path not in cur:
      raise DBException("not found")
    cur.remove(d.path)
    self.settings.dirs = cur # triggers _on_settings_dirs_changed

  ###########################################################################

  def _on_settings_ignores_changed(self, old, new):
    self._set_dirty()

  @property
  def ignores(self):
    return list(self.settings.ignores)

  def ignore(self,pattern):
    i = list(self.settings.ignores)
    if pattern in i:
      return
    i.append(pattern)
    self.settings.ignores = i

  def unignore(self,pattern):
    i = list(self.settings.ignores)
    i.remove(pattern)
    self.settings.ignores = i

  ###########################################################################

  def _on_settings_token_changed(self, old, new):
    self._set_dirty()

  @property
  def token(self):
    return self.settings.token

  @token.setter
  def token(self, token):
    self.settings.token = token

  ###########################################################################

  @property
  def has_index(self):
    return self._cur_shard_manager != None

  @property
  def is_up_to_date(self):
    return self._pending_indexer == None

  def check_up_to_date(self):
    if not self.is_up_to_date:
      return False
    import time
    self.check_up_to_date_a_bit_more()
    while self._pending_up_to_date_generator:
      self.check_up_to_date_a_bit_more()

  @traced
  def check_up_to_date_a_bit_more(self):
    if not self.is_up_to_date:
      return

    if self._pending_up_to_date_generator == None:
      logging.debug("Starting to check for changed directories.")
      self._pending_up_to_date_generator = self._dir_cache.iterdirnames().__iter__()

    for i in range(10):
      try:
        d = self._pending_up_to_date_generator.next()
      except StopIteration:
        self._pending_up_to_date_generator = None
        logging.debug("Done checking for changed directories.")
        break
      if self._dir_cache.listdir_with_changed_status(d)[1]:
        logging.debug("Change detected in %s!", d)
        self._pending_up_to_date_generator = None
        self._set_dirty()
        break

  def begin_reindex(self):
    self._set_dirty()

  def _set_dirty(self):
    was_indexing = self._pending_indexer != None
    if self._pending_indexer:
      self._pending_indexer = None
    self._pending_indexer = 1 # set to 1 as indication to step_indexer to create new indexer
    if not was_indexing:
      self.needs_indexing.fire()

  @traced
  def status(self):
    if self._pending_indexer:
      if isinstance(self._pending_indexer, DBIndexer): # is an integer briefly between _set_dirty and first step_indexer
        if self._cur_shard_manager:
          status = "syncing: %s, %s" % (self._pending_indexer.progress, self._cur_shard_manager.status)
        else:
          status = "first-time sync: %s" % self._pending_indexer.progress
      else:
        status = "sync scheduled"
    else:
      if self._cur_shard_manager:
        status = "up-to-date: %s" % self._cur_shard_manager.status
      else:
        status = "sync required"

    res = DBStatus()
    res.is_up_to_date = self.is_up_to_date
    res.has_index = self.has_index
    res.status = status
    return res

  @traced
  def step_indexer(self):
    if not self._pending_indexer:
      return

    if not isinstance(self._pending_indexer, DBIndexer):
      self._dir_cache.set_ignores(self.settings.ignores)
      self._pending_indexer = DBIndexer(self.settings.dirs, self._dir_cache)

    if self._pending_indexer.complete:
      self._cur_shard_manager = DBShardManager(self._pending_indexer)
      self._cur_query_cache = QueryCache()
      self._pending_indexer = None
    else:
      self._pending_indexer.index_a_bit_more()

  def sync(self):
    """Ensures database index is up-to-date"""
    self.check_up_to_date()
    while self._pending_indexer:
      self.step_indexer()

  ###########################################################################
  @traced
  def search(self, *args, **kwargs):
    """
    Searches the database.

    args/kwargs should be either a Query object, or arguments to the Query-object constructor.
    """
    if self._pending_indexer:
      self.step_indexer()
      # step sync might change the db sync status
      if not self._cur_shard_manager:
        return QueryResult()

    query = Query.from_kargs(args, kwargs)
    return query.execute(self._cur_shard_manager, self._cur_query_cache)
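Note: a hypothetical use of the DB class above, following its own docstrings. The settings object and the directory path are placeholders, not part of the original example.

db = DB(settings)
db.add_dir("/home/user/src/myproject")  # stored in settings, marks the index dirty
db.sync()                               # steps the indexer until the index is current
result = db.search("main")              # args/kwargs are forwarded to the Query constructor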
Example #16
class NetworkNode(object):
    
    MAX_REQUEST_ID = 1000000
    """ keep track of requests send to other nodes """
    def __init__(self, network_handler, random=SystemRandom()):
        self.network_handler = network_handler
        self.requests_sent = {}
        self.requests_received = {}
        self.addr = self.network_handler.getaddr()
        self.network_handler.ON_MESSAGE.subscribe(self._on_message)
        self.random = random
        self.ON_REQUEST = Event()
    
    def find_unique_requestid(self):
        result_id = self.random.randint(0, self.MAX_REQUEST_ID)
        while result_id in self.requests_sent:
            result_id = self.random.randint(0, self.MAX_REQUEST_ID)
        return result_id

    def request(self, addr, request):
        id = self.find_unique_requestid()
        f = Future()
        self.requests_sent[id] = (f, request)
        self._send_request(addr, request, id)
        return f

    def reply(self, reply_future):
        try:
            result = reply_future.result()
        except Exception:
            traceback.print_exc()
            result = None
        from_addr, request, id = self.requests_received[reply_future]
        self._send_reply(from_addr, result, id)
    
    def _send_request(self, addr, request, id):
        data = json.dumps({"request": request, "id": id})
        self.network_handler.send(addr, data)

    def _send_reply(self, addr, reply, id):
        data = json.dumps({"reply": reply, "id": id})
        self.network_handler.send(addr, data)
        
    def _on_message(self, from_addr, data):
        data = json.loads(data)
        if "request" in data:
            reply_future = Future()
            self.requests_received[reply_future] = (from_addr, data["request"], data.get("id"))
            self.ON_REQUEST.fire(reply_future, from_addr, data["request"], data.get("id"))
            reply_future.add_done_callback(self.reply)
            #then(reply_future, self.reply)
        if "reply" in data:
            if "id" not in data:
                print "error: missing 'id' in reply"
                return
            if not (isinstance(data["id"], basestring) or type(data["id"]) is int):
                print "error: 'id' should be an int or a string"
                return
            if data["id"] not in self.requests_sent:
                print self.requests_sent
                print data["id"]
                print "error: unknown 'id'"
                return
            future, request = self.requests_sent[data["id"]]
            future.set_result(data["reply"])
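Note: a hypothetical request/reply round trip with the NetworkNode above, assuming ON_REQUEST exposes subscribe() the same way network_handler.ON_MESSAGE does, and a network_handler (not shown) that actually delivers the JSON messages between the two nodes.

def on_request(reply_future, from_addr, request, request_id):
    # completing the future triggers NetworkNode.reply(), which sends the answer back
    reply_future.set_result({"echo": request})

server.ON_REQUEST.subscribe(on_request)
future = client.request(server.addr, {"ping": 1})
answer = future.result()  # resolved once _on_message matches the reply's id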
Example #17
class Daemon(BaseHTTPServer.HTTPServer):
    def __init__(self, test_mode, *args):
        BaseHTTPServer.HTTPServer.__init__(self, *args)
        self.port_ = args[0][1]
        self.routes = []
        self.test_mode = test_mode
        self.hi_idle = Event()  # event that is fired every 0.05sec as long as no transactions are pending
        self.lo_idle = Event()  # event that is fired once a second
        self.exit = Event()

        self.add_json_route('/exit', self.on_exit, ['POST', 'GET'])

        if test_mode:
            import daemon_test
            daemon_test.add_test_handlers_to_daemon(self)

    def on_exit(self, m, verb, data):
        logging.info("Exiting upon request.")
        self.shutdown()
        self.exit.fire()
        return {"status": "OK"}

    def add_json_route(self, path_regex, handler, allowed_verbs):
        re.compile(path_regex)
        self.routes.append(Route(path_regex, 'json', handler, allowed_verbs))

    def find_route_matching(self, path, verb):
        found_route = None
        found_match = None

        for r in self.routes:
            m = re.match(r.path_regex, path)
            if m:
                found_route = r
                found_match = m
                if verb in r.allowed_verbs:
                    return (r, True, m)
        if found_route:
            return (found_route, False, found_match)
        return (None, None, None)

    def serve_forever(self):
        self.is_running_ = True
        while self.is_running_:
            if self.hi_idle.has_listeners:
                delay = 0.05
                fire_lo_idle_listeners = False
            else:
                delay = 1
                fire_lo_idle_listeners = True

            r, w, e = select.select([self], [], [], delay)
            if r:
                self.handle_request()
            else:
                self.hi_idle.fire()
            if fire_lo_idle_listeners:
                self.lo_idle.fire()

    def shutdown(self):
        self.is_running_ = False
        return 1

    def run(self):
        if self.test_mode:
            logging.info('Starting quickopen daemon on port %d', self.port_)
        else:
            sys.stderr.write('Starting quickopen daemon on port %d\n' %
                             self.port_)
        self.serve_forever()
        logging.info('Shutting down quickopen daemon on port %d', self.port_)
Example #18
class DB(object):
  def __init__(self, settings):
    self.settings = settings
    self.needs_indexing = Event() # fired when the database gets dirtied and needs syncing
    self._pending_indexer = None # non-None if a DBIndex is running
    self._cur_index = None # the last DBIndex object --> actually runs the searches

    self._dir_cache = DirCache() # thread only state

    # if we are currently looking for changed dirs, this is the iterator
    # directories remaining to be checked
    self._pending_up_to_date_generator = None 

    self.settings.register('dirs', list, [], self._on_settings_dirs_changed)
    self._on_settings_dirs_changed(None, self.settings.dirs)

    self.settings.register('ignores', list, [], self._on_settings_ignores_changed)
    if self.settings.ignores == []:
      self.settings.ignores = DEFAULT_IGNORES

    self._on_settings_ignores_changed(None, self.settings.ignores)

  ###########################################################################

  def _on_settings_dirs_changed(self, old, new):
    self._dirs = map(lambda d: DBDir(d), new)
    self._set_dirty()

  @property
  def dirs(self):
    return list(self._dirs)

  def add_dir(self, d):
    real_d = os.path.realpath(d)

    cur = list(self.settings.dirs)
    if real_d in cur:
      raise DBException("Directory %s exists already as %s" % (d, real_d))

    # commit change
    cur.append(real_d)
    self.settings.dirs = cur  # triggers _on_settings_dirs_changed
    return self.dirs[-1]

  def delete_dir(self, d):
    if type(d) != DBDir:
      raise Exception("Expected DBDir")
    cur = list(self.settings.dirs)
    if d.path not in cur:
      raise DBException("not found")
    cur.remove(d.path)
    self.settings.dirs = cur # triggers _on_settings_dirs_changed

  ###########################################################################

  def _on_settings_ignores_changed(self, old, new):
    self._set_dirty()

  @property
  def ignores(self):
    return list(self.settings.ignores)

  def ignore(self,pattern):
    i = list(self.settings.ignores)
    if pattern in i:
      return
    i.append(pattern)
    self.settings.ignores = i

  def unignore(self,pattern):
    i = list(self.settings.ignores)
    i.remove(pattern)
    self.settings.ignores = i

  ###########################################################################

  @property
  def has_index(self):
    return self._cur_index != None

  @property
  def is_up_to_date(self):
    return self._pending_indexer == None

  def check_up_to_date(self):
    if not self.is_up_to_date:
      return False
    import time
    self.check_up_to_date_a_bit_more()
    while self._pending_up_to_date_generator:
      self.check_up_to_date_a_bit_more()

  @trace
  def check_up_to_date_a_bit_more(self):
    if not self.is_up_to_date:
      return

    if self._pending_up_to_date_generator == None:
      logging.debug("Starting to check for changed directories.")
      self._pending_up_to_date_generator = self._dir_cache.iterdirnames().__iter__()

    for i in range(10):
      try:
        d = self._pending_up_to_date_generator.next()
      except StopIteration:
        self._pending_up_to_date_generator = None
        logging.debug("Done checking for changed directories.")
        break
      if self._dir_cache.listdir_with_changed_status(d)[1]:
        logging.debug("Change detected in %s!", d)
        self._pending_up_to_date_generator = None
        self._set_dirty()
        break

  def begin_reindex(self):
    self._set_dirty()

  def _set_dirty(self):
    was_indexing = self._pending_indexer != None
    if self._pending_indexer:
      self._pending_indexer = None
    self._pending_indexer = 1 # set to 1 as indication to step_indexer to create new indexer
    if not was_indexing:
      self.needs_indexing.fire()

  @trace
  def status(self):
    if self._pending_indexer:
      if isinstance(self._pending_indexer, DBIndexer): # is an integer briefly between _set_dirty and first step_indexer
        if self._cur_index:
          status = "syncing: %s, %s" % (self._pending_indexer.progress, self._cur_index.status)
        else:
          status = "first-time sync: %s" % self._pending_indexer.progress
      else:
        status = "sync scheduled"
    else:
      if self._cur_index:
        status = "up-to-date: %s" % self._cur_index.status
      else:
        status = "sync required"

    res = DBStatus()
    res.is_up_to_date = self.is_up_to_date
    res.has_index = self.has_index
    res.status = status
    return res

  @trace
  def step_indexer(self):
    if not self._pending_indexer:
      return

    if not isinstance(self._pending_indexer, DBIndexer):
      self._dir_cache.set_ignores(self.settings.ignores)
      self._pending_indexer = DBIndexer(self.settings.dirs, self._dir_cache)

    if self._pending_indexer.complete:
      self._cur_index = DBIndex(self._pending_indexer)
      self._pending_indexer = None
    else:
      self._pending_indexer.index_a_bit_more()

  def sync(self):
    """Ensures database index is up-to-date"""
    self.check_up_to_date()
    while self._pending_indexer:
      self.step_indexer()

  ###########################################################################
  def _empty_result(self):
    return DBIndexSearchResult()

  @trace
  def search(self, query, max_hits = -1):
    if self._pending_indexer:
      self.step_indexer()
      # step sync might change the db sync status
      if not self._cur_index:
        return self._empty_result()

    if query == '':
      return self._empty_result()

    if max_hits == -1:
      return self._cur_index.search(query)
    else:
      return self._cur_index.search(query, max_hits)
Example #19
class Daemon(BaseHTTPServer.HTTPServer):
    def __init__(self, test_mode, *args):
        BaseHTTPServer.HTTPServer.__init__(self, *args)
        self.port_ = args[0][1]
        self.routes = []
        self.test_mode = test_mode
        self._pending_timeout_heap = []
        self.exit = Event()

        self.add_json_route('/exit', self.on_exit, ['POST', 'GET'])

        if test_mode:
            import daemon_test
            daemon_test.add_test_handlers_to_daemon(self)

    def on_exit(self, m, verb, data):
        logging.info("Exiting upon request.")
        self.shutdown()
        self.exit.fire()
        return {"status": "OK"}

    def add_json_route(self, path_regex, handler, allowed_verbs):
        re.compile(path_regex)
        self.routes.append(Route(path_regex, 'json', handler, allowed_verbs))

    def find_route_matching(self, path, verb):
        found_route = None
        found_match = None

        for r in self.routes:
            m = re.match(r.path_regex, path)
            if m:
                found_route = r
                found_match = m
                if verb in r.allowed_verbs:
                    return (r, True, m)
        if found_route:
            return (found_route, False, found_match)
        return (None, None, None)

    def add_delayed_task(self, cb, delay, *args):
        deadline = time.time() + delay
        to = _Timeout(cb, deadline, args)
        heapq.heappush(self._pending_timeout_heap, to)

    def serve_forever(self):
        self.is_running_ = True
        while self.is_running_:
            now = time.time()
            while True:
                if len(self._pending_timeout_heap):
                    deadline = self._pending_timeout_heap[0].deadline
                    if now > deadline:
                        item = heapq.heappop(self._pending_timeout_heap)
                        item.cb(*item.args)
                    else:
                        next_deadline = deadline
                        break
                else:
                    next_deadline = now + 0.2
                    break

            now = time.time()
            delay = max(0.0, next_deadline - now)
            r, w, e = select.select([self], [], [], delay)
            if r:
                self.handle_request()

    def shutdown(self):
        self.is_running_ = False
        return 1

    def run(self):
        self.serve_forever()
Example #20
class Player:
    def __init__ (self, enablePin):
        self.media_player = pexpect.spawn('mpg321 -R player')
        self.playlist = []
        self.playlist_index = -1

        self.monitoring_thread = threading.Thread(target=self.run_monitoring_thread)
        self.monitoring_thread.daemon = True
        self.monitoring_thread.start()

        self.next_track_event = threading.Event()
        self.time_start_track = time()
        self.enablePin = enablePin
        GPIO.setup(self.enablePin, GPIO.OUT)
        GPIO.output(self.enablePin, GPIO.LOW)

        self.track_changed = Event()
        self.stopped = Event()
        self.volume = 0
        self.set_volume(80)

    def run_monitoring_thread(self):
        try:
            while True:
                self.media_player.expect("@P (0|3)", timeout=315360000)
                print("Track Stopped")
                self.on_song_finished()
        except (KeyboardInterrupt, SystemExit):
            pass

    def set_volume(self, new_volume):
        if new_volume > 100:
            new_volume = 100
        if new_volume < 70:
            new_volume = 70
        if new_volume != self.volume:
            self.volume = new_volume
            call(["amixer", "sset", "PCM", "%d%%" % (new_volume)])

    def previous(self):
        if len(self.playlist) == 0:
            return

        if time() - self.time_start_track > 5:
            self.play_track()
        elif self.playlist_index > 0:
            self.playlist_index -= 1
            self.play_track()

    def next(self):
        if self.playlist_index < len(self.playlist) - 1:
            self.playlist_index += 1
            self.play_track()

    def toggle_pause(self):
        self.media_player.sendline("PAUSE")

    def volume_up(self):
        self.set_volume(self.volume + 5)

    def volume_down(self):
        self.set_volume(self.volume - 5)

    def play_album(self, album):
        self.playlist = natsorted([join(album, f) for f in listdir(album) if isfile(join(album, f))], alg=ns.IGNORECASE)
        self.playlist_index = 0
        self.play_track()

    def play_track(self):
        print("Track Started")
        self.media_player.sendline("LOAD %s" % (self.playlist[self.playlist_index]))
        GPIO.output(self.enablePin, GPIO.HIGH)
        self.time_start_track = time()
        self.track_changed.fire(self.playlist_index + 1)

    def on_song_finished(self):
        self.playlist_index += 1
        if len(self.playlist) > self.playlist_index:
            self.play_track()
        else:
            self.stopped.fire()
Example #21
class DB(object):
    def __init__(self, settings):
        self.settings = settings
        self.needs_indexing = Event()  # fired when the database gets dirtied and needs syncing
        self._pending_indexer = None  # non-None if a DBIndexer is running
        self._cur_shard_manager = None  # the current DBShardManager object. This has all the DBIndexShards.
        self._cur_query_cache = None

        self._dir_cache = DirCache()  # thread only state

        self.settings.register('dirs', list, [],
                               self._on_settings_dirs_changed)
        self._on_settings_dirs_changed(None, self.settings.dirs)

        self.settings.register('ignores', list, [],
                               self._on_settings_ignores_changed)
        if self.settings.ignores == []:
            self.settings.ignores = DEFAULT_IGNORES

        self._on_settings_ignores_changed(None, self.settings.ignores)

        self.settings.register('token', str, "",
                               self._on_settings_token_changed)
        self._on_settings_token_changed(None, self.settings.token)

    def close(self):
        if self._cur_shard_manager:
            self._cur_shard_manager.close()
            self._cur_shard_manager = None

    ###########################################################################

    def _on_settings_dirs_changed(self, old, new):
        self._dirs = map(lambda d: DBDir(d), new)
        self._set_dirty()

    @property
    def dirs(self):
        return list(self._dirs)

    def add_dir(self, d):
        real_d = os.path.realpath(d)

        cur = list(self.settings.dirs)
        if real_d in cur:
            raise DBException("Directory %s exists already as %s" %
                              (d, real_d))

        # commit change
        cur.append(real_d)
        self.settings.dirs = cur  # triggers _on_settings_dirs_changed
        return self.dirs[-1]

    def delete_dir(self, d):
        if type(d) != DBDir:
            raise Exception("Expected DBDir")
        cur = list(self.settings.dirs)
        if d.path not in cur:
            raise DBException("not found")
        cur.remove(d.path)
        self.settings.dirs = cur  # triggers _on_settings_dirs_changed

    ###########################################################################

    def _on_settings_ignores_changed(self, old, new):
        self._set_dirty()

    @property
    def ignores(self):
        return list(self.settings.ignores)

    def ignore(self, pattern):
        i = list(self.settings.ignores)
        if pattern in i:
            return
        i.append(pattern)
        self.settings.ignores = i

    def unignore(self, pattern):
        i = list(self.settings.ignores)
        i.remove(pattern)
        self.settings.ignores = i

    ###########################################################################

    def _on_settings_token_changed(self, old, new):
        self._set_dirty()

    @property
    def token(self):
        return self.settings.token

    @token.setter
    def token(self, token):
        self.settings.token = token

    ###########################################################################

    @property
    def has_index(self):
        return self._cur_shard_manager != None

    @property
    def is_up_to_date(self):
        return self._pending_indexer == None

    def begin_reindex(self):
        self._set_dirty()

    def _set_dirty(self):
        was_indexing = self._pending_indexer != None
        if self._pending_indexer:
            self._pending_indexer = None
        self._pending_indexer = 1  # set to 1 as indication to step_indexer to create new indexer
        if not was_indexing:
            self.needs_indexing.fire()

    @traced
    def status(self):
        if self._pending_indexer:
            # Is an integer briefly between _set_dirty and first step_indexer
            if not isinstance(self._pending_indexer, int):
                if self._cur_shard_manager:
                    status = "syncing: %s, %s" % (
                        self._pending_indexer.progress,
                        self._cur_shard_manager.status)
                else:
                    status = "first-time sync: %s" % self._pending_indexer.progress
            else:
                status = "sync scheduled"
        else:
            if self._cur_shard_manager:
                status = "up-to-date: %s" % self._cur_shard_manager.status
            else:
                status = "sync required"

        res = DBStatus()
        res.is_up_to_date = self.is_up_to_date
        res.has_index = self.has_index
        res.status = status
        return res

    @traced
    def step_indexer(self):
        if not self._pending_indexer:
            return

        # _pending_indexer is an integer if recreation should be triggered.
        if isinstance(self._pending_indexer, int):
            self._dir_cache.set_ignores(self.settings.ignores)
            self._pending_indexer = db_indexer.Create(self.settings.dirs,
                                                      self._dir_cache)
            self._pending_indexer_start_time = time.time()

        if self._pending_indexer.complete:
            elapsed = time.time() - self._pending_indexer_start_time
            logging.debug("Indexing with %s took %s seconds",
                          type(self._pending_indexer), elapsed)
            self._cur_shard_manager = DBShardManager(self._pending_indexer)
            self._cur_query_cache = QueryCache()
            self._pending_indexer = None

        else:
            self._pending_indexer.index_a_bit_more()

    def sync(self):
        """Ensures database index is up-to-date"""
        self.begin_reindex()
        while not self.is_up_to_date:
            self.step_indexer()

    ###########################################################################
    @traced
    def search(self, *args, **kwargs):
        """
    Searches the database.

    args/kwargs should be either a Query object, or arguments to the Query-object constructor.
    """
        if self._pending_indexer:
            self.step_indexer()
            # step sync might change the db sync status
            if not self._cur_shard_manager:
                return QueryResult()

        query = Query.from_kargs(args, kwargs)
        return query.execute(self._cur_shard_manager, self._cur_query_cache)