def run(self):
        """Main loop: poll Hacker News and announce top stories with score >= 100.

        Walks the current top-30 list roughly once every 5 minutes, pacing
        itself with per-story sleeps.  Stories already recorded as
        HackerNewsStory rows are skipped so each one is announced only once.
        """
        nr_top_stories = 30
        # Spread the sleeps so a full pass over the list takes ~5 minutes.
        sleep_per_story = 5 * 60 / nr_top_stories
        while True:
            url = self.URL_BASE + 'topstories.json'
            top_stories = yield from self.async_get_json(url)
            # Go through the full list every 5 mins
            slept = 0
            for item_nr in top_stories[0:nr_top_stories]:
                item_nr = str(item_nr)
                # If we have already announced the story, do not even
                # bother fetching it.
                count = yield from to_aio(HackerNewsStory.objects.filter(id=item_nr).count())
                if count:
                    continue

                url = self.URL_BASE + 'item/%s.json' % item_nr
                item = yield from self.async_get_json(url)
                # The item endpoint returns null for deleted/dead stories, and
                # some items lack a 'score' field — guard before comparing so
                # we don't crash with TypeError/KeyError mid-loop.
                if item is None or item.get('score', 0) < 100:
                    yield from asyncio.sleep(sleep_per_story)
                    slept += 1
                    continue
                yield from self.announce_new_story(item)
                story = HackerNewsStory(id=str(item['id']), time=datetime.fromtimestamp(item['time']))
                yield from to_aio(story.save())

                yield from asyncio.sleep(sleep_per_story)
                slept += 1

            # Pad the pass out so each full cycle still takes ~5 minutes even
            # when most stories were skipped without sleeping.
            if slept < nr_top_stories:
                yield from asyncio.sleep((nr_top_stories - slept) * sleep_per_story)
# ---- Beispiel #2 ----
    def handle_message_changed(self, msg):
        """Sync an edited Slack message back into its matching WorkLog row."""
        if msg.get('channel') != self.channel.id:
            return

        edited = msg['message']
        matches = yield from to_aio(
            WorkLog.objects.filter(origin_id=edited['ts']).find_all())
        # Only update when exactly one log entry matches the edited message.
        if len(matches) != 1:
            return
        entry = matches[0]
        entry.text = edited['text']
        yield from to_aio(entry.save())
# ---- Beispiel #3 ----
    def handle_message_changed(self, msg):
        """Apply a Slack message edit to the stored WorkLog entry, if any."""
        if msg.get('channel') != self.channel.id:
            return

        ts = msg['message']['ts']
        found = yield from to_aio(WorkLog.objects.filter(origin_id=ts).find_all())
        # Ignore the event unless it maps to exactly one stored entry.
        if len(found) == 1:
            record = found[0]
            record.text = msg['message']['text']
            yield from to_aio(record.save())
# ---- Beispiel #4 ----
    def handle_message_deleted(self, msg):
        """Flag the WorkLog entry of a deleted Slack message as deleted."""
        if msg.get('channel') != self.channel.id:
            return

        deleted_ts = msg['deleted_ts']
        matches = yield from to_aio(
            WorkLog.objects.filter(origin_id=deleted_ts).find_all())
        # Only act when the timestamp maps to exactly one stored entry.
        if len(matches) != 1:
            return
        entry = matches[0]
        entry.deleted = True
        yield from to_aio(entry.save())
# ---- Beispiel #5 ----
    def handle_message_deleted(self, msg):
        """Soft-delete the stored WorkLog for a message removed from Slack."""
        if msg.get('channel') != self.channel.id:
            return

        qs = WorkLog.objects.filter(origin_id=msg['deleted_ts'])
        found = yield from to_aio(qs.find_all())
        # Skip ambiguous or unknown timestamps (anything but a single match).
        if len(found) == 1:
            record = found[0]
            record.deleted = True
            yield from to_aio(record.save())
# ---- Beispiel #6 ----
    def handle_message(self, msg):
        """Persist a new Slack message from the watched channel as a WorkLog."""
        if msg.get('channel') != self.channel.id:
            return

        ts = msg['ts']
        author = self.rtm.find_user(msg['user'])
        entry = WorkLog(
            origin_id=ts,
            user_id=author.id,
            username=author.name,
            # Slack timestamps are fractional-second strings; convert for storage.
            time=datetime.fromtimestamp(float(ts)),
            text=msg['text'],
        )
        yield from to_aio(entry.save())
# ---- Beispiel #7 ----
    def handle_message(self, msg):
        """Record an incoming Slack message from our channel as a WorkLog row."""
        if msg.get('channel') != self.channel.id:
            return

        author = self.rtm.find_user(msg['user'])
        # Slack's 'ts' is a fractional-second string; parse it for the time field.
        posted_at = datetime.fromtimestamp(float(msg['ts']))
        record = WorkLog(origin_id=msg['ts'], user_id=author.id,
                         username=author.name, time=posted_at,
                         text=msg['text'])
        yield from to_aio(record.save())
# ---- Beispiel #8 ----
    def run(self):
        """Main loop: poll Hacker News and announce top stories with score >= 100.

        Resolves the target Slack channel, then walks the current top-30
        list roughly every 5 minutes while ``self.alive`` is set, pacing
        itself with per-story sleeps.  Stories already recorded as
        HackerNewsStory rows are skipped so each is announced only once.
        """
        self.channel = self.rtm.find_channel_by_name(self.config['channel'])

        nr_top_stories = 30
        # Spread the sleeps so a full pass over the list takes ~5 minutes.
        sleep_per_story = 5 * 60 / nr_top_stories
        while self.alive:
            url = self.URL_BASE + 'topstories.json'
            top_stories = yield from self.async_get_json(url)
            # Go through the full list every 5 mins
            slept = 0
            for item_nr in top_stories[0:nr_top_stories]:
                # Allow prompt shutdown mid-pass.
                if not self.alive:
                    break

                item_nr = str(item_nr)
                # If we have already announced the story, do not even
                # bother fetching it.
                count = yield from to_aio(
                    HackerNewsStory.objects.filter(id=item_nr).count())
                if count:
                    continue

                url = self.URL_BASE + 'item/%s.json' % item_nr
                item = yield from self.async_get_json(url)
                # The item endpoint returns null for deleted/dead stories, and
                # some items lack a 'score' field — guard before comparing so
                # we don't crash with TypeError/KeyError mid-loop.
                if item is None or item.get('score', 0) < 100:
                    yield from asyncio.sleep(sleep_per_story)
                    slept += 1
                    continue
                yield from self.announce_new_story(item)
                story = HackerNewsStory(id=str(item['id']),
                                        time=datetime.fromtimestamp(
                                            item['time']))
                yield from to_aio(story.save())

                yield from asyncio.sleep(sleep_per_story)
                slept += 1

            # Pad the pass out so each full cycle still takes ~5 minutes even
            # when most stories were skipped without sleeping.
            if slept < nr_top_stories:
                yield from asyncio.sleep(
                    (nr_top_stories - slept) * sleep_per_story)