Example #1
0
    def get_stream_refs(self, only_accounts=None, exclude_accounts=None):
        """Build, cap, and store stream filter refs for the selected accounts.

        :param only_accounts: optional iterable restricting which accounts'
            channels are tracked; falls back to ``self.only_accounts``.
        :param exclude_accounts: optional iterable of accounts to drop;
            falls back to ``self.exclude_accounts``.
        :returns: the result of ``self.store(...)`` on the capped filter list.
        """
        from solariat.utils.iterfu import flatten
        from solariat_bottle.utils.tracking import get_channel_post_filters_map, combine_and_split

        excluded = set(exclude_accounts or self.exclude_accounts)
        included = set(only_accounts or self.only_accounts) - excluded

        # Collect every Active/Interim channel of the explicitly included accounts.
        channels = set(flatten(
            account.get_current_channels(status__in={'Active', 'Interim'})
            for account in included))

        filters_by_channel = get_channel_post_filters_map(channels)
        if not channels:
            # No explicit restriction was given, so the map covers all
            # channels; drop those belonging to excluded accounts here.
            filters_by_channel = {
                chan: kws
                for chan, kws in filters_by_channel.iteritems()
                if chan.account not in excluded
            }

        split_filters = combine_and_split(filters_by_channel,
                                          max_track=self.max_track,
                                          max_follow=self.max_follow)

        # Cap the number of streams at the auth pool's capacity, logging
        # whatever we have to drop.
        capacity = auth_pool.get_capacity()
        needed = len(split_filters)
        if needed > capacity:
            LOGGER.critical(u"Not enough apps. Required: %d Capacity: %d "
                            u"Omitting %s filters: %s" %
                            (needed, capacity, needed -
                             capacity, split_filters[capacity:]))
            split_filters = split_filters[:capacity]

        return self.store(split_filters)
Example #2
0
def postprocess_events(user):
    """Run dynamic-event postprocessing for *user*'s account.

    Sets the request user context, processes the account's events, and
    always clears the account's processing lock/restart flags — even on
    failure — so the next scheduled run can proceed. Errors are logged
    rather than propagated, keeping the scheduler alive.

    :param user: the user whose ``account`` should be processed.
    """
    from solariat_bottle.db.user import set_user

    set_user(user)
    account = user.account

    start = time.time()
    try:
        _postprocess_events(account)
        # NOTE: no retry-on-restart loop here on purpose — this runs as a
        # scheduled task, so a needed restart simply happens on the next
        # iteration (per Bogdan Neacsa, 2016-11-11).
    except Exception:
        # Broad catch is deliberate at this task boundary, but avoid a bare
        # `except:` so KeyboardInterrupt/SystemExit still propagate.
        LOGGER.critical('[DynamicEvents Postprocessing] Cannot process events:', exc_info=True)
    finally:
        # Always release the lock, even if processing failed.
        account.update(event_processing_lock=False, event_processing_needs_restart=False)

    LOGGER.info('[DynamicEvents Postprocessing] took: %s sec', time.time() - start)