Code Example #1
File: Scraper.py  Project: fluxTH/fb-logger
    def saveToDB(self, proxy, overlay, db, full=False):
        if proxy is not None:
            logtype = LogType.CHATPROXY_RELOAD if full else LogType.CHATPROXY_LONGPOLL

            self.printActiveUsers(proxy, full=full)
            db.save(proxy, logtype=logtype.value)

        if overlay is not None:
            logtype = LogType.BUDDYLIST_OVERLAY

            for fbid, status in overlay.items():
                tsprint('Overlay: User {} went {}'.format(
                    fbid, 'online' if status['a'] == 2 else 'offline'))
            db.save(self.normalizeOverlayResponse(overlay),
                    logtype=logtype.value)

        return
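The LogType members referenced above (CHATPROXY_RELOAD, CHATPROXY_LONGPOLL, BUDDYLIST_OVERLAY) are defined elsewhere in the project. A minimal sketch of a compatible enum, assuming an IntEnum whose .value is what db.save() stores; the member names come from the excerpt, but the numeric values are assumptions:

from enum import IntEnum

class LogType(IntEnum):
    # Numeric values are illustrative assumptions; only the member
    # names appear in the excerpt above.
    CHATPROXY_RELOAD = 1
    CHATPROXY_LONGPOLL = 2
    BUDDYLIST_OVERLAY = 3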
Code Example #2
File: Logger.py  Project: fluxTH/fb-logger
    def run(self):
        try:
            self.mainLoop()
        except KeyboardInterrupt:
            tsprint('User Quit')
        except DatabaseException as e:
            logging.fatal(e, exc_info=True)
            tserror('Database Error: {}'.format(e))
            traceback.print_tb(e.__traceback__)
        except Exception as e:
            logging.fatal(e, exc_info=True)
            tserror('FATAL: {}'.format(e))
            traceback.print_tb(e.__traceback__)
        finally:
            tsprint('Terminating...')

            # Delete PID file
            os.remove(self.getConfig('pid_file', './fblogger.pid'))
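The helpers tsprint, tserror, and dprint appear throughout these excerpts but are defined elsewhere in fb-logger. A plausible minimal sketch, assuming timestamped wrappers around print (the real implementations may differ):

import sys
from datetime import datetime

def _ts():
    # Current timestamp, e.g. '2023-01-01 12:00:00' (assumed format).
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')

def tsprint(msg):
    # Timestamped status message on stdout.
    print('[{}] {}'.format(_ts(), msg))

def tserror(msg):
    # Timestamped error message on stderr.
    print('[{}] {}'.format(_ts(), msg), file=sys.stderr)

def dprint(msg):
    # Debug output; the real helper is presumably gated by a debug flag.
    tsprint('DEBUG: {}'.format(msg))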
Code Example #3
    def migrateSchema(self, to_version):
        migrations_path = self.config.get('migrations',
                                          './fblogger/migrations/')
        migration_filename = 'migration_v{}.sql'

        sc_ver = int(self.getDbConfig('schema_version'))

        if sc_ver >= to_version:
            raise MigrationException(
                'Cannot migrate to current or older schema.')

        c = self.conn.cursor()
        retries = 0
        while sc_ver < to_version:
            next_ver = sc_ver + 1
            if retries >= 3:
                raise MigrationException(
                    'Migration took too many retries to update from v{} to v{}.'
                    .format(sc_ver, next_ver))

            tsprint('Migrating to schema v{}...'.format(next_ver))

            target_migration = os.path.join(
                migrations_path, migration_filename.format(next_ver))
            if not os.path.isfile(target_migration):
                raise MigrationException(
                    'Migration for schema version {} not found: {}'.format(
                        next_ver, target_migration))

            with open(target_migration, 'r') as f:
                sql = f.read()
                c.executescript(sql)

            self.conn.commit()
            sc_ver = int(self.getDbConfig('schema_version'))

            if sc_ver < next_ver:
                retries += 1
            else:
                retries = 0

        tsprint(
            'Database schema successfully migrated to v{}.'.format(to_version))
        return
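Note the contract this loop implies: after executing each script it re-reads schema_version from the database and counts a retry if the version did not advance, so every migration script must bump schema_version itself. A hypothetical migration_v2.sql illustrating that contract, embedded here as a Python string; the config table and column names are assumptions, not taken from the project:

# Hypothetical contents of ./fblogger/migrations/migration_v2.sql.
# Table and column names are assumed for illustration only.
MIGRATION_V2_SQL = """
ALTER TABLE log ADD COLUMN extra TEXT;                      -- example DDL
UPDATE config SET value = '2' WHERE key = 'schema_version'; -- required bump
"""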
Code Example #4
File: Scraper.py  Project: fluxTH/fb-logger
    def printActiveUsers(self, data, full=False):
        # TODO: revise the wording and the counting logic; these numbers
        # are currently not accurate.

        total = len(data)
        active = 0
        idle = 0

        for fbid in data:
            # Skip entries without a presence ('p') field.
            if 'p' not in data[fbid]:
                continue

            status = data[fbid]['p']
            if status == 2:
                active += 1
            elif status == 0:
                idle += 1

        mode = 'Full' if full else 'Longpoll'

        tsprint('ChatProxy [{}]: {} active, {} idle, {} total.'.format(
            mode, active, idle, total))
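For reference, a hypothetical payload showing the shape printActiveUsers() expects: a dict keyed by Facebook user ID where 'p', when present, is 2 for active and 0 for idle. The IDs and the scraper instance here are made up for illustration:

data = {
    '100000000000001': {'p': 2},  # active
    '100000000000002': {'p': 0},  # idle
    '100000000000003': {},        # no presence field; skipped
}
scraper.printActiveUsers(data, full=True)
# -> ChatProxy [Full]: 1 active, 1 idle, 3 total.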
Code Example #5
File: Logger.py  Project: fluxTH/fb-logger
    def handleLongpollException(self, exc):
        self.incrementErrorCount(exc, 'longpoll')

        logging.error(exc, exc_info=True)
        tserror('Longpoll {}: {}'.format(exc.__class__.__name__, exc))

        err_count = self.getErrorCount(exc, 'longpoll')
        if err_count > self.getConfig('scraper.longpoll_retry_limit', 3):
            if err_count > self.getConfig('scraper.longpoll_chill_limit', 6):
                # Exit to full request loop
                tsprint(
                    'Chill limit reached after {} retries, exiting longpoll mode.'
                    .format(err_count))
                raise exc

            wait = self.getConfig('scraper.longpoll_chill_timeout', 60)
            tsprint(
                'Longpoll chill threshold reached after {} retries.'.format(
                    err_count))
        else:
            wait = self.getConfig('scraper.longpoll_retry_timeout', 10)

        tsprint('Waiting {}s before retrying longpoll ({})...'.format(
            wait, err_count))
        time.sleep(wait)

        raise ContinueLoop
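ContinueLoop and the error-counter methods are defined elsewhere in the project. A minimal sketch of how they could work, assuming ContinueLoop is a bare control-flow exception and the counters live in a dict keyed by mode and exception class (both assumptions for illustration):

class ContinueLoop(Exception):
    # Control-flow signal: unwind to the enclosing loop and continue.
    pass

class ErrorCounterMixin:
    # Assumed implementation of the counter methods used above.
    def incrementErrorCount(self, exc, mode):
        self._err_counts = getattr(self, '_err_counts', {})
        key = (mode, exc.__class__.__name__)
        self._err_counts[key] = self._err_counts.get(key, 0) + 1

    def getErrorCount(self, exc, mode):
        return getattr(self, '_err_counts', {}).get(
            (mode, exc.__class__.__name__), 0)

    def resetErrorCounter(self, only_mode=None):
        counts = getattr(self, '_err_counts', {})
        for key in list(counts):
            if only_mode is None or key[0] == only_mode:
                del counts[key]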
Code Example #6
File: Scraper.py  Project: fluxTH/fb-logger
    def getBuddyList(self):
        self.checkLoadBalancerInfo()

        url = self.getEndpointUrl()
        qs = {
            'clientid': self.client_id,
            'channel': 'p_{}'.format(self.c_user),
            'seq': 1,
            'partition': -2,
            'cb': 'dead',
            'idle': 1,
            'qp': 'y',
            'cap': 8,
            'pws': 'fresh',
            'isq': 254579,
            'msgs_recv': 1,
            'uid': self.c_user,
            'viewer_uid': self.c_user,
            'state': 'active',
            'sticky_token': self.lb_data['sticky'],
            'sticky_pool': self.lb_data['pool']
        }

        data = self.doFbRequest(url, qs)

        if data['t'] == 'lb':
            tsprint('Got "lb" on fullreload, applying then reconnecting...')
            self.updateLoadBalancerInfo(self.parseLoadBalancerInfo(data))
            raise ContinueLoop

        if data['t'] != 'fullReload':
            logging.info('msg on fullreload: {}'.format(data))
            raise InvalidResponse(
                'Expected packet type "fullReload" from getBuddyList, got "{}"'
                .format(data['t']))

        return data
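doFbRequest() is not shown in these excerpts. A minimal sketch of what it plausibly does, assuming a GET with the querystring via a requests.Session and Facebook's usual 'for (;;);' anti-hijacking prefix stripped before JSON decoding; the prefix handling and the mapping onto NetworkError/InvalidResponse are assumptions:

import json
import requests

def doFbRequest(self, url, qs):
    try:
        resp = self.session.get(url, params=qs, timeout=60)
        resp.raise_for_status()
    except requests.RequestException as e:
        # Transport-level failures surface as the project's NetworkError.
        raise NetworkError(str(e))

    body = resp.text
    if body.startswith('for (;;);'):
        body = body[len('for (;;);'):]

    try:
        return json.loads(body)
    except ValueError:
        raise InvalidResponse('Response is not valid JSON.')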
Code Example #7
    def checkSchemaUpdates(self):
        try:
            sc_ver = int(self.getDbConfig('schema_version'))
        except KeyError:
            tsprint(
                'DB schema version not found, assuming freshly installed database...'
            )
            sc_ver = self.SCHEMA_VERSION
            self.setDbConfig('schema_version', sc_ver)

        if sc_ver < self.SCHEMA_VERSION:
            # Update schema
            tsprint('Updating database schema from v{} to v{}...'.format(
                sc_ver, self.SCHEMA_VERSION))
            self.migrateSchema(self.SCHEMA_VERSION)
        else:
            tsprint('Database schema up-to-date.')

        return
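getDbConfig()/setDbConfig() persist settings such as schema_version. A plausible sketch, assuming a simple key-value config table in the same SQLite database; the table name and schema are assumptions:

def getDbConfig(self, key):
    c = self.conn.cursor()
    c.execute('SELECT value FROM config WHERE key = ?', (key,))
    row = c.fetchone()
    if row is None:
        # checkSchemaUpdates() relies on KeyError for fresh databases.
        raise KeyError(key)
    return row[0]

def setDbConfig(self, key, value):
    c = self.conn.cursor()
    c.execute('INSERT OR REPLACE INTO config (key, value) VALUES (?, ?)',
              (key, str(value)))
    self.conn.commit()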
Code Example #8
File: Logger.py  Project: fluxTH/fb-logger
    def mainLoop(self):
        while True:
            try:
                dprint('Initial GET request')

                self.ping()

                resp = self.scraper.getBuddyList()
                chatproxy, overlay = self.scraper.parseFbResponse(resp)

                # reset error counter then save response
                self.resetErrorCounter(only_mode='full')

                self.scraper.saveToDB(chatproxy, overlay, self.db, full=True)

                # Longpoll not enabled
                if self.getConfig('scraper.longpoll', True) is not True:
                    req_wait = self.getConfig('scraper.request_interval', 300)
                    time.sleep(req_wait)

                    chatproxy = None
                    overlay = None

                    continue

                seq = 2

                while True:
                    if 'seq' in resp:
                        seq = resp['seq']

                    chatproxy = None
                    overlay = None

                    self.ping()

                    try:
                        dprint('Polling seq={}'.format(seq))
                        resp = self.scraper.longPoll(seq)

                    # handle failed polling
                    except (NetworkError, InvalidResponse) as exc:
                        try:
                            self.handleLongpollException(exc)
                        except ContinueLoop:
                            continue

                    # reset error counter then handle response
                    self.resetErrorCounter(only_mode='longpoll')
                    if resp['t'] == 'heartbeat':
                        dprint('Longpoll seq={} heartbeat.'.format(seq))

                    elif resp['t'] == 'lb':
                        tsprint(
                            'Got "lb" on longpoll seq={}, applying then reloading...'
                            .format(seq))
                        self.scraper.updateLoadBalancerInfo(
                            self.scraper.parseLoadBalancerInfo(resp))
                        raise LongPollReload('Got lb from longpoll packet.')

                    elif resp['t'] == 'fullReload':
                        dprint(
                            'Longpoll seq={} returned fullReload, try saving then reloading.'
                            .format(seq))
                        chatproxy, overlay = self.scraper.parseFbResponse(resp)
                        # dict_merge(flist, fb.parseFbResponse(resp))

                        raise LongPollReload(
                            'Got fullReload from longpoll packet.')

                    elif resp['t'] == 'msg':
                        chatproxy, overlay = self.scraper.parseFbResponse(resp)

                    else:
                        raise LongPollReload(
                            'Got unknown packet type "{}".'.format(resp['t']))

                    # save data
                    if chatproxy is not None or overlay is not None:
                        self.scraper.saveToDB(chatproxy, overlay, self.db)

                    # Throttle to the configured polling frequency
                    # (1/f seconds between iterations).
                    time.sleep(
                        1 / self.getConfig('scraper.loop_frequency', 10))

            except LongPollReload as m:
                tsprint('Longpoll Reload: {}'.format(m))
                self.scraper.resetSession()
                continue

            except (NetworkError, InvalidResponse) as exc:
                try:
                    self.handleFullRequestException(
                        exc,
                        reset_threshold=self.getConfig(
                            'scraper.request_session_limit', 2))
                except ContinueLoop:
                    continue

            except ContinueLoop:
                continue
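LongPollReload, like ContinueLoop, appears to be a pure control-flow exception: raising it breaks out of the longpoll loop so the outer loop can reset the session and start over with a full reload. A minimal sketch of the assumed definition:

class LongPollReload(Exception):
    # Raised inside the longpoll loop to force a session reset and a
    # fresh fullReload request in the outer loop.
    pass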