Example #1
0
    def active_pids(self, pids):
        """Record the currently live uwsgi pids and clean up stale state.

        Counters of pids that are no longer alive are deleted, and polled
        update entries that every live worker has already consumed are
        purged, advancing the ``first_valid_update`` watermark.
        """
        previous_pids = self.get_by_key('pids') or set()
        dead_pids = [p for p in previous_pids if p not in pids]
        for dead in dead_pids:
            LOG.debug('Purge dead uwsgi pid %s from pids list', dead)
            self.delete_by_key('pid:%s' % dead)

        self.set_by_key('pids', pids)

        # find the oldest update counter any live worker still needs
        min_update = self._get_update_count()
        for live in pids:
            consumed = self.get_by_key('pid:%s' % live)
            if consumed and consumed < min_update:
                min_update = consumed

        first_valid_update = self.get_by_key('first_valid_update') or 0
        LOG.debug('Purge polled updates from %(first)s to %(min)s', {
            'first': first_valid_update,
            'min': min_update
        })

        # bulk-delete the consumed update ids in chunks
        for delete_id_set in utils.make_range(first_valid_update, min_update,
                                              BULK_DELETE_SIZE):
            if not self.memcached.delete_multi(delete_id_set,
                                               key_prefix=UPDATE_ID_PREFIX):
                LOG.critical('Failed to delete_multi from memcached')
                raise Exception('Failed to delete_multi from memcached')

        self.set_by_key('first_valid_update', min_update)
    def active_pids(self, pids):
        """Sync the stored set of live uwsgi pids and drop dead workers' data."""
        for stale in (self.get_by_key('pids') or set()):
            if stale in pids:
                continue
            LOG.debug('Purge dead uwsgi pid %s from pids list', stale)
            self.delete_by_key('pid:%s' % stale)

        self.set_by_key('pids', pids)

        # remove unneeded updates: the smallest counter reported by a live
        # worker bounds what may still need to be re-read
        counters = (self.get_by_key('pid:%s' % p) for p in pids)
        min_update = min([self._get_update_count()] +
                         [c for c in counters if c])

        first_valid_update = self.get_by_key('first_valid_update') or 0
        LOG.debug('Purge polled updates from %(first)s to %(min)s',
                  {'first': first_valid_update, 'min': min_update})

        for delete_id_set in utils.make_range(first_valid_update, min_update,
                                              BULK_DELETE_SIZE):
            deleted = self.memcached.delete_multi(delete_id_set,
                                                  key_prefix=UPDATE_ID_PREFIX)
            if not deleted:
                LOG.critical('Failed to delete_multi from memcached')
                raise Exception('Failed to delete_multi from memcached')

        self.set_by_key('first_valid_update', min_update)
Example #3
0
def export_data(memcached_inst, fd):
    """Dump the contents of memcached into *fd* as a pickle stream.

    Each dumped item is a ``(key, value)`` tuple so an importer can replay
    them with ``memcached.set``.

    :param memcached_inst: memcached client instance to read from
    :param fd: writable binary file object receiving the pickle stream
    """
    LOG.info('Exporting data from memcached')

    for key in SINGLE_KEYS:
        pickle.dump((key, memcached_inst.get(key)), fd)

    for key in get_repo_keys(memcached_inst):
        pickle.dump((key, memcached_inst.get(key)), fd)

    for key in ARRAY_KEYS:
        key_count = key + ':count'
        count = memcached_inst.get(key_count) or 0
        pickle.dump((key_count, memcached_inst.get(key_count)), fd)

        key_prefix = key + ':'

        # off-by-one fix: use count + 1 so the record whose id equals the
        # stored counter is exported as well (ids run up to the counter,
        # inclusive)
        for record_id_set in utils.make_range(0, count + 1, BULK_READ_SIZE):
            for k, v in six.iteritems(memcached_inst.get_multi(
                    record_id_set, key_prefix)):
                pickle.dump((key_prefix + str(k), v), fd)

    # + 1 for the same inclusive-upper-bound reason as above
    for user_seq in range((memcached_inst.get('user:count') or 0) + 1):
        user = memcached_inst.get('user:%s' % user_seq)
        if user:
            if user.get('user_id'):
                pickle.dump(('user:%s' % user['user_id'], user), fd)
            if user.get('launchpad_id'):
                pickle.dump(('user:%s' % user['launchpad_id'], user), fd)
            for email in user.get('emails') or []:
                pickle.dump(('user:%s' % email, user), fd)
Example #4
0
    def _test_one_range(self, start, end, step):
        """Check make_range chunks jointly cover [start, end) exactly once."""
        elements = set()
        for chunk in utils.make_range(start, end, step):
            for item in chunk:
                # assertNotIn reports the offending item on failure,
                # unlike the opaque assertFalse(item in elements)
                self.assertNotIn(item, elements)
                elements.add(item)

        # assertSetEqual prints the set difference on failure
        self.assertSetEqual(set(range(start, end)), elements)
    def _test_one_range(self, start, end, step):
        """Verify make_range yields [start, end) with no duplicate items."""
        elements = set()
        for chunk in utils.make_range(start, end, step):
            for item in chunk:
                # specific assertion methods give diagnostic failure
                # messages; bare assertFalse/assertTrue do not
                self.assertNotIn(item, elements)
                elements.add(item)

        self.assertSetEqual(set(range(start, end)), elements)
Example #6
0
    def _test_one_range(self, start, end, step):
        """Chunks from make_range must cover [start, end) without repeats."""
        seen = set()
        for id_set in utils.make_range(start, end, step):
            for value in id_set:
                self.assertNotIn(value, seen)
                seen.add(value)

        self.assertSetEqual(set(range(start, end)), seen)
Example #7
0
    def _test_one_range(self, start, end, step):
        """Assert that make_range enumerates [start, end) exactly once."""
        covered = set()
        for batch in utils.make_range(start, end, step):
            for element in batch:
                self.assertNotIn(element, covered)
                covered.add(element)

        self.assertSetEqual(set(range(start, end)), covered)
Example #8
0
def export_data(memcached_inst, fd):
    """Serialize all memcached state into *fd* as pickled (key, value) pairs.

    :param memcached_inst: memcached client instance to read from
    :param fd: writable binary file object receiving the pickle stream
    """
    LOG.info('Exporting data from memcached')

    for key in SINGLE_KEYS:
        pickle.dump((key, memcached_inst.get(key)), fd)

    for key in get_repo_keys(memcached_inst):
        pickle.dump((key, memcached_inst.get(key)), fd)

    for key in ARRAY_KEYS:
        key_count = key + ':count'
        count = memcached_inst.get(key_count) or 0
        pickle.dump((key_count, memcached_inst.get(key_count)), fd)

        key_prefix = key + ':'

        for record_id_set in utils.make_range(0, count + 1, BULK_READ_SIZE):
            # memcache caps the size of a multi-get reply at some opaque
            # chunk size; verify that every requested record came back and
            # fall back to one-by-one retrieval otherwise
            chunk = memcached_inst.get_multi(record_id_set, key_prefix)
            if len(chunk) < len(record_id_set):
                # short reply -- fetch each record individually
                for record_id in record_id_set:
                    key = key_prefix + str(record_id)
                    pickle.dump((key, memcached_inst.get(key)), fd)
            else:
                # complete reply -- dump the chunk wholesale
                for k, v in six.iteritems(chunk):
                    pickle.dump((key_prefix + str(k), v), fd)

    for user_seq in range((memcached_inst.get('user:count') or 0) + 1):
        user = memcached_inst.get('user:%s' % user_seq)
        if not user:
            continue
        if user.get('user_id'):
            pickle.dump(
                (('user:%s' % user['user_id']).encode('utf8'), user), fd)
        if user.get('launchpad_id'):
            pickle.dump(('user:%s' % user['launchpad_id'], user), fd)
        for hostname, ids in user.get('gerrit_ids', {}).items():
            for gerrit_id in ids:
                pickle.dump(
                    ('user:gerrit:%s:%s' % (hostname, gerrit_id), user),
                    fd)
        if user.get('member_id'):
            pickle.dump(('user:member:%s' % user['member_id'], user), fd)
    LOG.info('Exporting data from memcached was completed')
    def get_update(self, pid):
        """Yield records added since worker *pid* last polled.

        A worker polling for the first time receives every stored record.
        As a side effect the worker's counter is advanced and its pid is
        registered.
        """
        previous = self.memcached.get("pid:%s" % pid)
        current = self._get_update_count()

        self.memcached.set("pid:%s" % pid, current)
        self._set_pids(pid)

        if previous:
            for update_id_set in utils.make_range(previous, current,
                                                  BULK_READ_SIZE):
                record_ids = self.memcached.get_multi(
                    update_id_set, UPDATE_ID_PREFIX).values()
                for record in self.memcached.get_multi(
                        record_ids, RECORD_ID_PREFIX).values():
                    yield record
        else:
            for record in self.get_all_records():
                yield record
Example #10
0
def export_data(memcached_inst, fd):
    """Write every item held in memcached into *fd* as a pickle stream.

    Items are emitted as ``(key, value)`` tuples, one ``pickle.dump`` each.

    :param memcached_inst: memcached client instance to read from
    :param fd: writable binary file object receiving the pickle stream
    """
    LOG.info('Exporting data from memcached')

    for key in SINGLE_KEYS:
        pickle.dump((key, memcached_inst.get(key)), fd)

    for key in get_repo_keys(memcached_inst):
        pickle.dump((key, memcached_inst.get(key)), fd)

    for key in ARRAY_KEYS:
        key_count = key + ':count'
        count = memcached_inst.get(key_count) or 0
        pickle.dump((key_count, memcached_inst.get(key_count)), fd)

        key_prefix = key + ':'

        for record_id_set in utils.make_range(0, count + 1, BULK_READ_SIZE):
            # memcache limits a multi-get reply to an opaque chunk size;
            # check that all requested records were returned and retrieve
            # them one at a time when the reply is short
            chunk = memcached_inst.get_multi(record_id_set, key_prefix)
            if len(chunk) < len(record_id_set):
                # incomplete chunk: retrieve one-by-one
                for record_id in record_id_set:
                    key = key_prefix + str(record_id)
                    pickle.dump((key, memcached_inst.get(key)), fd)
            else:
                # full chunk: dump it as-is
                for k, v in six.iteritems(chunk):
                    pickle.dump((key_prefix + str(k), v), fd)

    for user_seq in range((memcached_inst.get('user:count') or 0) + 1):
        user = memcached_inst.get('user:%s' % user_seq)
        if not user:
            continue
        if user.get('user_id'):
            pickle.dump((('user:%s' % user['user_id']).encode('utf8'),
                         user), fd)
        if user.get('launchpad_id'):
            pickle.dump(('user:%s' % user['launchpad_id'], user), fd)
        if user.get('gerrit_id'):
            pickle.dump(('user:gerrit:%s' % user['gerrit_id'], user), fd)
        if user.get('member_id'):
            pickle.dump(('user:member:%s' % user['member_id'], user), fd)
        for email in user.get('emails') or []:
            pickle.dump((('user:%s' % email).encode('utf8'), user), fd)
Example #11
0
    def get_update(self, pid):
        """Yield records that appeared since worker *pid* last polled.

        On a worker's first poll every record is yielded. The worker's
        consumed-update counter is advanced and its pid registered.
        """
        last_seen = self.get_by_key('pid:%s' % pid)
        current = self._get_update_count()

        self.set_by_key('pid:%s' % pid, current)
        self._set_pids(pid)

        if last_seen:
            for update_id_set in utils.make_range(last_seen, current,
                                                  BULK_READ_SIZE):
                record_ids = self.memcached.get_multi(
                    update_id_set, UPDATE_ID_PREFIX).values()
                records = self.memcached.get_multi(
                    record_ids, RECORD_ID_PREFIX).values()
                for record in records:
                    yield record
        else:
            for record in self.get_all_records():
                yield record
    def active_pids(self, pids):
        """Store the live pid set, dropping counters of dead workers and
        purging update ids that every live worker has already consumed."""
        for known in (self.memcached.get("pids") or set()):
            if known not in pids:
                self.memcached.delete("pid:%s" % known)

        self.memcached.set("pids", pids)

        # remove unneeded updates: keep everything some live worker still
        # has to read
        min_update = self._get_update_count()
        for pid in pids:
            consumed = self.memcached.get("pid:%s" % pid)
            if consumed and consumed < min_update:
                min_update = consumed

        first_valid_update = self.memcached.get("first_valid_update") or 0
        for delete_id_set in utils.make_range(first_valid_update, min_update,
                                              BULK_DELETE_SIZE):
            deleted = self.memcached.delete_multi(
                delete_id_set, key_prefix=UPDATE_ID_PREFIX)
            if not deleted:
                raise Exception("Failed to delete from memcache")
        self.memcached.set("first_valid_update", min_update)
Example #13
0
    def active_pids(self, pids):
        """Refresh the stored pid list and garbage-collect consumed updates."""
        stale = [p for p in (self.memcached.get('pids') or set())
                 if p not in pids]
        for pid in stale:
            self.memcached.delete('pid:%s' % pid)

        self.memcached.set('pids', pids)

        # the earliest update some live worker still has to read bounds
        # what can be purged
        counters = [self.memcached.get('pid:%s' % pid) for pid in pids]
        min_update = min([self._get_update_count()] +
                         [c for c in counters if c])

        first_valid_update = self.memcached.get('first_valid_update') or 0
        for delete_id_set in utils.make_range(first_valid_update, min_update,
                                              BULK_DELETE_SIZE):
            if not self.memcached.delete_multi(delete_id_set,
                                               key_prefix=UPDATE_ID_PREFIX):
                raise Exception('Failed to delete from memcache')
        self.memcached.set('first_valid_update', min_update)
Example #14
0
 def get_all_records(self):
     """Yield every stored record, fetched from memcached in bulk chunks."""
     total = self._get_record_count()
     for record_id_set in utils.make_range(0, total, BULK_READ_SIZE):
         chunk = self.memcached.get_multi(record_id_set, RECORD_ID_PREFIX)
         for record in chunk.values():
             yield record
 def get_all_records(self):
     """Iterate over all records currently held in memcached."""
     count = self._get_record_count()
     for id_chunk in utils.make_range(0, count, BULK_READ_SIZE):
         records = self.memcached.get_multi(id_chunk, RECORD_ID_PREFIX)
         for item in records.values():
             yield item