# Code example #1
def worker(queue, eviction_policy, processes, threads):
    """Benchmark worker: replay cache operations pulled from *queue*.

    Each job is an ``(action, key, value)`` tuple; a ``None`` sentinel ends
    the stream.  Per-action latencies recorded after the warmup period are
    pushed back onto the queue before the cache is closed.
    """
    cache = Cache('tmp', eviction_policy=eviction_policy)
    timings = {'get': [], 'set': [], 'delete': []}

    for index, job in enumerate(iter(queue.get, None)):
        action, key, value = job
        begin = time.time()

        if action == 'get':
            result = cache.get(key)
        elif action == 'set':
            cache.set(key, value, expire=EXPIRE)
        else:
            assert action == 'delete'
            cache.delete(key)

        elapsed = time.time() - begin

        # Results are only verifiable in the single-worker, no-expiry run.
        single_worker = processes == 1 and threads == 1
        if action == 'get' and single_worker and EXPIRE is None:
            assert result == value

        if index > WARMUP:
            timings[action].append(elapsed)

    queue.put(timings)

    cache.close()
# Code example #2
# File: cache.py  Project: klinger/EsiPy
class FileCache(BaseCache):
    """ BaseCache implementation using files to store the data.
    This implementation uses diskcache.Cache
    see http://www.grantjenks.com/docs/diskcache/api.html#cache for more
    informations

    This cache requires you to install diskcache using `pip install diskcache`
    """
    def __init__(self, path, **settings):
        """ Constructor

        Arguments:
            path {String} -- The path on the disk to save the data
            settings {dict} -- The settings values for diskcache
        """
        # Imported lazily so diskcache is only required when this backend
        # is actually instantiated.
        from diskcache import Cache
        self._cache = Cache(path, **settings)

    def __del__(self):
        """ Close the connection as the cache instance is deleted.
        Safe to use as there are no circular ref.
        """
        self._cache.close()

    def set(self, key, value, timeout=300):
        """Store *value* under the hashed key; timeout=0 means no expiry."""
        self._cache.set(
            _hash(key), value, expire=timeout if timeout != 0 else None)

    def get(self, key, default=None):
        """Return the cached value for *key*, or *default* on a miss."""
        return self._cache.get(_hash(key), default)

    def invalidate(self, key):
        """Drop the cached entry for *key*, if any."""
        self._cache.delete(_hash(key))
# Code example #3
def worker(queue, eviction_policy, processes, threads):
    """Benchmark worker: replay cache operations pulled from *queue*.

    Each job is an ``(action, key, value)`` tuple; a ``None`` sentinel ends
    the stream.  Latencies recorded after the warmup period are collected
    per action, with an extra ``'<action>-miss'`` bucket for operations
    that raised ``Timeout``, and pushed back onto the queue.
    """
    timings = co.defaultdict(list)
    cache = Cache('tmp', eviction_policy=eviction_policy)

    for index, (action, key, value) in enumerate(iter(queue.get, None)):
        start = time.time()

        try:
            if action == 'set':
                cache.set(key, value, expire=EXPIRE)
            elif action == 'get':
                result = cache.get(key)
            else:
                assert action == 'delete'
                cache.delete(key)
        except Timeout:
            miss = True
        else:
            miss = False

        stop = time.time()

        # Only verify the value when the get actually completed: on a
        # Timeout miss `result` is stale (or even unbound), so the original
        # unconditional assert could fail spuriously or raise NameError.
        if (action == 'get' and not miss and processes == 1 and threads == 1
                and EXPIRE is None):
            assert result == value

        if index > WARMUP:
            delta = stop - start
            timings[action].append(delta)
            if miss:
                timings[action + '-miss'].append(delta)

    queue.put(timings)

    cache.close()
# Code example #4
def worker(queue, eviction_policy):
    """Benchmark worker: replay cache operations pulled from *queue*.

    Jobs are ``(action, key, value)`` tuples terminated by a ``None``
    sentinel; per-action latencies after the warmup period are pushed back
    onto the queue before the cache is closed.
    """
    cache = Cache('tmp', eviction_policy=eviction_policy)
    timings = {'get': [], 'set': [], 'delete': []}

    for index, job in enumerate(iter(queue.get, None)):
        action, key, value = job
        begin = time.time()

        if action == 'get':
            result = cache.get(key)
        elif action == 'set':
            cache.set(key, value, expire=EXPIRE)
        else:
            assert action == 'delete'
            cache.delete(key)

        elapsed = time.time() - begin

        # Results are only verifiable in the single-worker, no-expiry run.
        if (action == 'get' and PROCESSES == 1 and THREADS == 1
                and EXPIRE is None):
            assert result == value

        if index > WARMUP:
            timings[action].append(elapsed)

    queue.put(timings)

    cache.close()
# Code example #5
class CacheProxy:
    """Thin wrapper around a per-script on-disk cache.

    The cache directory is derived from the configured ``collectors_dir``
    plus the script name, so each script gets its own cache namespace.
    """

    def __init__(self, script):
        self.config = get_configs()
        collectors_dir = self.config.get('base', 'collectors_dir')
        cache_dir = os.path.join(collectors_dir, 'cache/script/', script)
        self.cache = Cache(cache_dir)

    def get(self, key):
        """Return the cached value for *key* (None on a miss)."""
        return self.cache.get(key)

    def set(self, key, value):
        """Store *value* under *key*."""
        self.cache.set(key, value)

    def delete(self, key):
        """Remove *key* from the cache."""
        self.cache.delete(key)

    def close(self):
        """Close the underlying cache."""
        self.cache.close()

    def counter_to_gauge(self, key, value):
        """Turn a monotonically increasing counter sample into a gauge.

        Stores *value* as the new last sample and returns the delta from
        the previous one.  Returns None on the first sample or when the
        delta is implausible (negative, or larger than the previous value,
        which indicates a counter reset).
        """
        previous = self.get(key)
        self.set(key, value)
        if previous is None:
            return None
        delta = value - previous
        return delta if 0 <= delta <= previous else None
# Code example #6
class DiscCacheWrapper:
    """Non-retrying facade over a diskcache Cache at a fixed directory."""

    DISC_CACHE_PATH = "./tmp/flickr-auth-disc-cache"

    def __init__(self):
        self.disc_cache = Cache(directory=DiscCacheWrapper.DISC_CACHE_PATH)

    def get(self, key):
        """Return the cached value for *key* or None on a miss.

        This can raise a diskcache.Timeout error if it fails to talk to
        its database (retry is disabled).
        """
        return self.disc_cache.get(key=key, default=None, retry=False)

    def set(self, key, value, timeout=None):
        """Store *value* under *key*, expiring after *timeout* seconds.

        This can raise a diskcache.Timeout error if it fails to talk to
        its database (retry is disabled).
        """
        self.disc_cache.set(key=key, value=value, expire=timeout, retry=False)

    def delete(self, key):
        """Remove *key* from the cache."""
        self.disc_cache.delete(key=key)
# Code example #7
class Cache(object):
    """Best-effort wrapper around a diskcache cache (``DC``).

    If the backing store fails to open, ``self.cache`` is None and every
    operation silently becomes a no-op; errors are printed, never raised.
    """

    def __init__(self):
        # Open the on-disk cache; on failure fall back to a disabled state.
        try:
            self.cache = DC('./tmp')
        except Exception as ex:
            print('Get an exception with diskcache open: {}'.format(ex))
            self.cache = None

    def __del__(self):
        """Close the cache on garbage collection; failures are only printed."""
        try:
            self.cache.close()
        except Exception as ex:
            print('Get an exception with diskcache close: {}'.format(ex))

    def set(self, key, value):
        """Store *value* (bytes) under *key*, wrapped in a BytesIO stream
        and tagged 'data'. No-op when the cache is disabled."""
        if self.cache is not None:
            self.cache.set(key, BytesIO(value), read=True, tag=u'data')

    def get(self, key):
        """Return the stored entry for *key*, or None on a miss.

        NOTE(review): with tag=True diskcache returns a ``(value, tag)``
        tuple, so the ``!= b''`` miss check below would never match a tuple
        and misses may come back as ``(b'', None)`` — confirm against the
        diskcache version in use.
        """
        if self.cache is not None:
            value = self.cache.get(key, default=b'', read=True, tag=True)
            if value is not None and value != b'':
                return value
        return None

    def pop(self, key):
        """Remove and return the entry for *key*, or None on a miss.
        Same tag=True tuple caveat as ``get`` — see NOTE(review) there."""
        if self.cache is not None:
            value = self.cache.pop(key, default=b'', read=True, tag=True)
            if value is not None and value != b'':
                return value
        return None

    def delete(self, key):
        """Remove *key* from the cache (no-op when disabled)."""
        if self.cache is not None:
            self.cache.delete(key)

    def create_index(self):
        """Create the tag index and return it, or None when disabled."""
        if self.cache is not None:
            self.cache.create_tag_index()
            return self.cache.tag_index
        return None

    def clear_all(self):
        """Remove every entry from the cache (no-op when disabled)."""
        if self.cache is not None:
            self.cache.clear()
# Code example #8
class RouteHandler(object):
    def __init__(self, loop, messenger, database):
        """Keep the event loop, transaction messenger, and database handles,
        and set up a local cache for this handler."""
        self.cache = Cache()
        self._loop = loop
        self._messenger = messenger
        self._database = database

    async def create_election(self, request):
        """Create an election plus its voting options and poll book.

        Authorizes the caller (must be ADMIN or SUPERADMIN), validates the
        JSON payload, appends the reserved NULL/BLANK options, then submits
        one election transaction, one transaction + num-votes row per voting
        option, and one poll-registration transaction per poll-book entry.
        """
        private_key, public_key, user = await self._authorize(request)
        body = await decode_request(request)
        required_fields = [
            'name', 'description', 'start_timestamp', 'end_timestamp',
            'results_permission', 'can_change_vote', 'can_show_realtime',
            'voting_options', 'poll_book'
        ]
        validate_fields(required_fields, body)

        if user.get('type') != 'ADMIN' and user.get('type') != 'SUPERADMIN':
            raise ApiForbidden('Voter must be an admin or superadmin')

        election_id = uuid.uuid1().hex
        voting_options = body.get('voting_options')
        admin = await self._database.fetch_voter_resource(public_key=public_key
                                                          )

        # NULL and BLANK are reserved names: they are added automatically
        # below, so user-supplied options must not shadow them.
        for voting_option in voting_options:
            if voting_option.get('name').upper(
            ) == "NULL" or voting_option.get('name').upper() == "BLANK":
                raise ApiInternalError('NULL and BLANK are default options')

        voting_options.append({"name": "NULL", "description": "VOTE NULL"})
        voting_options.append({"name": "BLANK", "description": "VOTE BLANK"})

        await self._messenger.send_create_election_transaction(
            private_key=private_key,
            election_id=election_id,
            name=body.get('name'),
            description=body.get('description'),
            start_timestamp=body.get('start_timestamp'),
            end_timestamp=body.get('end_timestamp'),
            results_permission=body.get('results_permission'),
            can_change_vote=body.get('can_change_vote'),
            can_show_realtime=body.get('can_show_realtime'),
            admin_id=admin.get('voter_id'),
            status=1,
            timestamp=get_time())

        # One blockchain transaction plus a local vote-count row per option.
        for voting_option in voting_options:
            voting_option_id = uuid.uuid1().hex

            await self._messenger.send_create_voting_option_transaction(
                private_key=private_key,
                voting_option_id=voting_option_id,
                name=voting_option.get('name'),
                description=voting_option.get('description'),
                election_id=election_id,
                status=1,
                timestamp=get_time())

            await self._database.insert_voting_option_num_vote_resource(
                voting_option_id=voting_option_id,
                name=voting_option.get('name'),
                election_id=election_id)

        # Register each listed voter in the election's poll book.
        for poll_book in body.get('poll_book'):
            await self._messenger.send_create_poll_registration_transaction(
                private_key=private_key,
                voter_id=poll_book.get('id'),
                name=poll_book.get('name'),
                election_id=election_id,
                status=1,
                timestamp=get_time())

        return json_response({'data': 'Create election transaction submitted'})

    async def create_voter(self, request):
        """Register a new voter and return an access token.

        Rejects duplicate voter IDs, generates a fresh key pair, submits the
        create-voter transaction, then stores the encrypted private key and
        hashed password as the voter's auth entry.
        """
        body = await decode_request(request)
        required_fields = ['voter_id', 'name', 'password']
        validate_fields(required_fields, body)

        if await self._database.is_voter_created(body.get("voter_id")
                                                 ) is not None:
            raise ApiConflict("Voter ID must be unique")

        public_key, private_key = self._messenger.get_new_key_pair()

        await self._messenger.send_create_voter_transaction(
            private_key=private_key,
            voter_id=body.get('voter_id'),
            public_key=public_key,
            name=body.get('name'),
            created_at=get_time(),
            type='VOTER')

        # The private key is stored server-side, encrypted with the app AES
        # key; only the hashed password is kept for login checks.
        encrypted_private_key = encrypt_private_key(request.app['aes_key'],
                                                    public_key, private_key)
        hashed_password = hash_password(body.get('password'))

        await self._database.create_auth_entry(public_key,
                                               encrypted_private_key,
                                               hashed_password)

        user = {
            'name': body.get('name'),
            'voter_id': body.get('voter_id'),
            'type': 'VOTER'
        }

        token = self.generate_auth_token(request.app['secret_key'], public_key,
                                         user)

        return json_response({'accessToken': token, 'user': user})

    async def update_voter_type(self, request):
        """Promote or demote a voter's type (SUPERADMIN only).

        The transaction is signed with the target voter's own key, which is
        decrypted from the stored auth entry.
        """
        private_key, public_key, user = await self._authorize(request)
        body = await decode_request(request)
        required_fields = ['type']
        validate_fields(required_fields, body)

        voter_id = request.match_info.get('voterId', '')
        if voter_id == '':
            raise ApiBadRequest(
                'The voter ID is a required query string parameter')

        if user.get('type') != 'SUPERADMIN':
            raise ApiForbidden('Forbidden')

        voter = await self._database.fetch_voter_resource(voter_id=voter_id)

        if voter is None:
            raise ApiNotFound('No voter found')

        # Reject no-op updates in either direction.
        if body.get('type') == 'ADMIN' and (voter.get('type') == 'ADMIN' or
                                            voter.get('type') == 'SUPERADMIN'):
            raise ApiConflict(
                'Voter {} is already an admin or superadmin'.format(voter_id))
        elif body.get('type') == 'VOTER' and voter.get('type') == 'VOTER':
            raise ApiConflict('Voter {} is already a voter. '.format(voter_id))

        auth_info = await self._database.fetch_auth_resource(
            public_key=voter.get('public_key'))
        voter_private_key = decrypt_private_key(
            request.app['aes_key'], voter.get('public_key'),
            auth_info.get('encrypted_private_key'))

        await self._messenger.send_update_voter_transaction(
            private_key=voter_private_key,
            voter_id=voter_id,
            public_key=voter.get('public_key'),
            name=voter.get('name'),
            created_at=get_time(),
            type=body.get('type'))

        return json_response({
            'voter': {
                'voter_id': voter_id,
                'name': voter.get('name'),
                # Echo the requested type; the previous hard-coded 'ADMIN'
                # was wrong when demoting a voter back to VOTER.
                'type': body.get('type')
            }
        })

    async def create_vote(self, request):
        """Cast a vote for a voting option on behalf of the authenticated voter.

        Validates that the voter, voting option, and election exist, that the
        election is active and inside its time window, and that the voter is
        registered in the election's poll book, then submits the vote
        transaction and increments the option's vote counter.
        """
        body = await decode_request(request)
        required_fields = []
        validate_fields(required_fields, body)

        private_key, public_key, user = await self._authorize(request)
        voting_option_id = request.match_info.get('votingOptionId', '')

        if voting_option_id == '':
            raise ApiBadRequest(
                'The voting option ID is a required query string parameter')

        voter = await self._database.fetch_voter_resource(public_key=public_key
                                                          )

        if voter is None:
            raise ApiNotFound('Voter with the public_key '
                              '{} was not found'.format(public_key))

        voting_option = await self._database.fetch_voting_option_resource(
            voting_option_id=voting_option_id)

        if voting_option is None:
            raise ApiNotFound('Voting Option with the voting option id '
                              '{} was not found'.format(voting_option_id))

        # Fetch the vote counter only once the option is known to exist
        # (previously this ran before the None check above).
        vo_count_vote = await self._database.fetch_voting_option_num_vote_resource(
            voting_option_id=voting_option_id)

        election_id = voting_option.get('election_id')
        election = await self._database.fetch_election_resource(
            election_id=election_id)

        # Guard against a dangling voting option whose election is gone;
        # previously a missing election crashed on election.get(...).
        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        if election.get('status') == 0:
            raise ApiBadRequest('Election with the election id '
                                '{} is cancelled'.format(election_id))

        current_time = get_time()

        if election.get('end_timestamp') < current_time or election.get(
                'start_timestamp') > current_time:
            raise ApiBadRequest('Not in election time.')

        poll_registration = await self._database.fetch_poll_book_registration(
            voter_id=user.get('voter_id'), election_id=election_id)

        if poll_registration is None:
            raise ApiBadRequest(
                'Voter is not registered in the poll book of the election with the id '
                '{} .'.format(election_id))

        num_votes_update = vo_count_vote.get('num_votes') + 1

        await self._messenger.send_create_vote_transaction(
            private_key=private_key,
            vote_id=uuid.uuid1().hex,
            timestamp=get_time(),
            voter_id=voter.get('voter_id'),
            election_id=voting_option.get('election_id'),
            voting_option_id=voting_option_id)

        await self._database.update_voting_option_num_vote_resource(
            voting_option_id=voting_option_id, num_votes=num_votes_update)

        return json_response({'data': 'Create vote transaction submitted'})

    async def update_vote(self, request):
        """Move an existing vote to a different voting option.

        Checks that the vote and its election exist, that vote changing is
        allowed, and that the election is in progress, then submits the
        update transaction and rebalances the two options' vote counters.
        """
        private_key, public_key, user = await self._authorize(request)
        body = await decode_request(request)
        required_fields = ['voting_option_id']
        validate_fields(required_fields, body)

        vote_id = request.match_info.get('voteId', '')

        if vote_id == '':
            raise ApiBadRequest(
                'The vote ID is a required query string parameter')

        vote = await self._database.fetch_vote_resource(vote_id=vote_id)

        if vote is None:
            raise ApiNotFound('Vote with the vote id '
                              '{} was not found'.format(vote_id))

        # Read the election id only after confirming the vote exists;
        # previously this dereferenced a possibly-None vote.
        election_id = vote.get('election_id')

        if vote.get('voting_option_id') == body.get('voting_option_id'):
            raise ApiBadRequest('Vote must be different.')

        election = await self._database.fetch_election_resource(
            election_id=election_id)

        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        # Single check; the original repeated this block verbatim.
        if election.get('can_change_vote') == 0:
            raise ApiInternalError(
                'Election with the election id '
                '{} was not found don\'t permit to change vote'.format(
                    election_id))

        current_time = get_time()

        if election.get('end_timestamp') < current_time or election.get(
                'start_timestamp') > current_time:
            raise ApiBadRequest('Not in election time.')

        # NOTE(review): this rejects when the user IS the election admin,
        # yet the message says "is not the owner" — confirm the intended
        # rule (admins barred from changing votes?) before touching it.
        if election.get('admin_id') == user.get('voter_id'):
            raise ApiBadRequest(
                'User is not the owner of the election with the id '
                '{} .'.format(election_id))

        new_voting_option_id = body.get('voting_option_id')
        old_voting_option_id = vote.get('voting_option_id')

        old_num_vote = await self._database.fetch_voting_option_num_vote_resource(
            voting_option_id=old_voting_option_id)
        new_num_vote = await self._database.fetch_voting_option_num_vote_resource(
            voting_option_id=new_voting_option_id)
        num_votes_remove = old_num_vote.get('num_votes') - 1
        num_votes_update = new_num_vote.get('num_votes') + 1

        await self._messenger.send_update_vote_transaction(
            private_key=private_key,
            vote_id=vote_id,
            timestamp=get_time(),
            voting_option_id=new_voting_option_id)

        # remove -1 to old voting option
        await self._database.update_voting_option_num_vote_resource(
            voting_option_id=old_voting_option_id, num_votes=num_votes_remove)

        # add +1 to new voting option
        await self._database.update_voting_option_num_vote_resource(
            voting_option_id=new_voting_option_id, num_votes=num_votes_update)

        return json_response({'data': 'Update Vote transaction submitted'})

    async def update_election(self, request):
        """Update an election's settings before it starts (owner only).

        Each field in the transaction falls back to the stored election value
        when the request body omits it.  New voting options and poll-book
        entries may also be appended.
        """
        private_key, public_key, user = await self._authorize(request)
        body = await decode_request(request)
        election_id = request.match_info.get('electionId', '')

        if election_id == '':
            raise ApiBadRequest(
                'The election ID is a required query string parameter')

        election = await self._database.fetch_election_resource(
            election_id=election_id)

        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        current_time = get_time()

        # Elections cannot be edited once voting has begun.
        if election.get('start_timestamp') < current_time:
            raise ApiBadRequest('Election with the election id '
                                '{} already start.'.format(election_id))

        if election.get('admin_id') != user.get('voter_id'):
            raise ApiBadRequest(
                'User is not the owner of the election with the id '
                '{} .'.format(election_id))

        # body value if provided, otherwise keep the stored value.
        await self._messenger.send_update_election_transaction(
            private_key=private_key,
            election_id=election_id,
            name=body.get('name')
            if body.get('name') is not None else election.get('name'),
            description=body.get('description') if body.get('description')
            is not None else election.get('description'),
            start_timestamp=body.get('start_timestamp')
            if body.get('start_timestamp') is not None else
            election.get('start_timestamp'),
            end_timestamp=body.get('end_timestamp')
            if body.get('end_timestamp') is not None else
            election.get('end_timestamp'),
            results_permission=body.get('results_permission')
            if body.get('results_permission') is not None else
            election.get('results_permission'),
            can_change_vote=body.get('can_change_vote')
            if body.get('can_change_vote') is not None else
            election.get('can_change_vote'),
            can_show_realtime=body.get('can_show_realtime')
            if body.get('can_show_realtime') is not None else
            election.get('can_show_realtime'),
            admin_id=user.get('voter_id'),
            status=body.get('status')
            if body.get('status') is not None else election.get('status'),
            timestamp=get_time())

        # Append any newly supplied voting options (transaction + local
        # vote-count row each).
        if body.get('voting_options') is not None:
            for voting_option in body.get('voting_options'):
                voting_option_id = uuid.uuid1().hex

                await self._messenger.send_create_voting_option_transaction(
                    private_key=private_key,
                    voting_option_id=voting_option_id,
                    name=voting_option.get('name'),
                    description=voting_option.get('description'),
                    election_id=election_id,
                    status=1,
                    timestamp=get_time())

                await self._database.insert_voting_option_num_vote_resource(
                    voting_option_id=voting_option_id,
                    name=voting_option.get('name'),
                    election_id=election_id)
        # Append any newly supplied poll-book registrations.
        if body.get('poll_book') is not None:
            for poll_book in body.get('poll_book'):
                await self._messenger.send_create_poll_registration_transaction(
                    private_key=private_key,
                    voter_id=poll_book.get('id'),
                    name=poll_book.get('name'),
                    election_id=election_id,
                    status=1,
                    timestamp=get_time())

        return json_response({'data': 'Update Election transaction submitted'})

    async def get_election(self, request):
        """Return one election, enforcing result-visibility rules.

        Non-public elections are only visible to registered voters and the
        election's admin.  The `asAdmin` query flag selects the admin view.
        """
        private_key, public_key, user = await self._authorize(request)

        election_id = request.match_info.get('electionId', '')
        if election_id == '':
            raise ApiBadRequest(
                'The election ID is a required query string parameter')

        voter_id = user.get('voter_id')
        if 'asAdmin' in request.rel_url.query:
            election = await self._database.fetch_election_with_can_vote_resource_admin(
                voter_id=voter_id, election_id=election_id)
        else:
            election = await self._database.fetch_election_with_can_vote_resource(
                voter_id=voter_id, election_id=election_id)

        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        if election.get('results_permission') != 'PUBLIC':
            cannot_vote = election.get('can_vote') is False
            not_owner = election.get('admin_id') != user.get('voter_id')
            if cannot_vote and not_owner:
                raise ApiForbidden(
                    'Voter is not registered in the poll book of the election with the id '
                    '{}.'.format(election_id))

        return json_response(election)

    async def get_election_votes(self, request):
        """Return the vote tallies for an election.

        Non-public results are restricted to registered voters and the
        election's admin.
        """
        private_key, public_key, user = await self._authorize(request)
        election_id = request.match_info.get('electionId', '')

        if election_id == '':
            raise ApiBadRequest(
                'The election ID is a required query string parameter')

        number_of_votes = await self._database.fetch_number_of_votes(
            election_id=election_id)
        if number_of_votes is None:
            raise ApiNotFound('No voting options with the election id '
                              '{} was not found'.format(election_id))

        election = await self._database.fetch_election_resource(
            election_id=election_id)
        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        if election.get('results_permission') != 'PUBLIC':
            registration = await self._database.fetch_poll_book_registration(
                voter_id=user.get('voter_id'), election_id=election_id)
            is_owner = election.get('admin_id') == user.get('voter_id')
            if registration is None and not is_owner:
                raise ApiBadRequest(
                    'Voter is not registered in the poll book of the election with the id '
                    '{} .'.format(election_id))

        return json_response(number_of_votes)

    async def get_poll_registrations(self, request):
        """Return the poll book (registered voters) of an election.

        Only the election's owner may read the poll book.
        """
        private_key, public_key, user = await self._authorize(request)
        election_id = request.match_info.get('electionId', '')

        if election_id == '':
            raise ApiBadRequest(
                'The election ID is a required query string parameter')

        poll_book = await self._database.fetch_poll_book(
            election_id=election_id)
        if poll_book is None:
            raise ApiNotFound('No voters with the election id '
                              '{} was not found'.format(election_id))

        election = await self._database.fetch_election_resource(
            election_id=election_id)
        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        requester_id = user.get('voter_id')
        if election.get('admin_id') != requester_id:
            raise ApiBadRequest(
                'User is not the owner of the election with the id '
                '{} .'.format(election_id))

        return json_response(poll_book)

    async def count_poll_registrations(self, request):
        """Return the number of poll-book registrations for an election.

        For non-public elections the count is restricted to registered
        voters and the election's admin.
        """
        private_key, public_key, user = await self._authorize(request)
        election_id = request.match_info.get('electionId', '')

        if election_id == '':
            raise ApiBadRequest(
                'The election ID is a required query string parameter')

        count_poll_book = await self._database.count_poll_book(
            election_id=election_id)

        election = await self._database.fetch_election_resource(
            election_id=election_id)
        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        if election.get('results_permission') != 'PUBLIC':
            registration = await self._database.fetch_poll_book_registration(
                voter_id=user.get('voter_id'), election_id=election_id)
            is_owner = election.get('admin_id') == user.get('voter_id')
            if registration is None and not is_owner:
                raise ApiBadRequest(
                    'Voter is not registered in the poll book of the election with the id '
                    '{} .'.format(election_id))

        return json_response(count_poll_book)

    async def list_voting_options_election(self, request):
        """List the voting options of an election.

        For non-public elections, options are visible only to registered
        voters and the election's admin.
        """
        private_key, public_key, user = await self._authorize(request)
        election_id = request.match_info.get('electionId', '')

        if election_id == '':
            raise ApiBadRequest(
                'The election ID is a required query string parameter')

        voting_options = await self._database.fetch_election_voting_options_resource(
            election_id=election_id)
        if voting_options is None:
            raise ApiNotFound('Voting Options in the election id '
                              '{} was not found'.format(election_id))

        election = await self._database.fetch_election_resource(
            election_id=election_id)
        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        if election.get('results_permission') != 'PUBLIC':
            registration = await self._database.fetch_poll_book_registration(
                voter_id=user.get('voter_id'), election_id=election_id)
            is_owner = election.get('admin_id') == user.get('voter_id')
            if registration is None and not is_owner:
                raise ApiForbidden(
                    'Voter is not registered in the poll book of the election with the id '
                    '{} .'.format(election_id))

        return json_response(voting_options)

    async def get_voting_option(self, request):
        """Look up a single voting option by its id."""
        private_key, public_key, user = await self._authorize(request)

        voting_option_id = request.match_info.get('votingOptionId', '')
        if voting_option_id == '':
            raise ApiBadRequest(
                'The voting option ID is a required query string parameter')

        option = await self._database.fetch_voting_option_resource(
            voting_option_id=voting_option_id)
        if option is None:
            raise ApiNotFound('No voting options with the id '
                              '{} was not found'.format(voting_option_id))

        return json_response(option)

    async def update_voting_option_status(self, request):
        """Toggle a voting option's status (owner only, before the election
        starts)."""
        private_key, public_key, user = await self._authorize(request)
        voting_option_id = request.match_info.get('votingOptionId', '')

        if voting_option_id == '':
            raise ApiBadRequest(
                'The voting option ID is a required query string parameter')

        voting_option = await self._database.fetch_voting_option_resource(
            voting_option_id=voting_option_id)

        if voting_option is None:
            raise ApiNotFound('Voting Option with the voting option id '
                              '{} was not found'.format(voting_option_id))

        election_id = voting_option.get('election_id')
        election = await self._database.fetch_election_resource(
            election_id=election_id)

        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        current_time = get_time()

        if election.get('start_timestamp') < current_time:
            raise ApiBadRequest('Election with the election id '
                                '{} already start.'.format(election_id))

        if election.get('admin_id') != user.get('voter_id'):
            raise ApiBadRequest(
                'User is not the owner of the election with the id '
                '{} .'.format(election_id))

        # Toggle by truthiness rather than `is True`: statuses are written
        # as the integer 1 elsewhere in this handler, and `1 is True` is
        # False, so the original comparison could never flip status to 0.
        status = 0 if voting_option.get('status') else 1

        await self._messenger.send_update_voting_option_status_transaction(
            private_key=private_key,
            voting_option_id=voting_option_id,
            name=voting_option.get('name'),
            description=voting_option.get('description'),
            election_id=voting_option.get('election_id'),
            status=status,
            timestamp=get_time())

        return json_response(
            {'data': 'Update Voting Option Status transaction submitted'})

    async def update_poll_book_status(self, request):
        """Toggle a voter's poll-book registration status (owner only,
        before the election starts)."""
        private_key, public_key, user = await self._authorize(request)
        voter_id = request.match_info.get('voterId', '')

        if voter_id == '':
            raise ApiBadRequest(
                'The voter ID is a required query string parameter')

        election_id = request.match_info.get('electionId', '')

        if election_id == '':
            raise ApiBadRequest(
                'The election ID is a required query string parameter')

        voter_poll_book = await self._database.fetch_poll_book_registration(
            election_id=election_id, voter_id=voter_id)

        if voter_poll_book is None:
            raise ApiNotFound('Voter with the voter id '
                              '{} was not found'.format(voter_id))

        election = await self._database.fetch_election_resource(
            election_id=election_id)

        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        current_time = get_time()

        if election.get('start_timestamp') < current_time:
            raise ApiBadRequest('Election with the election id '
                                '{} already start.'.format(election_id))

        if election.get('admin_id') != user.get('voter_id'):
            raise ApiBadRequest(
                'User is not the owner of the election with the id '
                '{} .'.format(election_id))

        # Toggle by truthiness rather than `is True`: statuses are written
        # as the integer 1 elsewhere in this handler, and `1 is True` is
        # False, so the original comparison could never flip status to 0.
        status = 0 if voter_poll_book.get('status') else 1

        await self._messenger.send_update_voter_poll_book_status_transaction(
            private_key=private_key,
            voter_id=voter_id,
            name=voter_poll_book.get('name'),
            election_id=election_id,
            status=status,
            timestamp=get_time())

        return json_response(
            {'data': 'Update Poll Registration Status transaction submitted'})

    async def list_elections_current(self, request):
        """List the currently open elections visible to the authenticated voter."""
        _, _, user = await self._authorize(request)

        voter_id = request.match_info.get('voterId', '')
        if not voter_id:
            raise ApiBadRequest(
                'The voter ID is a required query string parameter')

        if user.get('voter_id') != voter_id:
            raise ApiForbidden('Admin must be the authenticated one')

        elections = await self._database.fetch_current_elections_resources(
            voter_id, get_time())
        return json_response(elections)

    async def list_elections_past(self, request):
        """List the already-closed elections visible to the authenticated voter."""
        _, _, user = await self._authorize(request)

        voter_id = request.match_info.get('voterId', '')
        if not voter_id:
            raise ApiBadRequest(
                'The voter ID is a required query string parameter')

        if user.get('voter_id') != voter_id:
            raise ApiForbidden('Admin must be the authenticated one')

        elections = await self._database.fetch_past_elections_resources(
            voter_id, get_time())
        return json_response(elections)

    async def list_admin_elections(self, request):
        """List elections administered by the authenticated admin/superadmin."""
        _, _, user = await self._authorize(request)

        voter_id = request.match_info.get('voterId', '')
        if not voter_id:
            raise ApiBadRequest(
                'The voter ID is a required query string parameter')

        if user.get('voter_id') != voter_id:
            raise ApiForbidden('Admin must be the authenticated one')

        if user.get('type') not in ('ADMIN', 'SUPERADMIN'):
            raise ApiForbidden('Voter must be an admin or superadmin')

        elections = await self._database.fetch_admin_elections_resources(
            user.get('voter_id'))
        return json_response(elections)

    async def list_public_elections(self, request):
        """List all public elections that are currently open."""
        # Authentication is required, but the caller's identity is not used.
        await self._authorize(request)

        elections = await self._database.fetch_public_elections_resources(
            get_time())
        return json_response(elections)

    async def list_public_past_elections(self, request):
        """List past public elections, relative to the authenticated voter."""
        _, _, user = await self._authorize(request)

        elections = await self._database.fetch_public_past_elections_resources(
            user.get('voter_id'), get_time())
        return json_response(elections)

    async def list_admins(self, request):
        """List every admin voter.  Superadmin only."""
        _, _, user = await self._authorize(request)

        if user.get('type') != 'SUPERADMIN':
            raise ApiForbidden('Forbidden')

        return json_response(await self._database.fetch_admins_resources())

    async def get_voters(self, request):
        """Look up voters by id.  Superadmin only."""
        _, _, user = await self._authorize(request)

        if user.get('type') != 'SUPERADMIN':
            raise ApiForbidden('Forbidden')

        voter_id = request.match_info.get('voterId', '')
        voters = await self._database.fetch_voters_resources(
            voter_id=voter_id)
        return json_response(voters)

    async def get_vote(self, request):
        """Fetch a single vote, verifying that its election still exists."""
        await self._authorize(request)

        vote_id = request.match_info.get('voteId', '')
        if not vote_id:
            raise ApiBadRequest(
                'The vote ID is a required query string parameter')

        vote = await self._database.fetch_vote_resource(vote_id=vote_id)
        if vote is None:
            raise ApiNotFound('Vote with the vote id '
                              '{} was not found'.format(vote_id))

        # Sanity check: the referenced election must still exist.
        election_id = vote.get('election_id')
        election = await self._database.fetch_election_resource(
            election_id=election_id)
        if election is None:
            raise ApiNotFound('Election with the election id '
                              '{} was not found'.format(election_id))

        return json_response(vote)

    async def get_vote_election(self, request):
        """Fetch the authenticated voter's own vote in a given election."""
        _, _, user = await self._authorize(request)

        voter_id = request.match_info.get('voterId', '')
        if not voter_id:
            raise ApiBadRequest(
                'The voter ID is a required query string parameter')

        election_id = request.match_info.get('electionId', '')
        if not election_id:
            raise ApiBadRequest(
                'The election ID is a required query string parameter')

        if user.get('voter_id') != voter_id:
            raise ApiForbidden('Admin must be the authenticated one')

        vote = await self._database.fetch_my_vote__election_resource(
            voter_id=voter_id, election_id=election_id)
        return json_response(vote)

    async def authenticate(self, request):
        """Verify voter credentials and return a bearer token plus user info."""
        body = await decode_request(request)
        validate_fields(['voter_id', 'password'], body)

        # bcrypt operates on bytes, not str.
        password = bytes(body.get('password'), 'utf-8')

        voter = await self._database.fetch_voter_resource(
            voter_id=body.get('voter_id'))
        if voter is None:
            raise ApiUnauthorized('Incorrect voter_id or password')

        auth_info = await self._database.fetch_auth_resource(
            public_key=voter.get('public_key'))
        if auth_info is None:
            raise ApiUnauthorized('No voter with that public key exists')

        # The stored password hash is hex-encoded.
        stored_hash = bytes.fromhex(auth_info.get('hashed_password'))
        if not bcrypt.checkpw(password, stored_hash):
            raise ApiUnauthorized('Incorrect public key or password')

        user = {
            'name': voter.get('name'),
            'voter_id': body.get('voter_id'),
            'type': voter.get('type')
        }

        token = self.generate_auth_token(request.app['secret_key'],
                                         voter.get('public_key'), user)
        return json_response({'accessToken': token, 'user': user})

    async def _authorize(self, request):
        """Validate the request's bearer token.

        Returns a (private_key, public_key, user) triple: the decrypted
        signing key, the token's public key, and the cached user dict.
        """
        token = request.headers.get('AUTHORIZATION')
        if token is None:
            raise ApiUnauthorized('No auth token provided')

        # Strip an optional "Bearer"/"Token" prefix from the header value.
        for prefix in ('Bearer', 'Token'):
            if prefix in token:
                token = token.partition(prefix)[2].strip()

        try:
            token_dict = self.deserialize_auth_token(
                request.app['secret_key'], token)
        except BadSignature:
            raise ApiUnauthorized('Invalid auth token')

        public_key = token_dict.get('public_key')
        auth_resource = await self._database.fetch_auth_resource(
            public_key=public_key)
        if auth_resource is None:
            raise ApiUnauthorized('Token is not associated with an agent')

        user = self.cache.get(token)
        private_key = decrypt_private_key(
            request.app['aes_key'], public_key,
            auth_resource['encrypted_private_key'])
        return private_key, public_key, user

    async def logout(self, request):
        """Invalidate the caller's token by evicting it from the token cache."""
        await self._authorize(request)

        token = request.headers.get('AUTHORIZATION')
        for prefix in ('Bearer', 'Token'):
            if prefix in token:
                token = token.partition(prefix)[2].strip()

        self.cache.delete(token)
        return json_response("Successful logout")

    def generate_auth_token(self, secret_key, public_key, user):
        """Create a signed, expiring bearer token and cache the user for it.

        The token itself only encodes the public key; the user dict is kept
        in the local token cache with the same 1-hour TTL as the signature.

        :param secret_key: server secret used to sign the token
        :param public_key: voter's public key to embed in the token
        :param user: user dict returned to the client and cached per token
        :return: the ASCII-decoded token string
        """
        serializer = Serializer(secret_key, expires_in=3600)
        token = serializer.dumps({'public_key': public_key})
        decoded_token = token.decode('ascii')
        self.cache.set(decoded_token, user, expire=3600)
        # NOTE: the cache is intentionally left open here.  It is shared with
        # _authorize()/logout()/deserialize_auth_token(); the previous
        # close() after every login forced diskcache to reopen its connection
        # on the next access for no benefit.
        return decoded_token

    def deserialize_auth_token(self, secret_key, token):
        """Decode a token, requiring it to still be present in the token cache.

        Raises BadSignature when the token was evicted (logout or expiry) or
        fails signature verification.
        """
        if self.cache.get(token) is None:
            raise BadSignature("")
        return Serializer(secret_key).loads(token)
# ---- コード例 #9 (paste separator, left as a comment so it stays inert) ----
class OptimizationDriver():
    """
    Object to interface the HOPP design analysis methods (i.e., sampling and optimization)

    .. TODO: We might want to rename this to ProblemDriver? As it is required for non-optimization runs.

    Parameters
    ----------
    time_limit : initializer(float), optional
        Total time limit in seconds

    eval_limit : initializer(float), optional
        Objective evaluation limit (counts new evaluations only)

    obj_limit : initializer(float), optional
        Lower bound of objective, exit if best objective is less than this

    n_proc : initializer(int), optional
        Maximum number of objective process workers

    cache_dir : initializer(str), optional
        Directory for the driver cache files

    reconnect_cache : initializer(bool), optional
        ``True`` if the driver should reconnect to a previous result cache

    write_csv : initializer(bool), optional
        ``True`` if the cached results should be written to csv format files

    dataframe_file : initializer(str), optional
        Filename for the driver cache dataframe file

    csv_file : initializer(str), optional
        Filename for the driver cache csv file (if ``write_csv`` is ``True``)

    scaled : initializer(bool), optional
        ``True`` if the sample/optimizer candidates need to be scaled to problem units

    retry : initializer(bool), optional
        ``True`` if any evaluations ending in an exception should be retried on restart
    """
    # Default driver options; user overrides are merged in parse_kwargs().
    DEFAULT_KWARGS = dict(
        time_limit=np.inf,  # total time limit in seconds
        eval_limit=np.inf,  # objective evaluation limit (new evaluations only)
        obj_limit=-np.inf,  # exit if best objective is less than this
        # Maximum number of objective process workers; clamped so machines
        # with <= 4 cores still get one worker instead of zero or negative.
        n_proc=max(1, multiprocessing.cpu_count() - 4),
        cache_dir='driver_cache',  # directory for the driver cache
        reconnect_cache=False,  # reconnect to a previous result cache
        write_csv=False,  # also write cached results to csv files
        dataframe_file='study_results.df.gz',  # cache dataframe filename
        csv_file='study_results.csv',  # cache csv filename
        scaled=True,  # candidates must be scaled to problem units
        retry=True,  # retry evaluations that ended in an exception on restart
    )

    def __init__(self, setup: Callable, **kwargs) -> None:
        """
        Object to interface the HOPP optimization problem with humpday optimizers

        :param setup: Function which creates and returns a new instance of the optimization problem
        :param kwargs: Optional keyword arguments to change driver options (see DEFAULT_KWARGS)
        """
        self.setup = setup
        # The driver keeps its own problem instance to access candidate_from*()
        self.problem = setup()
        self.parse_kwargs(kwargs)

        self.best_obj = None
        self.cache_info = dict(hits=0, misses=0, size=0, total_evals=0)
        self.meta = dict()

        # Candidate formatter: unit-scaled or raw arrays per the 'scaled' option
        if self.options['scaled']:
            self.get_candidate = self.problem.candidate_from_unit_array
        else:
            self.get_candidate = self.problem.candidate_from_array

        self.start_time = None
        self.force_stop = False
        self.eval_count = 0

        if not self.options['reconnect_cache']:
            # Pick an unused directory: cache_dir, cache_dir_1, cache_dir_2, ...
            base_dir = self.options['cache_dir']
            chosen_dir = base_dir
            suffix = 1
            while os.path.isdir(chosen_dir):
                chosen_dir = f"{base_dir}_{suffix}"
                suffix += 1
            self.options['cache_dir'] = chosen_dir

        self.cache = Cache(self.options['cache_dir'],
                           disk=JSONDisk,
                           disk_compress_level=9,
                           cull_limit=0,
                           statistics=1,
                           eviction_policy='none')
        self.start_len = len(self.cache)
        self.read_cache()

    def parse_kwargs(self, kwargs: dict) -> None:
        """
        Helper function to set defaults and update options with user-provided input

        :param kwargs: Using ``**kwargs`` this is a dict of keyword arguments provided by the user
        :return: None
        """
        self.options = self.DEFAULT_KWARGS.copy()

        for key, value in kwargs.items():
            if key in self.options:
                self.options[key] = value
            else:
                print(f"Ignoring unknown driver option {key}={value}")

    def init_parallel_workers(self, num_workers: int) -> None:
        """
        Create the shared task queue, thread lock, and worker processes, then
        start the workers polling the queue.

        :param num_workers: Number of process-independent objective workers
        :return:
        """
        # The queue and lock are created once and reused on later restarts
        if not hasattr(self, 'tasks'):
            self.tasks = multiprocessing.JoinableQueue()
            self.lock = threading.Lock()

        print(f"Creating {num_workers} workers")
        self.workers = [
            Worker(self.tasks, self.cache, self.setup)
            for _ in range(num_workers)
        ]

        for worker in self.workers:
            worker.start()

    def cleanup_parallel(self) -> None:
        """
        Cleanup all worker processes, signal them to exit cleanly, mark any pending tasks as complete

        :return: None
        """

        # If the driver receives a KeyboardInterrupt then the task queue needs to be emptied
        if self.force_stop:
            try:
                # Mark all tasks complete
                while True:
                    self.tasks.get(block=False)
                    self.tasks.task_done()

            # Occurs when task queue is empty
            except queue.Empty:
                pass

        else:
            # Exit normally, None task signals each worker to exit
            for i in range(len(self.workers)):
                self.tasks.put((None, 'worker exit'))

        # Wait for all of the tasks to finish
        self.tasks.join()
        for w in self.workers:
            w.join()
            del w

    def check_interrupt(self) -> None:
        """
        Check optional stopping criteria, these are specified by the user in the driver options

        :return: None
        """
        if self.force_stop:
            raise OptimizerInterrupt

        elapsed = time.time() - self.start_time
        if elapsed > self.options['time_limit']:
            # print(f"Driver exiting, time limit: {self.options['time_limit']} secs")
            # logging.info(f"Driver exiting, time limit: {self.options['time_limit']} secs")
            raise OptimizerInterrupt

        if self.eval_count >= self.options['eval_limit']:
            # print(f"Driver exiting, eval limit: {self.options['eval_limit']}")
            # logging.info(f"Driver exiting, eval limit: {self.options['eval_limit']}")
            raise OptimizerInterrupt

        if (self.best_obj
                is not None) and (self.best_obj <= self.options['obj_limit']):
            # print(f"Driver exiting, obj limit: {self.options['obj_limit']}")
            # logging.info(f"Driver exiting, obj limit: {self.options['obj_limit']}")
            raise OptimizerInterrupt

    def print_log_header(self) -> None:
        """
        Print a linear solver-style log header.

        """
        self.log_headers = ['N_Evals', 'Objective', 'Eval_Time', 'Total_Time']
        self.log_widths = [len(header) + 5 for header in self.log_headers]

        print()
        print("##### HOPP Optimization Driver #####".center(
            sum(self.log_widths)))
        print("Driver Options:", self.options, sep="\n\t")
        print("Optimizer Options:", self.opt_names, sep="\n\t")
        print()
        print("".join(
            (val.rjust(width)
             for val, width in zip(self.log_headers, self.log_widths))))

    def print_log_line(self, reason, obj, eval_time) -> None:
        """
        Print a linear solver-style log line.

        :param info: Dictionary containing at least the evaluation time of the last iteration and reason why a log
            line is being printed. Originally lines would be printed for a hit on the cache (denoted by a ``c`` prefix on the
            the line, but this was removed, and lines are now only printed on new evaluations for conciseness.

        :returns: None
        """
        prefix_reasons = {'cache_hit': 'c ', 'new_best': '* ', '': ''}
        prefix = prefix_reasons[reason]
        best_objective_str = f"{obj:8g}" if obj is not None else "NA"

        curr_time = time.time()
        log_values = [
            prefix + str(self.eval_count), f"{best_objective_str}",
            f"{eval_time / 60:.2f} min",
            f"{(curr_time - self.start_time) / 60:.2f} min"
        ]
        print("".join((val.rjust(width)
                       for val, width in zip(log_values, self.log_widths))))

    def print_log_end(self, best_candidate, best_objective):
        candidate_str = str(best_candidate) \
            .replace('[', '[\n    ', 1) \
            .replace('], ', '],\n    ') \
            .replace(']]', ']\n  ]')

        print()
        print(f"Best Objective: {best_objective:.2f}")
        print(f"Best Candidate:\n  {candidate_str}")

    def write_cache(self, pd_filename=None, csv_filename=None) -> None:
        """
        Write the driver cache out to a timestamped pandas dataframe pickle,
        and optionally to csv files.

        Metadata (cache stats, driver options, and the problem/simulation
        source text) is stored under the reserved cache key 'meta' and also
        attached to the dataframe via ``df.attrs``.

        :param pd_filename: Optional filename for the dataframe pickle,
            placed under ``<cache_dir>/_dataframe/<timestamp>/``
        :param csv_filename: Optional filename for the csv output; supplying
            one forces csv writing even when the 'write_csv' option is False
        :return:  None
        """
        # if self.start_len == len(self.cache) - 1:
        # print(f"no new entries in cache ({len(self.cache) - 1} results), skipping write...")
        # return

        dt_string = datetime.now().strftime('%Y-%m-%d_%H.%M.%S')

        pd_dir = os.path.join(self.options['cache_dir'], '_dataframe',
                              dt_string)
        if not os.path.isdir(pd_dir):
            os.makedirs(pd_dir)

        if pd_filename is None:
            pd_filename = os.path.join(pd_dir, self.options['dataframe_file'])
        else:
            pd_filename = os.path.join(pd_dir, pd_filename)

        if self.options['write_csv'] or csv_filename is not None:
            csv_dir = os.path.join(self.options['cache_dir'], '_csv',
                                   dt_string)
            if not os.path.isdir(csv_dir):
                os.makedirs(csv_dir)

            if csv_filename is None:
                csv_filename = os.path.join(csv_dir, self.options['csv_file'])
            else:
                csv_filename = os.path.join(csv_dir, csv_filename)

        # Gather meta data
        self.meta['cache_info'] = self.cache_info.copy()
        self.meta['driver_options'] = self.options
        self.meta['start_time'] = self.start_time
        self.meta['candidate_fields'] = self.problem.candidate_fields
        self.meta['design_variables'] = self.problem.design_variables
        self.meta['fixed_variables'] = self.problem.fixed_variables

        # inspect.getsource raises TypeError for functools.partial objects;
        # fall back to the wrapped function's source plus its bound arguments
        try:
            self.meta['problem_setup'] = inspect.getsource(self.setup)
        except TypeError:
            if isinstance(self.setup, partial):
                self.meta['problem_setup'] = inspect.getsource(self.setup.func) \
                                             + '__args__' + str(self.setup.args) + '\n' \
                                             + '__keywords__' + str(self.setup.keywords) + '\n'

        try:
            self.meta['sim_setup'] = inspect.getsource(
                self.problem.init_simulation)
        except TypeError:
            if isinstance(self.problem.init_simulation, partial):
                self.meta['sim_setup'] = inspect.getsource(self.problem.init_simulation.func) \
                                             + '__args__' + str(self.problem.init_simulation.args) + '\n' \
                                             + '__keywords__' + str(self.problem.init_simulation.keywords) + '\n'

        try:
            self.meta['eval_obj'] = inspect.getsource(
                self.problem.evaluate_objective)
        except TypeError:
            if isinstance(self.problem.evaluate_objective, partial):
                self.meta['eval_obj'] = inspect.getsource(self.problem.evaluate_objective.func) \
                                             + '__args__' + str(self.problem.evaluate_objective.args) + '\n' \
                                             + '__keywords__' + str(self.problem.evaluate_objective.keywords) + '\n'

        self.cache['meta'] = self.meta.copy()

        # 'meta' is excluded from the result count, hence the "- 1"
        self.start_len = len(self.cache) - 1
        print(
            f"writing {len(self.cache) - 1} results to dataframe {pd_filename}..."
        )

        data_list = []
        candidate_sep = self.problem.sep
        pandas_sep = '__'

        for candidate in self.cache:
            if candidate == 'meta':
                continue

            result = self.cache.get(candidate)

            # Drop malformed entries (e.g. interrupt sentinels) from the cache
            if not isinstance(result, dict):
                self.cache.delete(candidate)
                continue

            # row = dict()
            #
            # for key, value in candidate:
            #     key = key.replace(candidate_sep, pandas_sep)
            #     row[key] = value
            #
            # row.update(flatten_dict(result))
            data_list.append(result)

        df = pd.DataFrame(data_list)
        df.attrs = self.meta
        df.to_pickle(pd_filename)

        ### new code to write to csv(s)
        if csv_filename is not None:
            # Scalar columns go into one summary csv; iterable columns
            # (arrays, time series) are written one csv file per result row
            all_cols = sorted(df.columns)
            scalar_cols = [
                col for col in all_cols
                if isinstance(df[col].loc[0], np.float64)
            ]
            iterable_cols = [
                col for col in all_cols
                if not isinstance(df[col].loc[0], np.float64)
            ]

            df[scalar_cols].to_csv(csv_filename)

            for i in range(len(df)):
                df_row = pd.concat(
                    [pd.Series(df[col].loc[i]) for col in iterable_cols],
                    axis=1,
                    keys=iterable_cols)
                row_filename = os.path.join(csv_dir, f"{i}.csv")
                df_row.to_csv(row_filename)

    def read_cache(self) -> None:
        """
        Read the driver cache from file

        :param filename: Optional path of file to read the cache from
        :return: None
        """
        if len(self.cache) > 0:
            try:
                self.cache_info.update(self.cache['meta']['cache_info'])
                self.meta.update(self.cache['meta'].copy())

            except KeyError:
                pass

        return

    def wrapped_parallel_objective(self):
        """
        This method implements the logic to check if a candidate is in the cache, or is pending evaluation, or neither.
        Each optimizer thread needs its own copy of this method since they don't have access to the driver object, we
        can implement this by wrapping this method and returning the wrapped function. This allows the optimizer threads
        to share the driver object without explicitly passing it to them, and allows them to all use the shared task
        queue and driver cache.

        :return: None
        """
        eval_count = 0

        @wraps(self.wrapped_parallel_objective)
        def p_wrapper(*args, name=None, idx=None, objective=None):
            """
            Objective function the optimizer threads call, assumes a parallel structure and avoids any re-calculations
                - Check if candidate is in cache, if so return objective stored in cache
                - If not, check if candidate is in queue (indicated by integer value in cache), wait for signal
                - If not, objective needs to be calculated, add candidate to task queue, poll cache for return,
                    and finally signal any threads waiting on the same candidate

            :param args: Follows the optimizer's convention of objective inputs (typically an array of floats)
            :param name: Caller name to insert into the result dictionary
            :param idx: Thread index, used for signal conditions
            :param objective_keys: Ordered list of keys to get the objective from the result dictionary
            :return: the numeric value being optimized
            """
            nonlocal eval_count
            eval_count += 1

            self.check_interrupt()
            candidate = self.get_candidate(*args)
            self.cache_info['total_evals'] += 1
            obj = None

            try:
                # Check if result in cache, throws KeyError if not
                self.lock.acquire()
                result = self.cache[candidate]
                # print(f"cache hit {self.cache_info['total_evals']}")
                self.lock.release()
                self.cache_info['hits'] += 1

                if isinstance(result, int):
                    # In cache but not complete, wait for complete signal
                    signal = self.conditions[result]
                    with signal:
                        signal.wait()

                    result = self.cache[candidate]

                if not isinstance(result, dict):
                    self.force_stop = True
                    self.check_interrupt()

                if 'exception' in result.keys():
                    if self.options['retry']:
                        with self.lock:
                            self.cache.delete(candidate)

                        raise KeyError

                # Result available in cache, no work needed
                # Append this caller name to the result dictionary
                with self.lock:
                    result['caller'].append((name, eval_count))
                    self.cache[candidate] = result

            except KeyError:
                # Candidate not in cache, nor waiting in queue
                self.cache[
                    candidate] = idx  # indicates waiting condition for any other thread

                # Insert candidate and caller information into task queue
                self.tasks.put((candidate, (name, eval_count)))

                self.lock.release()
                self.cache_info['misses'] += 1

                # Poll cache for available result (unclear how this could be a threading.Condition signal)
                result = self.cache[candidate]
                while isinstance(result, int):
                    time.sleep(10)
                    result = self.cache[candidate]

                # Signal any other threads waiting on the same candidate
                signal = self.conditions[idx]
                with signal:
                    signal.notifyAll()

                # KeyboardInterrupt places a OptimizerInterrupt in the cache to signal a force_stop
                if not isinstance(result, dict):
                    self.force_stop = True
                    self.check_interrupt()

                # Update best best objective if needed, and print a log line to console
                if objective is not None:
                    obj = objective(result)

                    if (self.best_obj is None) or (obj < self.best_obj):
                        self.best_obj = obj
                        reason = 'new_best'
                    else:
                        reason = ''
                else:
                    reason = ''

                with self.lock:
                    self.eval_count += 1
                    self.print_log_line(reason, obj, result['eval_time'])

                self.cache_info['size'] += 1

            return obj

        return p_wrapper

    def wrapped_objective(self):
        """
        Build a serial (in-process) objective wrapper.

        The returned callable checks whether a candidate is already in the
        driver cache; on a miss it evaluates the objective in the current
        process and caches the timed result.  Wrapping this method lets
        optimizer callers share the driver cache without holding a reference
        to the driver object itself.

        :return: the wrapped objective callable
        """
        eval_count = 0

        @wraps(self.wrapped_objective)
        def s_wrapper(*args, name=None, objective=None):
            """
            Objective function the optimizer calls; avoids re-calculations
                - Check if candidate is in cache, if so return objective stored in cache
                - If not, evaluate the objective in-process, time it, and store the result

            :param args: Follows the optimizer's convention of objective inputs (typically an array of floats)
            :param name: Caller name to insert into the result dictionary
            :param objective: Callable extracting the objective value from the result dictionary
            :return: the numeric value being optimized
            """
            nonlocal eval_count
            eval_count += 1

            self.check_interrupt()
            candidate = self.get_candidate(*args)
            self.cache_info['total_evals'] += 1
            obj = None

            try:
                result = self.cache[candidate]
                # print(f"cache hit {self.cache_info['total_evals']}")

                # A previously failed evaluation is retried by deleting the
                # stale entry and falling through to the miss path below
                if 'exception' in result.keys():
                    if self.options['retry']:
                        self.cache.delete(candidate)
                        raise KeyError

                self.cache_info['hits'] += 1

                # Result available in cache, no work needed
                # Append this caller name to the result dictionary
                result['caller'].append((name, eval_count))
                self.cache[candidate] = result

            except KeyError:
                # Execute task, measure evaluation time
                start_time = time.time()
                result = self.problem.evaluate_objective(candidate)
                result['eval_time'] = time.time() - start_time
                result['caller'] = [(name, eval_count)]

                self.cache[candidate] = result
                self.cache_info['misses'] += 1

                # KeyboardInterrupt places a OptimizerInterrupt in the cache to signal a force_stop
                if not isinstance(result, dict):
                    self.force_stop = True
                    self.check_interrupt()

                # Update best objective if needed, and print a log line to console
                if objective is not None:
                    obj = objective(result)

                    if (self.best_obj is None) or (obj < self.best_obj):
                        self.best_obj = obj
                        reason = 'new_best'

                    else:
                        reason = ''

                else:
                    reason = ''

                self.eval_count += 1
                self.print_log_line(reason, obj, result['eval_time'])

                self.cache_info['size'] += 1

            return obj

        return s_wrapper

    def execute(self, callables, inputs):
        """
        Execute each pairwise callable given input in separate threads, using up to n_processors or the number of
        callables whichever is less.

        :param callables: A list of callable functions (e.g. a list of optimizer functions)
        :param inputs: A list of inputs, one for each callable (e.g. a list of wrapped problem objectives)
        :param objective_keys: A list of keys for the result nested dictionary structure
        :return: Either the best objective found, corresponding to objective_keys, or the number of
                successful evaluations if objective_keys is None
        """
        # setup
        self.start_time = time.time()
        self.eval_count = 0
        self.force_stop = False

        # Begin parallel execution
        self.print_log_header()
        output = dict()

        try:
            for f, input, name in zip(callables, inputs, self.opt_names):
                try:
                    output[name] = f(input)

                # On an OptimizerInterrupt cancel all pending futures
                except OptimizerInterrupt:
                    break

                # Print any others
                except Exception as exc:
                    err_str = traceback.format_exc()
                    print(f"{name} generated an exception: {err_str}")

                # Optimizer thread exits normally
                else:
                    # Print done message if input is a function (as in an optimization run)
                    if callable(inputs[0]):
                        print(f"Optimizer {name} finished")

        # Allows clean exit on KeyboardInterrupt
        except KeyboardInterrupt:
            pass

        self.write_cache()
        self.cache.close()
        return output

    def parallel_execute(self, callables, inputs):
        """
        Execute each pairwise callable given input in separate threads, using up to n_processors or the number of
        callables whichever is less.

        :param callables: A list of callable functions (e.g. a list of optimizer functions)
        :param inputs: A list of inputs, one for each callable (e.g. a list of wrapped problem objectives)
        :param objective_keys: A list of keys for the result nested dictionary structure
        :return: Either the best objective found, corresponding to objective_keys, or the number of
                successful evaluations if objective_keys is None
        """
        # setup
        self.start_time = time.time()
        self.eval_count = 0
        self.force_stop = False

        # Establish communication queues and execution workers-
        num_workers = min(
            self.options['n_proc'],
            len(callables))  # optimizers are assumed to be serial
        self.init_parallel_workers(num_workers)

        # Add thread conditions to allow signaling between threads waiting on the same candidate
        self.conditions = [
            threading.Condition() for _ in range(len(callables))
        ]

        # Begin parallel execution
        self.print_log_header()
        output = dict()

        with cf.ThreadPoolExecutor(max_workers=num_workers) as executor:
            try:
                threads = {
                    executor.submit(callables[i], inputs[i]): name
                    for i, name in enumerate(self.opt_names)
                }

                for future in cf.as_completed(threads):
                    name = threads[future]
                    try:
                        output[name] = future.result()

                    # On an OptimizerInterrupt cancel all pending futures
                    except OptimizerInterrupt:
                        for f, name in threads.items():
                            f.cancel()
                        break

                    # Print any others
                    except Exception as exc:
                        err_str = traceback.format_exc()
                        print(f"{name} generated an exception: {err_str}")

                    # Optimizer thread exits normally
                    else:
                        # Print done message if input is a function (as in an optimization run)
                        if callable(inputs[0]):
                            print(f"Optimizer {name} finished")

            # Allows clean exit on KeyboardInterrupt
            except KeyboardInterrupt:
                for future, name in threads.items():
                    future.cancel()

        # End worker processes
        self.cleanup_parallel()
        self.write_cache()
        self.cache.close()

        return output

    def sample(self, candidates, design_name='Sample') -> int:
        """
        Execute the objective function on each candidate in a sample in parallel, using yp to n_processors or the
        number of candidates threads.

        :param candidates: A list of unit arrays corresponding to the samples of a design.
        :return: The number of successful evaluations.
        """
        n_candidates = len(candidates)
        self.opt_names = [f"{design_name}-{i}" for i in range(n_candidates)]

        callables = [
            partial(self.wrapped_objective(), name=name)
            for i, name in enumerate(self.opt_names)
        ]

        evaluations = self.execute(callables, candidates)

        return evaluations

    def parallel_sample(self, candidates, design_name='Sample') -> int:
        """
        Execute the objective function on each candidate in a sample in parallel, using yp to n_processors or the
        number of candidates threads.

        :param candidates: A list of unit arrays corresponding to the samples of a design.
        :return: The number of successful evaluations.
        """
        n_candidates = len(candidates)
        self.opt_names = [f"{design_name}-{i}" for i in range(n_candidates)]

        callables = [
            partial(self.wrapped_parallel_objective(), name=name, idx=i)
            for i, name in enumerate(self.opt_names)
        ]

        output = self.parallel_execute(callables, candidates)

        return output

    def optimize(self, optimizers, opt_configs, objectives) -> tuple:
        """
        Execute the the list of optimizers on an instance of the wrapped objective function, using up to n_processors
        or the number of optimizers.

        :param optimizers: A list of optimization callable functions, taking the function to be optimized and config.
        :param opt_config: The common optimizer configuration, shared between all optimization functions.
        :param objective_keys: A list of keys for the result nested dictionary structure
        :return: The best candidate and best simulation result found.
        """
        n_opt = len(optimizers)
        self.opt_names = [
            f"{opt.__name__}-{obj.__name__}"
            for opt, obj in zip(optimizers, objectives)
        ]
        self.meta['obj'] = {
            f"{obj.__name__}-{i}": inspect.getsource(obj)
            for i, obj in enumerate(objectives)
        }

        # Defining optimizer thread callables and inputs
        # The wrapped objective function is the input to the optimizer
        callables = [
            partial(opt, **config)
            for opt, config in zip(optimizers, opt_configs)
        ]
        inputs = [
            partial(self.wrapped_objective(), name=name, objective=obj)
            for i, (name, obj) in enumerate(zip(self.opt_names, objectives))
        ]

        # Some optimizers need the threads to have a __name__ attribute, partial objects do not
        for i in range(n_opt):
            inputs[i].__name__ = self.opt_names[i]

        # best_candidate, best_result = self.execute(callables, inputs, objective=objective)
        output = self.execute(callables, inputs)

        return output

    def parallel_optimize(self, optimizers, opt_configs, objectives) -> tuple:
        """
        Execute the the list of optimizers on an instance of the wrapped objective function, using up to n_processors
        or the number of optimizers.

        :param optimizers: A list of optimization callable functions, taking the function to be optimized and config.
        :param opt_config: The common optimizer configuration, shared between all optimization functions.
        :param objective_keys: A list of keys for the result nested dictionary structure
        :return: The best candidate and best simulation result found.
        """
        n_opt = len(optimizers)
        self.opt_names = [
            f"{opt.__name__}-{obj.__name__}"
            for opt, obj in zip(optimizers, objectives)
        ]
        self.meta['obj'] = {
            f"{obj.__name__}-{i}": inspect.getsource(obj)
            for i, obj in enumerate(objectives)
        }

        # Defining optimizer thread callables and inputs
        # The wrapped objective function is the input to the optimizer
        callables = [
            partial(opt, **config)
            for opt, config in zip(optimizers, opt_configs)
        ]
        inputs = [
            partial(self.wrapped_parallel_objective(),
                    name=name,
                    objective=obj,
                    idx=i)
            for i, (name, obj) in enumerate(zip(self.opt_names, objectives))
        ]

        # Some optimizers need the threads to have a __name__ attribute, partial objects do not
        for i in range(n_opt):
            inputs[i].__name__ = self.opt_names[i]

        # best_candidate, best_result = self.parallel_execute(callables, inputs, objective=objective)
        output = self.parallel_execute(callables, inputs)

        return output