def __init__(
    self,
    model,
    maximizable=False,
    tokenizer=None,
    use_cache=True,
    query_budget=float("inf"),
    model_batch_size=32,
    model_cache_size=2**18,
):
    """Initialize the goal function around a victim ``model``.

    Args:
        model: Model to be attacked; its class is validated for
            compatibility with this goal function class.
        maximizable: Whether the goal is a score to maximize rather than
            a hard success condition.
        tokenizer: Tokenizer used to encode inputs. If omitted, falls
            back to ``model.tokenizer``.
        use_cache: If True, cache model outputs in an LRU cache.
        query_budget: Maximum number of model queries allowed
            (unbounded by default).
        model_batch_size: Batch size used when calling the model.
        model_cache_size: Capacity of the LRU output cache.

    Raises:
        NameError: If no tokenizer is supplied and the model has none.
        TypeError: If the tokenizer lacks an ``encode()`` method.
    """
    validators.validate_model_goal_function_compatibility(
        self.__class__, model.__class__
    )
    self.model = model
    self.maximizable = maximizable
    self.tokenizer = tokenizer
    if not self.tokenizer:
        # Fall back to a tokenizer bundled with the model, if present.
        if hasattr(self.model, "tokenizer"):
            self.tokenizer = self.model.tokenizer
        else:
            raise NameError("Cannot instantiate goal function without tokenizer")
    if not hasattr(self.tokenizer, "encode"):
        raise TypeError("Tokenizer must contain `encode()` method")
    self.use_cache = use_cache
    # Consistent with the sibling __init__ variants: start the query
    # counter at zero so query_budget comparisons never hit an
    # AttributeError before the first attack.
    self.num_queries = 0
    self.query_budget = query_budget
    self.model_batch_size = model_batch_size
    if self.use_cache:
        self._call_model_cache = lru.LRU(model_cache_size)
    else:
        self._call_model_cache = None
def __init__(self, model, use_cache=True):
    """Set up the goal function for ``model``.

    Args:
        model: Model to be attacked; validated for compatibility with
            this goal function class before anything else is stored.
        use_cache: When True, memoize model outputs in an LRU cache
            sized by the ``MODEL_CACHE_SIZE`` config value.
    """
    validators.validate_model_goal_function_compatibility(
        self.__class__, model.__class__
    )
    self.model = model
    self.use_cache = use_cache
    self.num_queries = 0
    # Allocate the cache only when caching is actually enabled.
    self._call_model_cache = (
        lru.LRU(utils.config('MODEL_CACHE_SIZE')) if self.use_cache else None
    )
def __init__(
    self,
    model,
    maximizable=False,
    use_cache=True,
    query_budget=float("inf"),
    model_cache_size=2**20,
):
    """Initialize the goal function around a victim ``model``.

    Args:
        model: Model to be attacked; its class is validated for
            compatibility with this goal function class.
        maximizable: Whether the goal is a score to maximize rather than
            a hard success condition.
        use_cache: If True, cache model outputs in an LRU cache.
        query_budget: Maximum number of model queries allowed
            (unbounded by default).
        model_cache_size: Capacity of the LRU output cache.
    """
    validators.validate_model_goal_function_compatibility(
        self.__class__, model.__class__
    )
    self.model = model
    self.maximizable = maximizable
    self.use_cache = use_cache
    # Consistent with the sibling __init__ variants: start the query
    # counter at zero so query_budget comparisons never hit an
    # AttributeError before the first attack.
    self.num_queries = 0
    self.query_budget = query_budget
    if self.use_cache:
        self._call_model_cache = lru.LRU(model_cache_size)
    else:
        self._call_model_cache = None
def __init__(
    self, model, tokenizer=None, use_cache=True, query_budget=float("inf")
):
    """Construct the goal function for ``model``.

    Args:
        model: Model to be attacked; compatibility with this goal
            function class is validated first.
        tokenizer: Tokenizer used to encode inputs; defaults to
            ``model.tokenizer`` when available.
        use_cache: When True, memoize model outputs in an LRU cache
            sized by the ``MODEL_CACHE_SIZE`` config value.
        query_budget: Maximum number of model queries allowed.

    Raises:
        NameError: If no tokenizer is supplied and the model has none.
        TypeError: If the tokenizer lacks an ``encode()`` method.
    """
    validators.validate_model_goal_function_compatibility(
        self.__class__, model.__class__
    )
    self.model = model
    # Resolve the tokenizer: explicit argument wins, otherwise borrow
    # the one attached to the model.
    resolved = tokenizer
    if not resolved:
        if not hasattr(self.model, "tokenizer"):
            raise NameError("Cannot instantiate goal function without tokenizer")
        resolved = self.model.tokenizer
    if not hasattr(resolved, "encode"):
        raise TypeError("Tokenizer must contain `encode()` method")
    self.tokenizer = resolved
    self.use_cache = use_cache
    self.num_queries = 0
    self.query_budget = query_budget
    self._call_model_cache = (
        lru.LRU(utils.config("MODEL_CACHE_SIZE")) if self.use_cache else None
    )