def __init__(self, **kwargs):
    """Read optional loss parameters from kwargs and report the configuration.

    Both parameters are optional; `retrieve_param_opt` decides how a
    missing key is represented.
    """
    self.TILTED_Q = retrieve_param_opt("TILTED_Q", kwargs)
    self.LINEX_C = retrieve_param_opt("LINEX_C", kwargs)
    # Report every ALL-CAPS attribute set so far (i.e. the configuration constants).
    settings = [
        "%s=%s" % (name, format_value(value))
        for name, value in vars(self).items()
        if name.upper() == name
    ]
    print("[LossFactory] Configuration: %s" % " ".join(settings))
def __init__(self, **kwargs):
    """Configure numerical optimization of decisions h from kwargs.

    Required: sample counts for the utility term and a default sampler of y.
    Optional: numerical-optimization controls (iterations, tolerances,
    learning rate, warm-start flag) and the backend choice
    (SciPy when H_NUMERICAL_OPT_SCIPY is truthy, PyTorch otherwise).
    """
    # Required sampling sizes for the utility term.
    self.H_NSAMPLES_UTILITY_TERM_THETA = retrieve_param(
        "H_NSAMPLES_UTILITY_TERM_THETA", kwargs)
    self.H_NSAMPLES_UTILITY_TERM_Y = retrieve_param(
        "H_NSAMPLES_UTILITY_TERM_Y", kwargs)
    # Optional numerical-optimization controls; assignment order is kept
    # because the configuration print below iterates vars(self) in
    # insertion order.
    for name, default in (
        ("H_NUMERICAL_MAX_NITER", 10000),
        ("H_NUMERICAL_MAX_NITER_TOL", 0.0001),
        ("H_NUMERICAL_MAX_NITER_TOL_GOAL", 1e-10),
        ("H_NUMERICAL_LR", 0.1),
        ("H_NUMERICAL_START_FROM_PREVIOUS", False),
    ):
        setattr(self, name, retrieve_param_opt(name, kwargs, default))
    # Default sampler of y.
    self.sample_predictive_y0 = retrieve_param("sample_predictive_y0", kwargs)
    shown = [
        "%s=%s" % (k, format_value(v))
        for k, v in vars(self).items()
        if k.upper() == k or k.startswith("sample")
    ]
    print("[HOptimizerFactory] Configuration: %s" % " ".join(shown))
    # Assigned after the print, so it is not part of the reported config.
    self.optimal_h_bayes_estimator = kwargs.get(
        "optimal_h_bayes_estimator", None)
    # Select the numerical optimizer backend.
    if kwargs.get("H_NUMERICAL_OPT_SCIPY", False):
        print("[HOptimizerFactory] Choosing SciPy numerical optimization.")
        self.optimal_h_numerically = optimal_h_numerically_ty_scipy
    else:
        print(
            "[HOptimizerFactory] Choosing PyTorch numerical optimization.")
        self.optimal_h_numerically = optimal_h_numerically_ty
def __init__(self, **kwargs):
    """Read utility parameters from kwargs and report the configuration."""
    self.M = retrieve_param("M", kwargs)  # required
    self.GAMMA = retrieve_param_opt("GAMMA", kwargs, 1.0)  # optional, default 1.0
    # Report every ALL-CAPS attribute (the configuration constants).
    config = " ".join(
        "%s=%s" % (name, format_value(value))
        for name, value in vars(self).items()
        if name.upper() == name
    )
    print("[UtilityFactory] Configuration: %s" % config)
def __init__(self, y, loss, u, sample_predictive_y,
             optimal_h_bayes_estimator=None, y_mask=None,
             GAIN_OPTIMAL_H_NUMERICALLY=True,
             RISK_OPTIMAL_H_NUMERICALLY=False,
             EVAL_NSAMPLES_UTILITY_TERM_THETA=1000,
             EVAL_NSAMPLES_UTILITY_TERM_Y=1,
             EVAL_MAX_NITER=10000,
             EVAL_SGD_PREC=0.0001,
             EVAL_LR=0.01,
             EVAL_RESAMPLE_EVERY_TIME=False):
    """Set up evaluation of risks and gains on data y.

    Args:
        y: Evaluation data (a torch tensor; `.device` is inspected below).
        loss: A function y x h -> loss used to calculate risks.
        u: A function y x h -> utility used to calculate gains.
        sample_predictive_y: A function that for each data point from y,
            generates samples from the predictive posterior.
        optimal_h_bayes_estimator: Optional closed-form Bayes estimator of h;
            when None and either *_OPTIMAL_H_NUMERICALLY flag is False,
            both flags are forced to True (see below).
        y_mask: A mask selecting data points for evaluation
            (default: all points).
        GAIN_OPTIMAL_H_NUMERICALLY: Obtain optimal h for Gain numerically.
        RISK_OPTIMAL_H_NUMERICALLY: Obtain optimal h for Risk numerically.
        EVAL_NSAMPLES_UTILITY_TERM_THETA: Number of theta samples for the
            utility term.
        EVAL_NSAMPLES_UTILITY_TERM_Y: Number of y samples for the utility term.
        EVAL_MAX_NITER: Iteration cap for numerical optimization.
        EVAL_SGD_PREC: Stopping precision for numerical optimization.
        EVAL_LR: Learning rate for numerical optimization.
        EVAL_RESAMPLE_EVERY_TIME: If False, results of sample_predictive_y,
            optimal_h_for_gain and optimal_h_for_risk are cached
            (last result reused).
    """
    self.y = y
    self.y_mask = y_mask
    if self.y_mask is None:
        print2(
            "[Evaluation] WARNING: using default all data points in evaluation."
        )
        # Choose the CPU or CUDA tensor namespace based on where y lives,
        # then build an all-ones byte mask matching y's shape.
        env = torch if "cpu" in str(self.y.device).lower() else torch.cuda
        self.y_mask = torch.ones_like(self.y).type(env.ByteTensor)
    self.loss = loss
    self.utility = u
    self.sample_predictive_y = sample_predictive_y
    self.optimal_h_bayes_estimator = optimal_h_bayes_estimator
    # No closed-form estimator available but at least one flag requested a
    # non-numerical h: fall back to numerical optimization for BOTH Risk and
    # Gain. Note the local flag variables are overwritten here, before they
    # are stored on self just below.
    if (self.optimal_h_bayes_estimator is None) and \
       (not GAIN_OPTIMAL_H_NUMERICALLY or not RISK_OPTIMAL_H_NUMERICALLY):
        print2(
            "[Evaluation] WARNING: Optimal decisions h for both Risk and Gain will be obtained numerically."
        )
        # Placeholder estimator: returns None so numerical optimization
        # is used instead.
        self.optimal_h_bayes_estimator = lambda ys: None
        GAIN_OPTIMAL_H_NUMERICALLY, RISK_OPTIMAL_H_NUMERICALLY = True, True
    self.GAIN_OPTIMAL_H_NUMERICALLY = GAIN_OPTIMAL_H_NUMERICALLY
    self.RISK_OPTIMAL_H_NUMERICALLY = RISK_OPTIMAL_H_NUMERICALLY
    self.EVAL_NSAMPLES_UTILITY_TERM_THETA = EVAL_NSAMPLES_UTILITY_TERM_THETA
    self.EVAL_NSAMPLES_UTILITY_TERM_Y = EVAL_NSAMPLES_UTILITY_TERM_Y
    self.EVAL_MAX_NITER = EVAL_MAX_NITER
    self.EVAL_SGD_PREC = EVAL_SGD_PREC
    self.EVAL_LR = EVAL_LR
    print("[Evaluation] Configuration: %s" %
          " ".join("%s=%s" % (k, format_value(v))
                   for k, v in vars(self).items()))
    # Wrap the (bound) methods so repeated calls with the same arguments
    # reuse the last result instead of recomputing/resampling.
    if not EVAL_RESAMPLE_EVERY_TIME:
        self.optimal_h_for_gain = cache_function_last_result(
            self.optimal_h_for_gain)
        self.optimal_h_for_risk = cache_function_last_result(
            self.optimal_h_for_risk)
        self.sample_predictive_posterior = cache_function_last_result(
            self.sample_predictive_posterior)