def run_sim_job(batch_idx, in_dir, out_dir):
    """Run one simulation batch read from CSV and write its outputs.

    Args:
        batch_idx: integer index identifying the batch files.
        in_dir: directory containing ``batch<idx>_in.csv``.
        out_dir: directory for the output file (forwarded to
            ``Samplerun.perform_sample`` via ``out_dir``).
    """
    # f-strings instead of dated %-formatting. Note the asymmetry is
    # intentional: in_file is a full path, while out_file is a bare name
    # that perform_sample combines with out_dir.
    in_file = f'{in_dir}/batch{batch_idx}_in.csv'
    out_file = f'batch{batch_idx}_out.csv'
    in_data = pd.read_csv(in_file)
    run = Samplerun(no_docker=True)
    run.perform_sample(out_file=out_file,
                       out_dir=out_dir,
                       param_values=in_data)
def test_samplerun():
    """Generate uniformly distributed sample points and write them to CSV."""
    sampler = Samplerun()
    sampler.perform_sample(out_file='100uniform.csv',
                           n_samples=5,
                           domain=Domain(),
                           sampling_strategy=UniformSamplingStrategy())
def test_runfromfile():
    """Run sampling for parameter values loaded from an existing CSV file."""
    params = pd.read_csv("params/100params0000000.csv")
    sampler = Samplerun()
    sampler.perform_sample(out_file='100fix0000000out.csv',
                           n_samples=100,
                           param_values=params,
                           domain=Domain(),
                           sampling_strategy=UniformSamplingStrategy())
def query_tbr(self):
    """Sample TBR for ``self.tbr_params``, merge the results back onto the
    parameter grid, and emit them via signals.

    Emits:
        samples_available(DataFrame): the parameter grid left-joined with
            the sampled outputs.
        finished(): after the merged samples have been emitted.
    """
    run = Samplerun(no_docker=True)
    sampled = run.perform_sample(
        out_file=None,
        param_values=self.tbr_params,
        progress_handler=self.progress_handler)

    # Left join keeps every requested parameter point even when sampling
    # returned no row for it. left_on == right_on, so use `on=` directly.
    param_names = [self.x_param_name, self.y_param_name]
    sampled = pd.merge(self.tbr_params, sampled,
                       how='left', on=param_names,
                       suffixes=('', '_dup_'))

    # Columns duplicated by the merge carry the '_dup_' suffix; discard them.
    dup_columns = [col for col in sampled.columns if col.endswith('_dup_')]
    sampled.drop(columns=dup_columns, inplace=True)

    self.samples_available.emit(sampled)
    self.finished.emit()
def theory_TBR(params, domain, n_samples=1):
    """Sample TBR for the given parameter values over *domain*.

    Returns whatever ``Samplerun.perform_sample`` produces for the
    supplied parameters (no output file is written here).
    """
    sampler = Samplerun()
    sampled = sampler.perform_sample(domain=domain,
                                     n_samples=n_samples,
                                     param_values=params)
    return sampled