def hyper_param(self):
    """Return the tuned hyperparameters for this method on the lrMU2 benchmark.

    Loads the pickled dict stored on S3 under miso/hyper/<method>_lrMU2 and
    extracts its 'hyperparam' entry.
    """
    s3_key = "miso/hyper/{0}_lrMU2".format(self.method_name)
    stored = get_data_from_s3(self._bucket, s3_key)
    return stored['hyperparam']
### copy back from /backup the files where IS 1 is centered directory_backup = '/miso/data/backup' new_func_name = 'lrMU2' # # num_pts = 1000 # for index_IS in func.getList_IS_to_query(): # key = directory_backup+'/hyper_{1}_IS_{0}_{2}_points'.format(index_IS, func_name, num_pts) # data = get_data_from_s3(bucket, key) # # # write to lrMU2 file # key_new = directory + '/hyper_{1}_IS_{0}_{2}_points'.format(index_IS, new_func_name, num_pts) # # print key_new # send_data_to_s3(bucket, key_new, data) # init data num_replications = 100 num_pts = 10 for repl_no in xrange(num_replications): print '\nrepl ' + str(repl_no) for index_IS in func.getList_IS_to_query(): key = directory_backup + '/{2}_IS_{0}_{3}_points_repl_{1}'.format( index_IS, repl_no, func_name, num_pts) data = get_data_from_s3(bucket, key) # write to lrMU2 file key_new = directory + '/{2}_IS_{0}_{3}_points_repl_{1}'.format( index_IS, repl_no, new_func_name, num_pts) # print key_new send_data_to_s3(bucket, key_new, data)
def hyper_param(self):
    """Return the tuned hyperparameters for this method on the rosenbrock
    coldstart benchmark.

    The data is loaded from S3 (key coldstart/hyper/<method>_rb); the old
    local-pickle loading path was dead commented-out code and was removed.
    """
    data = get_data_from_s3(self._bucket, "coldstart/hyper/{0}_rb".format(self.method_name))
    return data['hyperparam']
def hyper_param(self):
    """Return the tuned hyperparameters for this method on the ATO
    coldstart benchmark, loaded from S3."""
    key = "coldstart/hyper/{0}_ato".format(self.method_name)
    return get_data_from_s3(self._bucket, key)['hyperparam']
__author__ = 'matthiaspoloczek' ''' Inspect the data used to generate hypers. The command line arg could be lrMU. ''' conn = S3Connection() conn = boto.connect_s3() bucket = conn.get_bucket('poloczeks3', validate=True) argv = sys.argv[1:] func_name = argv[0] directory = '/miso/data' num_pts = 1000 key = directory + '/hyper_{1}_IS_{0}_{2}_points'.format(0, func_name, num_pts) data0 = get_data_from_s3(bucket, key) key = directory + '/hyper_{1}_IS_{0}_{2}_points'.format(1, func_name, num_pts) data1 = get_data_from_s3(bucket, key) ### To inspect initial data # num_pts = 10 # key = directory+'/{1}_IS_{0}_{2}_points_repl_0'.format(0, func_name, num_pts) # data0 = get_data_from_s3(bucket, key) # key = directory+'/{1}_IS_{0}_{2}_points_repl_0'.format(1, func_name, num_pts) # data1 = get_data_from_s3(bucket, key) print np.mean(data0["vals"]) print np.mean(data1["vals"]) # # print np.mean(data0["noise"]) # print np.mean(data1["noise"])
Script to inspect hypers stored at S3 invoke as : python inspect_hypers.py miso_lrMU_hyper_ego Optional parameters for lrMU: "miso_lrMU_hyper_ego": "miso_lrMU_hyper_mkg": "miso_lrMU_hyper_pes": "miso_lrMU_hyper_mei": ''' conn = S3Connection() conn = boto.connect_s3() bucket = conn.get_bucket(s3_bucket_name, validate=True) # construct problem instance given CMD args argv = sys.argv[1:] if argv[0].find("ego") < 0 and argv[0].find("kg") < 0 and argv[0].find("mei") < 0 and argv[0].find("mkg") < 0\ and argv[0].find("pes") < 0: raise ValueError("No correct algo selected!") problem = identify_problem(argv, bucket) data = get_data_from_s3(bucket, problem.hyper_path) print "prior_mean = " + str(data["prior_mean"]) print "prior_sig = " + str(data["prior_sig"]) if argv[0].find("pes") >=0: print "hyperparam = " + str(data["hyperparam"]) print "hyperparam_mat = " + str(data["hyperparam_mat"]) else: print "hyper_bounds = " + str(data["hyper_bounds"]) print "hyperparam = " + str(data["hyperparam"]) print "loglikelihood = " + str(data["loglikelihood"])
def hyper_param(self):
    """Return the stored hyperparameters for this method on the currently
    selected objective function (keyed by its S3 function name)."""
    obj_name = self._obj_func[self._obj_func_idx].getFuncName()
    s3_key = "miso/hyper/{0}_{1}".format(self.method_name, obj_name)
    return get_data_from_s3(self._bucket, s3_key)['hyperparam']
__author__ = 'matthiaspoloczek' ''' Script to inspect data stored at S3 invoke as : python inspect_results_s3.py miso_lrMU_benchmark_mkgcandpts 0 where 0 is a natural integer determining a replication Optional parameters for lrMU: "miso_lrMU_benchmark_ego": "miso_lrMU_benchmark_mkg": "miso_lrMU_benchmark_mkgcandpts": "miso_lrMU_benchmark_pes": "miso_lrMU_benchmark_mei": ''' conn = S3Connection() conn = boto.connect_s3() bucket = conn.get_bucket(s3_bucket_name, validate=True) # construct problem instance given CMD args argv = sys.argv[1:] if argv[0].find("ego") < 0 and argv[0].find("kg") < 0 and argv[0].find("mei") < 0 and argv[0].find("mkg") < 0\ and argv[0].find("pes") < 0: raise ValueError("No correct algo selected!") problem = identify_problem(argv, bucket) data = get_data_from_s3(bucket, problem.result_path) # print data['sampled_is'] print data['raw_voi']