def run(self):
    """Execute the configured number of simulation epochs.

    Optionally prepares input data first, then launches one sim process per
    epoch. When inspection is enabled, each epoch may additionally be run on
    evaluation data and inspected; evaluation scores are accumulated and the
    mean score is logged (and printed to stdout in slave mode, where the
    driving process parses it).
    """
    if self.prepare_data:
        # Data preparation yields an extra command-line option to forward
        # to subsequent sim runs.
        conf = read_json(self.const).get("prepare_data")
        with pushd(self.working_dir):
            opt, value = prepare_data(conf)
            self.add_options[opt_to_str(opt)] = value
    scores = []
    # self.current_epoch is both the starting point and the loop counter,
    # so later epochs continue from wherever a previous call stopped.
    first_epoch = self.current_epoch
    last_epoch = self.current_epoch + self.epochs
    for self.current_epoch in xrange(first_epoch, last_epoch):
        logging.info("Running epoch {}:".format(self.current_epoch))
        run_proc(**self.construct_cmd())
        if not self.inspection:
            continue
        if self.evaluation_data:
            logging.info("running on evaluation data ...")
            run_proc(**self.construct_eval_run_cmd())
        logging.info("inspecting ... ")
        with pushd(self.working_dir):
            out = run_proc(**self.construct_inspect_cmd())
        if self.evaluation:
            # Inspect command prints a single float score on stdout.
            scores.append(float(out.strip()))
            logging.info("Evaluation score: {}".format(scores[-1]))
    if scores:
        final_score = sum(scores) / len(scores)
        logging.info("Final evaluation score: {}".format(final_score))
        if self.slave:
            # Slave mode: master process reads the score from stdout.
            print(final_score)
    logging.info("Done")
def run(self):
    """Run self.epochs simulation epochs, optionally preparing data first
    and inspecting/evaluating each epoch; logs the mean evaluation score
    and prints it in slave mode."""
    if self.prepare_data:
        # prepare_data returns an (option, value) pair that is forwarded to
        # subsequent sim invocations via add_options.
        prepare_data_conf = read_json(self.const).get("prepare_data")
        with pushd(self.working_dir):
            opt, value = prepare_data(prepare_data_conf)
            self.add_options[opt_to_str(opt)] = value
    evals = []
    # self.current_epoch is used as the loop variable so the epoch counter
    # persists across calls (the run continues from where it stopped).
    for self.current_epoch in xrange(self.current_epoch, self.current_epoch + self.epochs):
        logging.info("Running epoch {}:".format(self.current_epoch))
        run_proc(**self.construct_cmd())
        if self.inspection:
            if self.evaluation_data:
                logging.info("running on evaluation data ...")
                run_proc(**self.construct_eval_run_cmd())
            logging.info("inspecting ... ")
            with pushd(self.working_dir):
                o = run_proc(**self.construct_inspect_cmd())
            if self.evaluation:
                # The inspect command is expected to print one float score.
                evals.append(float(o.strip()))
                logging.info("Evaluation score: {}".format(evals[-1]))
    if len(evals) > 0:
        final_score = sum(evals) / len(evals)
        logging.info("Final evaluation score: {}".format(final_score))
        if self.slave:
            # Slave mode: the controlling process parses this from stdout.
            print final_score
    logging.info("Done")
def runner(x, vars, working_dir, wait=False, id=None, min=0.0, max=1.0):
    """Launch one simulation run with the variable values *x* bound to *vars*.

    Creates a fresh per-run working directory, writes a constants JSON with
    the variables substituted in, and spawns the sim as a subprocess.

    :param x: sequence of raw values, one per name in *vars*.
    :param vars: variable names (keys of the var-specs file).
    :param working_dir: parent directory; a per-run subdirectory named after
        *id* is created inside it.
    :param wait: when True, block and return the process output via
        communicate(); otherwise return the Popen handle immediately.
    :param id: run identifier; a fresh uuid1 is generated when None.
    :param min, max: normalization bounds forwarded to proc_vars.
        NOTE: id/min/max/vars shadow builtins; names kept because callers
        may pass them as keywords.
    :raises Exception: if the per-run working directory already exists.
    """
    if id is None:
        id = uuid.uuid1()
    working_dir = pj(working_dir, str(id))
    if os.path.exists(working_dir):
        # Fixed message grammar ("is already exists" -> "already exists").
        raise Exception("Working dir already exists: {}!".format(working_dir))
    make_dir(working_dir)
    const_json = pj(working_dir, os.path.basename(GlobalConfig.ConstFilename))
    specs = read_json(GlobalConfig.VarSpecsFile)
    with open(const_json, "w") as fptr:
        fptr.write(
            proc_vars(
                const=read_json(GlobalConfig.ConstFilename),
                var_specs=specs,
                vars=dict(zip(vars, x)),
                min=min,
                max=max,
            )
        )
    cmd = [
        RUN_SIM_PY,
        "--working-dir", working_dir,
        "--epochs", str(GlobalConfig.Epochs),
        "--const", const_json,
        "--slave",
        "--jobs", str(GlobalConfig.SimJobs),
    ] + GlobalConfig.AddOptions
    # If any evolved variable lives under a "prepare_data" path, the sim
    # must regenerate its input data for this run.
    for v in vars:
        path, _range = specs[v]  # renamed local: 'range' shadowed the builtin
        if "prepare_data" in path:
            cmd += ["--prepare-data"]
            break
    logging.info(" ".join(cmd))
    if GlobalConfig.Mock:
        # Mock mode: fake a short run that reports a constant score of 1.0.
        p = sub.Popen("sleep 1.0 && echo 1.0", shell=True,
                      stdout=sub.PIPE, stderr=sub.PIPE)
    else:
        p = sub.Popen(cmd, stdout=sub.PIPE, stderr=sub.PIPE)
    # Single wait/return tail shared by mock and real branches
    # (was duplicated in the original).
    if wait:
        return communicate(p)
    return p
def runner(x, vars, working_dir, wait=False, id=None, min=0.0, max=1.0):
    """Launch one sim run with values *x* bound to variable names *vars*
    in a fresh per-run subdirectory of *working_dir*; return the Popen
    handle, or the communicate() result when *wait* is True.
    NOTE(review): parameters id/min/max/vars shadow builtins — kept as-is
    since callers may pass them by keyword."""
    if id is None:
        id = uuid.uuid1()
    working_dir = pj(working_dir, str(id))
    if os.path.exists(working_dir):
        raise Exception(
            "Working dir is already exists {}!".format(working_dir))
    make_dir(working_dir)
    # Write the constants JSON with the evolved variables substituted in.
    const_json = pj(working_dir, os.path.basename(GlobalConfig.ConstFilename))
    specs = read_json(GlobalConfig.VarSpecsFile)
    with open(const_json, "w") as fptr:
        fptr.write(
            proc_vars(const=read_json(GlobalConfig.ConstFilename),
                      var_specs=specs,
                      vars=dict(zip(vars, x)),
                      min=min,
                      max=max))
    cmd = [
        RUN_SIM_PY,
        "--working-dir", working_dir,
        "--epochs", str(GlobalConfig.Epochs),
        "--const", const_json,
        "--slave",
        "--jobs", str(GlobalConfig.SimJobs)
    ] + GlobalConfig.AddOptions
    # Any variable under a "prepare_data" path forces data regeneration.
    for v in vars:
        path, range = specs[v]  # NOTE(review): 'range' shadows the builtin
        if "prepare_data" in path:
            cmd += ["--prepare-data"]
            break
    logging.info(" ".join(cmd))
    if GlobalConfig.Mock:
        # Mock mode: fake a run that sleeps and echoes a constant score.
        p = sub.Popen("sleep 1.0 && echo 1.0", shell=True,
                      stdout=sub.PIPE, stderr=sub.PIPE)
        if wait:
            return communicate(p)
        return p
    p = sub.Popen(cmd, stdout=sub.PIPE, stderr=sub.PIPE)
    if wait:
        return communicate(p)
    return p
def main(argv):
    """CLI entry point: parse arguments and launch the chosen evolving algorithm.

    Builds a help epilog listing the evolvable variables and the available
    algorithms (with their default attributes), parses *argv*, stores the
    run-wide settings into GlobalConfig, and invokes the algorithm over the
    selected variables.

    :param argv: command-line arguments (without the program name).
    :raises Exception: when the requested algorithm name is unknown.
    """
    epi = ""
    epi += "List of variables to evolve:\n"
    for k, v in read_json(GlobalConfig.VarSpecsFile).iteritems():
        # v == (path, (low, high)) per the var-specs schema.
        epi += "\t\t{}\n\t\t\tpath: {}, range: {}-{}\n".format(
            k, "/".join([str(subv) for subv in v[0]]), v[1][0], v[1][1])
    epi += "List of algorithms:\n"
    for a in ALGS:
        epi += "\t{}\n".format(a)
        inst = ALGS[a]({})
        # Collect public, non-callable attributes as the algo's tunables.
        def_attrs = dict([
            (attr, getattr(inst, attr)) for attr in dir(inst)
            if not attr.startswith("__") and not callable(getattr(inst, attr))
        ])
        for k, v in def_attrs.iteritems():
            epi += "\t\t{}, default: {}\n".format(k, v)
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description='Tool for evolving simulations dnn',
        epilog=epi)
    parser.add_argument(
        '-v', '--vars', required=False,
        help='Variables included in evolving, separated by ;. Or use all variables; Or if it is a file use all variables from that file'
    )
    # type=int added: the values are only ever rendered via str() downstream,
    # and sibling scripts assign ints to the same GlobalConfig fields.
    parser.add_argument('-e', '--epochs', required=False, type=int,
                        help='Epochs to run sim on each run', default=1)
    parser.add_argument('-j', '--jobs', required=False, type=int,
                        help='Number of parallel jobs for evolving procedure',
                        default=multiprocessing.cpu_count())
    parser.add_argument('-a', '--attr', required=False,
                        help='Attributes for algo: "attr_name=val;attr_name2=val2"',
                        default="")
    parser.add_argument('-sj', '--sim-jobs', required=False, type=int,
                        help='Sim jobs', default=1)
    # Help text typo fixed ("by defailt algo choosing by himself").
    parser.add_argument('-t', '--tag', required=False,
                        help='Tag for run, by default the algorithm chooses one itself',
                        default=None)
    parser.add_argument('-c', '--const', required=False,
                        help='Constants to work with, default %(default)s',
                        default=DnnSim.CONST_JSON)
    parser.add_argument('algo_name', nargs=1)
    if len(argv) == 0:
        parser.print_help()
        sys.exit(1)
    # parse_known_args: unrecognized options are forwarded to the sim runs.
    args, other = parser.parse_known_args(argv)
    algo_cls = ALGS.get(args.algo_name[0])
    if algo_cls is None:
        # Message typo fixed ("with then name").
        raise Exception("Can't find algo with the name {}".format(
            args.algo_name[0]))
    GlobalConfig.Epochs = args.epochs
    GlobalConfig.AddOptions = other
    GlobalConfig.SimJobs = args.sim_jobs
    GlobalConfig.Jobs = args.jobs
    GlobalConfig.ConstFilename = args.const
    a = algo_cls(parse_attrs(args.attr))
    vars_str = None
    if args.vars:
        if os.path.isfile(args.vars):
            # A file argument switches the var-specs source entirely.
            GlobalConfig.VarSpecsFile = args.vars
        else:
            vars_str = args.vars
    vars = ([v.strip() for v in vars_str.split(";") if v.strip()]
            if vars_str else read_json(GlobalConfig.VarSpecsFile).keys())
    a(vars, tag=args.tag)
        pj(spikes_dir, "timed_pattern_spikes.pb"),
        "--evaluation-data", pj(spikes_dir, "timed_pattern_spikes_test.pb")
]
# NOTE(review): 're' here appears to be the evolving-runner module imported
# under the alias 're' (it exposes GlobalConfig/runner), shadowing the stdlib
# regex module — confirm the import and consider renaming.
re.GlobalConfig.SimJobs = 8
re.GlobalConfig.Jobs = 1
work_dir = make_dir(pj(runs_dir, "bo"))
vars = read_json(re.GlobalConfig.VarSpecsFile).keys()

#id = 0

class ConcreteContinuousGaussModel(ContinuousGaussModel):
    """Bayesian-optimization surrogate that scores a parameter vector by
    launching a simulation run and transforming its reported score."""

    def __init__(self, ndim, params):
        # Only Gaussian surrogates are supported by this model.
        assert "Gaussian" in params["surr_name"]
        ContinuousGaussModel.__init__(self, ndim, params)

    def evaluateSample(self, Xin):
        # Blocking run; 'ans' is the sim's stdout score.
        # NOTE(review): assumes the score is a nonzero numeric — -log(|ans|)
        # diverges at 0; verify against the runner's output contract.
        ans = re.runner(Xin, vars, work_dir, wait=True)
        return -np.log(np.abs(ans))

#def func(x):
#    ans = re.runner(x, vars, work_dir, wait=True)
def main(argv):
    """Command-line entry point: configure GlobalConfig from *argv* and run
    the requested evolving algorithm over the selected variables."""
    # Assemble the help epilog from pieces, then join once.
    pieces = ["List of variables to evolve:\n"]
    for var_name, spec in read_json(GlobalConfig.VarSpecsFile).iteritems():
        pieces.append("\t\t{}\n\t\t\tpath: {}, range: {}-{}\n".format(
            var_name,
            "/".join([str(subv) for subv in spec[0]]),
            spec[1][0], spec[1][1]))
    pieces.append("List of algorithms:\n")
    for algo_key in ALGS:
        pieces.append("\t{}\n".format(algo_key))
        instance = ALGS[algo_key]({})
        # Public, non-callable attributes are the algo's tunable defaults.
        defaults = dict([
            (attr, getattr(instance, attr)) for attr in dir(instance)
            if not attr.startswith("__")
            and not callable(getattr(instance, attr))
        ])
        for attr, default in defaults.iteritems():
            pieces.append("\t\t{}, default: {}\n".format(attr, default))
    epi = "".join(pieces)
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description='Tool for evolving simulations dnn',
        epilog=epi)
    parser.add_argument(
        '-v', '--vars', required=False,
        help='Variables included in evolving, separated by ;. Or use all variables; Or if it is a file use all variables from that file')
    parser.add_argument(
        '-e', '--epochs', required=False,
        help='Epochs to run sim on each run', default=1)
    parser.add_argument(
        '-j', '--jobs', required=False, type=int,
        help='Number of parallel jobs for evolving procedure',
        default=multiprocessing.cpu_count())
    parser.add_argument(
        '-a', '--attr', required=False,
        help='Attributes for algo: "attr_name=val;attr_name2=val2"',
        default="")
    parser.add_argument(
        '-sj', '--sim-jobs', required=False,
        help='Sim jobs', default=1)
    parser.add_argument(
        '-t', '--tag', required=False,
        help='Tag for run, by defailt algo choosing by himself',
        default=None)
    parser.add_argument(
        '-c', '--const', required=False,
        help='Constants to work with, default %(default)s',
        default=DnnSim.CONST_JSON)
    parser.add_argument('algo_name', nargs=1)
    if not argv:
        parser.print_help()
        sys.exit(1)
    # Unknown options are intentionally kept and forwarded to the sim runs.
    args, extra = parser.parse_known_args(argv)
    algo_cls = ALGS.get(args.algo_name[0])
    if algo_cls is None:
        raise Exception("Can't find algo with then name {}".format(args.algo_name[0]))
    GlobalConfig.Epochs = args.epochs
    GlobalConfig.AddOptions = extra
    GlobalConfig.SimJobs = args.sim_jobs
    GlobalConfig.Jobs = args.jobs
    GlobalConfig.ConstFilename = args.const
    algo = algo_cls(parse_attrs(args.attr))
    raw_vars = None
    if args.vars:
        if os.path.isfile(args.vars):
            # A file argument replaces the var-specs source wholesale.
            GlobalConfig.VarSpecsFile = args.vars
        else:
            raw_vars = args.vars
    if raw_vars:
        chosen = [v.strip() for v in raw_vars.split(";") if v.strip()]
    else:
        chosen = read_json(GlobalConfig.VarSpecsFile).keys()
    algo(chosen, tag=args.tag)
# NOTE(review): 're' appears to be the evolving-runner module imported under
# the alias 're' (it exposes GlobalConfig/runner/communicate), shadowing the
# stdlib regex module — confirm the import and consider renaming.
re.GlobalConfig.Epochs = 5
re.GlobalConfig.AddOptions = [
    "--spike-input", pj(spikes_dir, "timed_pattern_spikes.pb"),
    "--evaluation-data", pj(spikes_dir, "timed_pattern_spikes_test.pb")
]
re.GlobalConfig.SimJobs = 1
re.GlobalConfig.Jobs = 1
work_dir = make_dir(pj(runs_dir, "bo_gpyopt"))
vars = read_json(re.GlobalConfig.VarSpecsFile).keys()

# Maximum number of sim subprocesses kept in flight at once.
n_cores = 8

def evaluateSample(Xin):
    """Evaluate a batch of parameter vectors by launching sim runs in
    parallel, draining them in groups of n_cores.
    NOTE(review): the function body continues beyond this chunk — the
    collected 'answers' are presumably reduced/returned after this view."""
    Xin = np.ndarray.tolist(Xin)
    procs = []
    answers = []
    for x in Xin:
        logging.info("Running with input {}".format(x))
        # Non-blocking launch; handle collected for the batched drain below.
        p = re.runner(x, vars, work_dir, wait=False)
        procs.append(p)
        if len(procs) >= n_cores:
            # Drain the current batch before launching more runs.
            for p in procs:
                answers.append(re.communicate(p))
            procs = []