def test_clone(): config = config0() config2 = clone(config) nodeset = set(dfs(config)) assert not any(n in nodeset for n in dfs(config2)) foo = recursive_set_rng_kwarg(config, scope.rng_from_seed(5)) r = rec_eval(foo) print r r2 = rec_eval(recursive_set_rng_kwarg(config2, scope.rng_from_seed(5))) print r2 assert r == r2
def test_vectorize_multipath(): N = as_apply(15) p0 = hp_uniform('p0', 0, 1) loss = hp_choice('p1', [1, p0, -p0])**2 expr_idxs = scope.range(N) vh = VectorizeHelper(loss, expr_idxs, build=True) vloss = vh.v_expr print vloss full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()]) new_vc = recursive_set_rng_kwarg( full_output, as_apply(np.random.RandomState(1)), ) losses, idxs, vals = rec_eval(new_vc) print 'losses', losses print 'idxs p0', idxs['p0'] print 'vals p0', vals['p0'] print 'idxs p1', idxs['p1'] print 'vals p1', vals['p1'] p0dct = dict(zip(idxs['p0'], vals['p0'])) p1dct = dict(zip(idxs['p1'], vals['p1'])) for ii, li in enumerate(losses): print ii, li if p1dct[ii] != 0: assert li == p0dct[ii]**2 else: assert li == 1
def test_vectorize_multipath(): N = as_apply(15) p0 = hp_uniform('p0', 0, 1) loss = hp_choice('p1', [1, p0, -p0]) ** 2 expr_idxs = scope.range(N) vh = VectorizeHelper(loss, expr_idxs, build=True) vloss = vh.v_expr print vloss full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()]) new_vc = recursive_set_rng_kwarg( full_output, as_apply(np.random.RandomState(1)), ) losses, idxs, vals = rec_eval(new_vc) print 'losses', losses print 'idxs p0', idxs['p0'] print 'vals p0', vals['p0'] print 'idxs p1', idxs['p1'] print 'vals p1', vals['p1'] p0dct = dict(zip(idxs['p0'], vals['p0'])) p1dct = dict(zip(idxs['p1'], vals['p1'])) for ii, li in enumerate(losses): print ii, li if p1dct[ii] != 0: assert li == p0dct[ii] ** 2 else: assert li == 1
def __init__(self, bandit, seed=seed, cmd=None, workdir=None):
    """Set up a vectorized sampling graph for `bandit.expr`.

    Builds a VectorizeHelper over the bandit's expression, sanity-checks
    that vectorization did not mutate the original graph, and installs a
    seeded RandomState into the idxs/vals sampling graph.

    # NOTE(review): the default `seed=seed` refers to a module-level
    # name not visible in this chunk — confirm where it is defined.
    """
    self.bandit = bandit
    self.seed = seed
    self.rng = np.random.RandomState(self.seed)
    self.cmd = cmd
    self.workdir = workdir
    self.s_new_ids = pyll.Literal('new_ids')  # -- list at eval-time
    before = pyll.dfs(self.bandit.expr)
    # -- raises exception if expr contains cycles
    pyll.toposort(self.bandit.expr)
    vh = self.vh = VectorizeHelper(self.bandit.expr, self.s_new_ids)
    # -- raises exception if v_expr contains cycles
    pyll.toposort(vh.v_expr)
    idxs_by_label = vh.idxs_by_label()
    vals_by_label = vh.vals_by_label()
    after = pyll.dfs(self.bandit.expr)
    # -- try to detect if VectorizeHelper screwed up anything inplace
    assert before == after
    # -- idxs, vals and the bandit's params must all cover the same labels
    assert set(idxs_by_label.keys()) == set(vals_by_label.keys())
    assert set(idxs_by_label.keys()) == set(self.bandit.params.keys())
    # -- make the graph runnable and SON-encodable
    #    N.B. operates inplace
    self.s_idxs_vals = recursive_set_rng_kwarg(
        scope.pos_args(idxs_by_label, vals_by_label),
        pyll.as_apply(self.rng))
    # -- raises an exception if no topological ordering exists
    pyll.toposort(self.s_idxs_vals)
def test_vectorize_simple(): N = as_apply(15) p0 = hp_uniform('p0', 0, 1) loss = p0 ** 2 print loss expr_idxs = scope.range(N) vh = VectorizeHelper(loss, expr_idxs, build=True) vloss = vh.v_expr full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()]) fo2 = replace_repeat_stochastic(full_output) new_vc = recursive_set_rng_kwarg( fo2, as_apply(np.random.RandomState(1)), ) #print new_vc losses, idxs, vals = rec_eval(new_vc) print 'losses', losses print 'idxs p0', idxs['p0'] print 'vals p0', vals['p0'] p0dct = dict(zip(idxs['p0'], vals['p0'])) for ii, li in enumerate(losses): assert p0dct[ii] ** 2 == li
def test_vectorize_simple(): N = as_apply(15) p0 = hp_uniform('p0', 0, 1) loss = p0**2 print loss expr_idxs = scope.range(N) vh = VectorizeHelper(loss, expr_idxs, build=True) vloss = vh.v_expr full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()]) fo2 = replace_repeat_stochastic(full_output) new_vc = recursive_set_rng_kwarg( fo2, as_apply(np.random.RandomState(1)), ) #print new_vc losses, idxs, vals = rec_eval(new_vc) print 'losses', losses print 'idxs p0', idxs['p0'] print 'vals p0', vals['p0'] p0dct = dict(zip(idxs['p0'], vals['p0'])) for ii, li in enumerate(losses): assert p0dct[ii]**2 == li
def evaluate(self, config, ctrl):
    """Evaluate `config` through the pyll graph and `self.fn`.

    Exceptions raised by the evaluation are matched against the
    (predicate, handler) pairs in `self.exceptions`; an unmatched
    exception is re-raised.

    # NOTE(review): this chunk appears truncated — no return statement
    # is visible; confirm the remainder against the full file.
    """
    memo = self.memo_from_config(config)
    memo[self.pyll_ctrl] = ctrl
    if self.init_pyll_memo:
        # -- hook lets subclass/user augment the memo before evaluation
        memo = self.init_pyll_memo(memo, config=config, ctrl=ctrl)
    if self.rng is not None and not self.installed_rng:
        # -- N.B. this modifies the expr graph in-place
        #    XXX this feels wrong
        self.expr = recursive_set_rng_kwarg(self.expr,
                                            pyll.as_apply(self.rng))
        self.installed_rng = True
    try:
        # -- the "work" of evaluating `config` can be written
        #    either into the pyll part (self.expr)
        #    or the normal Python part (self.fn)
        pyll_rval = pyll.rec_eval(self.expr, memo=memo)
        rval = self.fn(pyll_rval)
    except Exception, e:
        n_match = 0
        for match, match_pair in self.exceptions:
            if match(e):
                # -- registered handler converts the exception to a result
                rval = match_pair(e)
                logger.info('Caught fn exception %s' % str(rval))
                n_match += 1
                break
        if n_match == 0:
            raise
def test_clone(): config = config0() config2 = clone(config) nodeset = set(dfs(config)) assert not any(n in nodeset for n in dfs(config2)) foo = recursive_set_rng_kwarg( config, scope.rng_from_seed(5)) r = rec_eval(foo) print r r2 = rec_eval( recursive_set_rng_kwarg( config2, scope.rng_from_seed(5))) print r2 assert r == r2
def evaluate(self, config, ctrl, attach_attachments=True):
    """Evaluate `config`, normalize the result into a result dict.

    Returns a dict with at least 'loss' (float or None) and 'status'
    (one of base.STATUS_STRINGS).  When `attach_attachments` is True,
    any 'attachments' entry is popped off the result and written into
    `ctrl.attachments`.

    Raises ValueError for a dict result without 'loss' or with an
    invalid status; TypeError for a result that is neither number
    nor dict.
    """
    memo = self.memo_from_config(config)
    self.use_obj_for_literal_in_memo(ctrl, base.Ctrl, memo)
    if self.rng is not None and not self.installed_rng:
        # -- N.B. this modifies the expr graph in-place
        #    XXX this feels wrong
        self.expr = recursive_set_rng_kwarg(self.expr,
                                            pyll.as_apply(self.rng))
        self.installed_rng = True
    if self.pass_expr_memo_ctrl:
        # -- low-level calling convention: fn evaluates the graph itself
        rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl, *self.args)
    else:
        # -- the "work" of evaluating `config` can be written
        #    either into the pyll part (self.expr)
        #    or the normal Python part (self.fn)
        pyll_rval = pyll.rec_eval(
            self.expr,
            memo=memo,
            print_node_on_error=self.rec_eval_print_node_on_error)
        rval = self.fn(pyll_rval, *self.args)
    if isinstance(rval, (float, int, np.number)):
        # -- bare number is shorthand for {'loss': number}
        dict_rval = {'loss': rval}
    elif isinstance(rval, (dict,)):
        dict_rval = rval
        if 'loss' not in dict_rval:
            raise ValueError('dictionary must have "loss" key',
                             dict_rval.keys())
    else:
        raise TypeError('invalid return type (neither number nor dict)',
                        rval)
    if dict_rval['loss'] is not None:
        # -- fail if cannot be cast to float
        dict_rval['loss'] = float(dict_rval['loss'])
    dict_rval.setdefault('status', base.STATUS_OK)
    if dict_rval['status'] not in base.STATUS_STRINGS:
        raise ValueError('invalid status string', dict_rval['status'])
    if attach_attachments:
        attachments = dict_rval.pop('attachments', {})
        for key, val in attachments.items():
            ctrl.attachments[key] = val
    # -- don't do this here because SON-compatibility is only a requirement
    #    for trials destined for a mongodb. In-memory rvals can contain
    #    anything.
    #return base.SONify(dict_rval)
    return dict_rval
def evaluate(self, config, ctrl, attach_attachments=True):
    """Evaluate `config` and normalize the outcome into a result dict.

    Returns a dict with at least 'loss' (cast to float unless None) and
    'status' (validated against base.STATUS_STRINGS).  When
    `attach_attachments` is True, the result's 'attachments' entry is
    moved into `ctrl.attachments`.

    Raises ValueError for a dict result without 'loss' or with an
    invalid status; TypeError for other return types.
    """
    memo = self.memo_from_config(config)
    self.use_obj_for_literal_in_memo(ctrl, base.Ctrl, memo)
    if self.rng is not None and not self.installed_rng:
        # -- N.B. this modifies the expr graph in-place
        #    XXX this feels wrong
        self.expr = recursive_set_rng_kwarg(self.expr,
                                            pyll.as_apply(self.rng))
        self.installed_rng = True
    if self.pass_expr_memo_ctrl:
        # -- low-level calling convention: fn evaluates the graph itself
        rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl)
    else:
        # -- the "work" of evaluating `config` can be written
        #    either into the pyll part (self.expr)
        #    or the normal Python part (self.fn)
        pyll_rval = pyll.rec_eval(
            self.expr,
            memo=memo,
            print_node_on_error=self.rec_eval_print_node_on_error)
        rval = self.fn(pyll_rval)
    if isinstance(rval, (float, int, np.number)):
        # -- bare number is shorthand for {'loss': number}
        dict_rval = {'loss': rval}
    elif isinstance(rval, (dict, )):
        dict_rval = rval
        if 'loss' not in dict_rval:
            raise ValueError('dictionary must have "loss" key',
                             dict_rval.keys())
    else:
        raise TypeError('invalid return type (neither number nor dict)',
                        rval)
    if dict_rval['loss'] is not None:
        # -- fail if cannot be cast to float
        dict_rval['loss'] = float(dict_rval['loss'])
    dict_rval.setdefault('status', base.STATUS_OK)
    if dict_rval['status'] not in base.STATUS_STRINGS:
        raise ValueError('invalid status string', dict_rval['status'])
    if attach_attachments:
        attachments = dict_rval.pop('attachments', {})
        for key, val in attachments.items():
            ctrl.attachments[key] = val
    # -- don't do this here because SON-compatibility is only a requirement
    #    for trials destined for a mongodb. In-memory rvals can contain
    #    anything.
    #return base.SONify(dict_rval)
    return dict_rval
def __init__(self, fn, expr,
             args=None,
             workdir=None,
             pass_expr_memo_ctrl=None,
             **bandit_kwargs):
    """Wrap `fn` and a pyll `expr` as a Domain-like object.

    Parameters
    ----------
    fn : callable
        Objective function; called with the evaluated pyll result, or
        with (expr, memo, ctrl) when `pass_expr_memo_ctrl` is true.
    expr : pyll.Apply
        Search-space expression graph.
    args : sequence (or None)
        Extra positional arguments stored for later calls to `fn`.
        Defaults to an empty list.  (Fixed: the original used a mutable
        default `args=[]`, which is shared across all instances.)
    workdir : string (or None)
        Working directory used while evaluating, if any.
    pass_expr_memo_ctrl : bool (or None)
        If None, read from `fn.fmin_pass_expr_memo_ctrl` (default False).
    **bandit_kwargs
        Forwarded to `base.Bandit.__init__`.
    """
    self.cmd = ('domain_attachment', 'FMinIter_Domain')
    self.fn = fn
    self.expr = expr
    # -- avoid the shared-mutable-default pitfall while keeping the
    #    default behavior (empty argument list) unchanged
    self.args = [] if args is None else args
    if pass_expr_memo_ctrl is None:
        self.pass_expr_memo_ctrl = getattr(fn,
                                           'fmin_pass_expr_memo_ctrl',
                                           False)
    else:
        self.pass_expr_memo_ctrl = pass_expr_memo_ctrl
    base.Bandit.__init__(self, expr, do_checks=False, **bandit_kwargs)

    # -- This code was stolen from base.BanditAlgo, a class which may soon
    #    be gone
    self.workdir = workdir
    self.s_new_ids = pyll.Literal('new_ids')  # -- list at eval-time
    before = pyll.dfs(self.expr)
    # -- raises exception if expr contains cycles
    pyll.toposort(self.expr)
    vh = self.vh = VectorizeHelper(self.expr, self.s_new_ids)
    # -- raises exception if v_expr contains cycles
    pyll.toposort(vh.v_expr)
    idxs_by_label = vh.idxs_by_label()
    vals_by_label = vh.vals_by_label()
    after = pyll.dfs(self.expr)
    # -- try to detect if VectorizeHelper screwed up anything inplace
    assert before == after
    assert set(idxs_by_label.keys()) == set(vals_by_label.keys())
    assert set(idxs_by_label.keys()) == set(self.params.keys())
    # -- make the graph runnable and SON-encodable
    #    N.B. operates inplace
    self.s_idxs_vals = recursive_set_rng_kwarg(
        pyll.scope.pos_args(idxs_by_label, vals_by_label),
        pyll.as_apply(self.rng))
    # -- raises an exception if no topological ordering exists
    pyll.toposort(self.s_idxs_vals)
def evaluate(self, config, ctrl):
    """Return a result document

    Evaluates the pyll graph under a memo built from `config`, with
    `ctrl` substituted for the Ctrl literal.  Exceptions are matched
    against the (predicate, handler) pairs in `self.exceptions`; an
    unmatched exception is re-raised.

    # NOTE(review): this chunk appears truncated — no return statement
    # is visible; confirm the remainder against the full file.
    """
    memo = self.memo_from_config(config)
    self.use_obj_for_literal_in_memo(ctrl, Ctrl, memo)
    if self.rng is not None and not self.installed_rng:
        # -- N.B. this modifies the expr graph in-place
        #    XXX this feels wrong
        self.expr = recursive_set_rng_kwarg(self.expr,
                                            pyll.as_apply(self.rng))
        self.installed_rng = True
    try:
        r_dct = pyll.rec_eval(self.expr, memo=memo)
    except Exception, e:
        n_match = 0
        for match, match_pair in self.exceptions:
            if match(e):
                # -- registered handler converts the exception to a result
                r_dct = match_pair(e)
                n_match += 1
                break
        if n_match == 0:
            raise
def test_vectorize_config0():
    """Vectorize a config of dependent hyperparameters and check that
    conditional parameter p1 is sampled only when p3/p6 select it.

    N.B. the local variable names below become the config's keys via
    `locals()` — do not rename them.
    """
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_loguniform('p1', 2, 3)
    p2 = hp_choice('p2', [-1, p0])
    p3 = hp_choice('p3', [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice('p6', [-3, p1])
    d = locals()
    d['p1'] = None  # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)
    # -- sample count is left symbolic and bound via memo at rec_eval
    N = as_apply('N:TBA')
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr
    full_output = as_apply([vconfig,
                            vh.idxs_by_label(),
                            vh.vals_by_label()])
    if 1:
        print '=' * 80
        print 'VECTORIZED'
        print full_output
        print '\n' * 1
    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print '=' * 80
        print 'VECTORIZED STOCHASTIC'
        print fo2
        print '\n' * 1
    new_vc = recursive_set_rng_kwarg(fo2,
                                     as_apply(np.random.RandomState(1)))
    if 0:
        print '=' * 80
        print 'VECTORIZED STOCHASTIC WITH RNGS'
        print new_vc
    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})
    print 'foo[0]', foo[0]
    print 'foo[1]', foo[1]
    assert len(foo) == Nval
    if 0:
        # XXX refresh these values to lock down sampler
        # NOTE(review): indentation reconstructed — these two asserts
        # appear to sit under the disabled block; confirm.
        assert foo[0] == {
            'p0': 0.39676747423066994,
            'p1': None,
            'p2': 0.39676747423066994,
            'p3': 2.1281244479293568,
            'p4': 1,
            'p5': (3, 4, 0.39676747423066994)}
        assert foo[1] != foo[2]
    print idxs
    print vals['p3']
    print vals['p6']
    print idxs['p1']
    print vals['p1']
    assert len(vals['p3']) == Nval
    assert len(vals['p6']) == Nval
    # -- p1 is conditional, so it must not be sampled on every trial
    assert len(idxs['p1']) < Nval
    p1d = dict(zip(idxs['p1'], vals['p1']))
    for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
        if p3v == p6v == 0:
            # -- neither chooser selected p1, so no draw for this trial
            assert ii not in idxs['p1']
        if p3v:
            assert foo[ii]['p3'] == p1d[ii]
        if p6v:
            print 'p6', foo[ii]['p6'], p1d[ii]
            assert foo[ii]['p6'] == p1d[ii]
def __init__(self, bandit, seed=seed, cmd=None, workdir=None):
    """Build a vectorized, runnable sampling graph for `bandit.template`.

    Clones the template, vectorizes it by node-id, strips
    non-stochastic nodes from the idxs/vals dictionaries, installs a
    seeded RandomState, and computes pretty document-coordinate strings
    for the remaining node ids.

    # NOTE(review): the default `seed=seed` refers to a module-level
    # name not visible in this chunk — confirm where it is defined.
    """
    self.bandit = bandit
    self.seed = seed
    self.rng = np.random.RandomState(self.seed)
    self.cmd = cmd
    self.workdir = workdir
    self.new_ids = ['dummy_id']
    # -- N.B. not necessarily actually a range
    self.s_new_ids = pyll.Literal(self.new_ids)
    self.template_clone_memo = {}
    template = pyll.clone(self.bandit.template, self.template_clone_memo)
    vh = self.vh = VectorizeHelper(template, self.s_new_ids)
    vh.build_idxs()
    vh.build_vals()
    # the keys (nid) here are strings like 'node_5'
    idxs_by_nid = vh.idxs_by_id()
    vals_by_nid = vh.vals_by_id()
    name_by_nid = vh.name_by_id()
    assert set(idxs_by_nid.keys()) == set(vals_by_nid.keys())
    assert set(name_by_nid.keys()) == set(vals_by_nid.keys())

    # -- replace repeat(dist(...)) with vectorized versions
    t_i_v = replace_repeat_stochastic(
        pyll.as_apply([
            vh.vals_memo[template], idxs_by_nid, vals_by_nid]))
    assert t_i_v.name == 'pos_args'
    template, s_idxs_by_nid, s_vals_by_nid = t_i_v.pos_args

    # -- fetch the dictionaries off the top of the cloned graph
    idxs_by_nid = dict(s_idxs_by_nid.named_args)
    vals_by_nid = dict(s_vals_by_nid.named_args)

    # -- remove non-stochastic nodes from the idxs and vals
    #    because
    #    (a) they should be irrelevant for BanditAlgo operation,
    #    (b) they can be reconstructed from the template and the
    #        stochastic choices, and
    #    (c) they are often annoying when printing / saving.
    for node_id, name in name_by_nid.items():
        if name not in pyll.stochastic.implicit_stochastic_symbols:
            del name_by_nid[node_id]
            del vals_by_nid[node_id]
            del idxs_by_nid[node_id]
        elif name == 'one_of':
            # -- one_of nodes too, because they are duplicates of randint
            del name_by_nid[node_id]
            del vals_by_nid[node_id]
            del idxs_by_nid[node_id]

    # -- make the graph runnable and SON-encodable
    #    N.B. operates inplace
    self.s_specs_idxs_vals = recursive_set_rng_kwarg(
        scope.pos_args(template, idxs_by_nid, vals_by_nid),
        pyll.as_apply(self.rng))

    self.vtemplate = template
    self.idxs_by_nid = idxs_by_nid
    self.vals_by_nid = vals_by_nid
    self.name_by_nid = name_by_nid

    # -- compute some document coordinate strings for the node_ids
    pnames = pretty_names(bandit.template, prefix=None)
    doc_coords = self.doc_coords = {}
    for node, pname in pnames.items():
        cnode = self.template_clone_memo[node]
        if cnode.name == 'one_of':
            # -- a one_of choice is implemented by a randint node
            choice_node = vh.choice_memo[cnode]
            assert choice_node.name == 'randint'
            doc_coords[vh.node_id[choice_node]] = pname  #+ '.randint'
        if cnode in vh.node_id and vh.node_id[cnode] in name_by_nid:
            doc_coords[vh.node_id[cnode]] = pname
        else:
            #print 'DROPPING', node
            pass
def __init__(self, fn, expr,
             workdir=None,
             pass_expr_memo_ctrl=None,
             name=None,
             loss_target=None,
             ):
    """
    Parameters
    ----------

    fn : callable
        This stores the `fn` argument to `fmin`. (See
        `hyperopt.fmin.fmin`)

    expr : hyperopt.pyll.Apply
        This is the `space` argument to `fmin`. (See
        `hyperopt.fmin.fmin`)

    workdir : string (or None)
        If non-None, the current working directory will be `workdir`
        while `expr` and `fn` are evaluated. (XXX Currently only
        respected by jobs run via MongoWorker)

    pass_expr_memo_ctrl : bool
        If True, `fn` will be called like this:
        `fn(self.expr, memo, ctrl)`,
        where `memo` is a dictionary mapping `Apply` nodes to their
        computed values, and `ctrl` is a `Ctrl` instance for
        communicating with a Trials database.  This lower-level calling
        convention is useful if you want to call e.g.
        `hyperopt.pyll.rec_eval` yourself in some customized way.

    name : string (or None)
        Label, used for pretty-printing.

    loss_target : float (or None)
        The actual or estimated minimum of `fn`.
        Some optimization algorithms may behave differently if their
        first objective is to find an input that achieves a certain
        value, rather than the more open-ended objective of pure
        minimization.
        XXX: Move this from Domain to be an fmin arg.
    """
    self.fn = fn
    if pass_expr_memo_ctrl is None:
        self.pass_expr_memo_ctrl = getattr(fn,
                                           'fmin_pass_expr_memo_ctrl',
                                           False)
    else:
        self.pass_expr_memo_ctrl = pass_expr_memo_ctrl

    self.expr = pyll.as_apply(expr)
    # -- collect the hyperparameter nodes, keyed by label; duplicate
    #    labels are an error
    self.params = {}
    for node in pyll.dfs(self.expr):
        if node.name == 'hyperopt_param':
            label = node.arg['label'].obj
            if label in self.params:
                raise DuplicateLabel(label)
            self.params[label] = node.arg['obj']

    self.loss_target = loss_target
    self.name = name
    self.workdir = workdir
    self.s_new_ids = pyll.Literal('new_ids')  # -- list at eval-time
    before = pyll.dfs(self.expr)
    # -- raises exception if expr contains cycles
    pyll.toposort(self.expr)
    vh = self.vh = VectorizeHelper(self.expr, self.s_new_ids)
    # -- raises exception if v_expr contains cycles
    pyll.toposort(vh.v_expr)
    idxs_by_label = vh.idxs_by_label()
    vals_by_label = vh.vals_by_label()
    after = pyll.dfs(self.expr)
    # -- try to detect if VectorizeHelper screwed up anything inplace
    assert before == after
    assert set(idxs_by_label.keys()) == set(vals_by_label.keys())
    assert set(idxs_by_label.keys()) == set(self.params.keys())

    # -- the rng is a placeholder literal, bound at sampling time
    self.s_rng = pyll.Literal('rng-placeholder')
    # -- N.B. operates inplace:
    self.s_idxs_vals = recursive_set_rng_kwarg(
        pyll.scope.pos_args(idxs_by_label, vals_by_label),
        self.s_rng)
    # -- raises an exception if no topological ordering exists
    pyll.toposort(self.s_idxs_vals)

    # -- Protocol for serialization.
    #    self.cmd indicates to e.g. MongoWorker how this domain
    #    should be [un]serialized.
    #    XXX This mechanism deserves review as support for ipython
    #        workers improves.
    self.cmd = ('domain_attachment', 'FMinIter_Domain')
def test_vectorize_config0():
    """Vectorize config0 by node-id and check that the conditional
    uniform node p1 is only sampled when p3 selects it."""
    config = config0()
    # -- locate the p1 uniform node inside the p3 choice of config0
    assert 'p3' == config.named_args[2][0]
    p1 = config.named_args[2][1].pos_args[1]
    assert p1.name == 'uniform'
    assert p1.pos_args[0]._obj == 2
    assert p1.pos_args[1]._obj == 3
    N = as_apply(5)
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs)
    vh.build_idxs()
    vh.build_vals()
    vconfig = vh.vals_memo[expr]
    full_output = as_apply([vconfig, vh.idxs_by_id(), vh.vals_by_id()])
    if 0:
        print '=' * 80
        print 'VECTORIZED'
        print full_output
        print '\n' * 1
    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print '=' * 80
        print 'VECTORIZED STOCHASTIC'
        print fo2
        print '\n' * 1
    new_vc = recursive_set_rng_kwarg(
        fo2,
        as_apply(np.random.RandomState(1)))
    if 0:
        print '=' * 80
        print 'VECTORIZED STOCHASTIC WITH RNGS'
        print new_vc
    foo, idxs, vals = rec_eval(new_vc)
    print foo
    #print idxs
    #print vals
    assert len(foo) == 5
    assert foo[0] == {
        'p0': 0.39676747423066994,
        'p2': 0.39676747423066994,
        'p3': 2.1281244479293568,
        'p4': 1,
        'p5': (3, 4, 0.39676747423066994)}
    assert foo[1] != foo[2]
    if 0:
        # NOTE(review): indentation reconstructed — these prints appear
        # to be the only statements under this disabled block; confirm.
        print idxs[vh.node_id[p1]]
        print vals[vh.node_id[p1]]
    # - p1 is only used sometimes
    assert len(idxs[vh.node_id[p1]]) < 5
    for ii in range(5):
        if ii in idxs[vh.node_id[p1]]:
            # -- p3 chose p1 for trial ii: result must equal the draw
            assert foo[ii]['p3'] == vals[vh.node_id[p1]][
                list(idxs[vh.node_id[p1]]).index(ii)]
        else:
            # -- otherwise p3 falls back to the constant -2
            assert foo[ii]['p3'] == -2, foo[ii]['p3']
def __init__(
    self, fn, expr,
    workdir=None,
    pass_expr_memo_ctrl=None,
    name=None,
    loss_target=None,
):
    """
    Parameters
    ----------

    fn : callable
        This stores the `fn` argument to `fmin`. (See
        `hyperopt.fmin.fmin`)

    expr : hyperopt.pyll.Apply
        This is the `space` argument to `fmin`. (See
        `hyperopt.fmin.fmin`)

    workdir : string (or None)
        If non-None, the current working directory will be `workdir`
        while `expr` and `fn` are evaluated. (XXX Currently only
        respected by jobs run via MongoWorker)

    pass_expr_memo_ctrl : bool
        If True, `fn` will be called like this:
        `fn(self.expr, memo, ctrl)`,
        where `memo` is a dictionary mapping `Apply` nodes to their
        computed values, and `ctrl` is a `Ctrl` instance for
        communicating with a Trials database.  This lower-level calling
        convention is useful if you want to call e.g.
        `hyperopt.pyll.rec_eval` yourself in some customized way.

    name : string (or None)
        Label, used for pretty-printing.

    loss_target : float (or None)
        The actual or estimated minimum of `fn`.
        Some optimization algorithms may behave differently if their
        first objective is to find an input that achieves a certain
        value, rather than the more open-ended objective of pure
        minimization.
        XXX: Move this from Domain to be an fmin arg.
    """
    self.fn = fn
    if pass_expr_memo_ctrl is None:
        self.pass_expr_memo_ctrl = getattr(fn,
                                           'fmin_pass_expr_memo_ctrl',
                                           False)
    else:
        self.pass_expr_memo_ctrl = pass_expr_memo_ctrl

    self.expr = pyll.as_apply(expr)
    # -- collect the hyperparameter nodes, keyed by label; duplicate
    #    labels are an error
    self.params = {}
    for node in pyll.dfs(self.expr):
        if node.name == 'hyperopt_param':
            label = node.arg['label'].obj
            if label in self.params:
                raise DuplicateLabel(label)
            self.params[label] = node.arg['obj']

    self.loss_target = loss_target
    self.name = name
    self.workdir = workdir
    self.s_new_ids = pyll.Literal('new_ids')  # -- list at eval-time
    before = pyll.dfs(self.expr)
    # -- raises exception if expr contains cycles
    pyll.toposort(self.expr)
    vh = self.vh = VectorizeHelper(self.expr, self.s_new_ids)
    # -- raises exception if v_expr contains cycles
    pyll.toposort(vh.v_expr)
    idxs_by_label = vh.idxs_by_label()
    vals_by_label = vh.vals_by_label()
    after = pyll.dfs(self.expr)
    # -- try to detect if VectorizeHelper screwed up anything inplace
    assert before == after
    assert set(idxs_by_label.keys()) == set(vals_by_label.keys())
    assert set(idxs_by_label.keys()) == set(self.params.keys())

    # -- the rng is a placeholder literal, bound at sampling time
    self.s_rng = pyll.Literal('rng-placeholder')
    # -- N.B. operates inplace:
    self.s_idxs_vals = recursive_set_rng_kwarg(
        pyll.scope.pos_args(idxs_by_label, vals_by_label),
        self.s_rng)
    # -- raises an exception if no topological ordering exists
    pyll.toposort(self.s_idxs_vals)

    # -- Protocol for serialization.
    #    self.cmd indicates to e.g. MongoWorker how this domain
    #    should be [un]serialized.
    #    XXX This mechanism deserves review as support for ipython
    #        workers improves.
    self.cmd = ('domain_attachment', 'FMinIter_Domain')
def test_vectorize_config0():
    """Vectorize a config of dependent hyperparameters (label-keyed
    variant) and check that conditional p1 is sampled only when the
    p3/p6 choices select it.

    N.B. the local variable names below become the config's keys via
    `locals()` — do not rename them.
    """
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_loguniform('p1', 2, 3)
    p2 = hp_choice('p2', [-1, p0])
    p3 = hp_choice('p3', [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice('p6', [-3, p1])
    d = locals()
    d['p1'] = None  # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)
    # -- sample count is left symbolic and bound via memo at rec_eval
    N = as_apply('N:TBA')
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr
    full_output = as_apply([vconfig,
                            vh.idxs_by_label(),
                            vh.vals_by_label()])
    if 1:
        print '=' * 80
        print 'VECTORIZED'
        print full_output
        print '\n' * 1
    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print '=' * 80
        print 'VECTORIZED STOCHASTIC'
        print fo2
        print '\n' * 1
    new_vc = recursive_set_rng_kwarg(
        fo2,
        as_apply(np.random.RandomState(1)))
    if 0:
        print '=' * 80
        print 'VECTORIZED STOCHASTIC WITH RNGS'
        print new_vc
    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})
    print 'foo[0]', foo[0]
    print 'foo[1]', foo[1]
    assert len(foo) == Nval
    if 0:
        # XXX refresh these values to lock down sampler
        # NOTE(review): indentation reconstructed — these two asserts
        # appear to sit under the disabled block; confirm.
        assert foo[0] == {
            'p0': 0.39676747423066994,
            'p1': None,
            'p2': 0.39676747423066994,
            'p3': 2.1281244479293568,
            'p4': 1,
            'p5': (3, 4, 0.39676747423066994)}
        assert foo[1] != foo[2]
    print idxs
    print vals['p3']
    print vals['p6']
    print idxs['p1']
    print vals['p1']
    assert len(vals['p3']) == Nval
    assert len(vals['p6']) == Nval
    # -- p1 is conditional, so it must not be sampled on every trial
    assert len(idxs['p1']) < Nval
    p1d = dict(zip(idxs['p1'], vals['p1']))
    for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
        if p3v == p6v == 0:
            # -- neither chooser selected p1, so no draw for this trial
            assert ii not in idxs['p1']
        if p3v:
            assert foo[ii]['p3'] == p1d[ii]
        if p6v:
            print 'p6', foo[ii]['p6'], p1d[ii]
            assert foo[ii]['p6'] == p1d[ii]