Example #1
    def f(par):
        params.setvalues(par)
        p = params
        w = utils.calculateweights(t, sfr(t, params))
        # isow = iso.getisosweights(w, 10.**t, metallicity(t, params), isos)
        if p.sigma > 0.:
            if p.dsigmadlogt == 0.:
                isow = iso.getisosweights_gauss(w, 10.**t, metallicity(t, p), isos, p.sigma)
            if p.dsigmadlogt != 0.:
                # print "Gaussian sigma, ds/dlogt ", p.sigma, p.dsigmadlogt
                isow = iso.getisosweights_vgauss(w, 10.**t, metallicity(t, p), isos,
                                                 p.sigma, p.dsigmadlogt)
            if p.dsigmadlogs != 0.:  # Hook for SFR-dependent spread; not fully implemented
                isow = iso.getisosweights_sgauss(w, 10.**t, sfr(t, params), metallicity(t, p),
                                                 isos, p.sigma, p.dsigmadlogs)
        else:
            isow = iso.getisosweights(w, 10.**t, metallicity(t, p), isos)

        m = iso.computeCMD(isow, isos)
        m = utils.normalize(m, sum(data.flat))
        return utils.loglikelihood(m, data)
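A minimal standalone sketch of what the Gaussian spreading step is assumed to do (the function name, grid, and signature here are hypothetical, not the iso module's actual API): each age bin's weight is smeared over a metallicity grid with a Gaussian of width sigma, conserving the bin's total weight.

import numpy as np

def spread_weights_gauss(w, feh_mean, feh_grid, sigma):
    """Hypothetical stand-in for iso.getisosweights_gauss: smear each
    age bin's weight over feh_grid with a Gaussian of width sigma."""
    isow = np.zeros((len(w), len(feh_grid)))
    for i, (wi, mu) in enumerate(zip(w, feh_mean)):
        g = np.exp(-0.5 * ((feh_grid - mu) / sigma) ** 2)
        isow[i] = wi * g / g.sum()  # normalize: the bin's weight is conserved
    return isow

w = np.array([1.0, 2.0, 0.5])            # weights per age bin
feh_mean = np.array([-1.5, -1.0, -0.5])  # mean [Fe/H] per age bin
feh_grid = np.linspace(-2.5, 0.5, 13)    # isochrone metallicity grid
print(spread_weights_gauss(w, feh_mean, feh_grid, 0.2).sum())  # -> 3.5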
Example #2
def compute_model(t, p, isos, data):
    """ Returns m,s,w,isow,model """
    m = fit.metallicity(t, p)
    s = fit.sfr(t, p)
    w = utils.calculateweights(t, s)
    if 'dsigmadlogt' not in p.pars:
        p.set('dsigmadlogt', 0, 0, False)
    if p.sigma > 0.:
        if p.dsigmadlogt == 0.:
            isow = iso.getisosweights_gauss(w, 10.**t, m, isos, p.sigma)
        if p.dsigmadlogt != 0.:
            print "Gaussian sigma, ds/dlogt ", p.sigma, p.dsigmadlogt
            isow = iso.getisosweights_vgauss(w, 10.**t, m, isos, p.sigma, p.dsigmadlogt)
    else:
        isow = iso.getisosweights(w, 10.**t, m, isos)
    model = iso.computeCMD(isow, isos)
    model = utils.normalize(model, sum(data.flat))
    d = numarray.maximum(data, 1e-20)       # clip so log(d) stays finite
    llhC = sum((d * numarray.log(d)).flat)  # data-only constant term
    value = utils.loglikelihood(model, data)
    print "henry:", value, "tom:", 2.0 * (value + llhC)
    return m, s, w, isow, model
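The d = maximum(data, 1e-20) clip and the printed 2.0*(value + llhC) suggest a Poisson likelihood-ratio statistic for the binned CMD. A self-contained sketch of that statistic under this assumption, with numpy standing in for the legacy numarray module:

import numpy as np

def poisson_lr(model, data, floor=1e-20):
    """Poisson likelihood-ratio statistic for binned data; the floor
    guards log(0) exactly like the numarray.maximum clip above."""
    m = np.maximum(model, floor)
    n = np.maximum(data, floor)
    return 2.0 * np.sum(m - n + n * np.log(n / m))

model = np.array([[4.0, 1.0], [2.0, 3.0]])
data = np.array([[5.0, 0.0], [2.0, 4.0]])
print(poisson_lr(model, data))  # 0 only when model matches data bin-by-bin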
Example #3
def compute_model(t, p, isos, data):
    """ Returns m,s,w,isow,model """
    m = fit.metallicity(t, p)
    s = fit.sfr(t, p)
    w = utils.calculateweights(t, s)
    if 'dsigmadlogt' not in p.pars:
        p.set('dsigmadlogt', 0, 0, False)
    if p.sigma > 0.:
        if p.dsigmadlogt == 0.:
            isow = iso.getisosweights_gauss(w, 10.**t, m, isos, p.sigma)
        if p.dsigmadlogt != 0.:
            print "Gaussian sigma, ds/dlogt ", p.sigma, p.dsigmadlogt
            isow = iso.getisosweights_vgauss(w, 10.**t, m, isos, p.sigma,
                                             p.dsigmadlogt)
    else:
        isow = iso.getisosweights(w, 10.**t, m, isos)
    model = iso.computeCMD(isow, isos)
    model = utils.normalize(model, sum(data.flat))
    d = numarray.maximum(data, 1e-20)
    llhC = sum((d * numarray.log(d)).flat)
    value = utils.loglikelihood(model, data)
    print "henry:", value, "tom:", 2.0 * (value + llhC)
    return m, s, w, isow, model
Example #4
w_logit = weight_variable((config.cell_output_size, config.num_classes))
b_logit = bias_variable((config.num_classes,))
logits = tf.nn.xw_plus_b(output, w_logit, b_logit)
softmax = tf.nn.softmax(logits)

# cross-entropy.
xent = tf.nn.softmax_cross_entropy_with_logits(logits, labels_ph)
xent = tf.reduce_mean(xent)
pred_labels = tf.argmax(logits, 1)
# 0/1 reward.
reward = tf.cast(
    tf.equal(pred_labels, tf.cast(tf.argmax(labels_ph, 1), tf.int64)),
    tf.float32)
rewards = tf.expand_dims(reward, 1)  # [batch_sz, 1]
rewards = tf.tile(rewards, (1, config.num_glimpses))  # [batch_sz, timesteps]
logll = loglikelihood(loc_mean_arr, sampled_loc_arr, config.loc_std)
advs = rewards - tf.stop_gradient(baselines)
logllratio = tf.reduce_mean(logll * advs)
reward = tf.reduce_mean(reward)

baselines_mse = tf.reduce_mean(tf.square((rewards - baselines)))
var_list = tf.trainable_variables()
# hybrid loss
loss = -logllratio + xent + baselines_mse  # `-` for minimize
grads = tf.gradients(loss, var_list)
grads, _ = tf.clip_by_global_norm(grads, config.max_grad_norm)

# learning rate
global_step = tf.get_variable('global_step', [],
                              initializer=tf.constant_initializer(0),
                              trainable=False)
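The reward/baseline bookkeeping above is plain REINFORCE with a learned baseline; a small numpy sketch of the same shapes and loss terms (illustrative only, not TensorFlow):

import numpy as np

batch_sz, num_glimpses = 4, 6
reward = np.array([1.0, 0.0, 1.0, 1.0])                # 0/1 per example
rewards = np.tile(reward[:, None], (1, num_glimpses))  # [batch_sz, timesteps]
baselines = np.full((batch_sz, num_glimpses), 0.7)     # learned value estimates
logll = np.random.randn(batch_sz, num_glimpses)        # log pi(u|s) per glimpse

advs = rewards - baselines           # stop_gradient: baseline held constant here
logllratio = (logll * advs).mean()   # REINFORCE surrogate (to be maximized)
baselines_mse = ((rewards - baselines) ** 2).mean()
xent = 0.3                           # stand-in for the classification loss
loss = -logllratio + xent + baselines_mse  # `-` because we minimize
print(loss)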
Example #5
def plot_residuals(d, m):
    pylab.imshow(d - m, origin='lower', interpolation="nearest")
    #    pylab.cool()
    pylab.savefig("graph.eps")
    #    pylab.show()


plot_residuals(data, model)

f = open("protocol", "w")
f.write('best fit for "%s".\n' % isodir)
if gauss:
    f.write("simple model, gaussian spreading\n")
else:
    f.write("simple model, non gauss\n")
f.write("likelihood: %r, toms: %r\n" %
        (utils.loglikelihood(model, data), utils.tomslikelihood(model, data)))
f.write("parameters:\n\n")
p.write(f)
f.write("-" * 60 + "\n\n")

f.write("age, [Fe/H], sfr, weight\n\n")
for x in zip(list(t), list(m), list(s), list(w)):
    f.write("%f %f %f %.10f\n" % x)
f.write("-" * 60 + "\n\n")

f.write("isochrone_file_name, weight\n\n")
for i, x in enumerate(isow):
    f.write("%s  %e\n" % (iso.getfilename(i, isos), x))
f.write("-" * 60 + "\n\n")
Example #6
def plot_residuals(d, m):
    # pylab.imshow((d - m) / m, origin='lower', interpolation="nearest")
    pylab.imshow(d - m, origin='lower', interpolation="nearest")
    #    pylab.cool()
    pylab.savefig("graph.eps")
    #    pylab.show()

plot_residuals(data, model)


f=open("protocol","w")
f.write('best fit for "%s".\n'%isodir)
if gauss:
    f.write("simple model, gaussian spreading\n")
else:
    f.write("simple model, non gauss\n")
f.write("likelihood: %r, toms: %r\n"%(utils.loglikelihood(model,data),
    utils.tomslikelihood(model,data)))
f.write("parameters:\n\n")
p.write(f)
f.write("-"*60+"\n\n")

f.write("age, [Fe/H], sfr, weight\n\n")
for x in zip(list(t),list(m),list(s),list(w)):
    f.write("%f %f %f %.10f\n"%x)
f.write("-"*60+"\n\n")

f.write("isochrone_file_name, weight\n\n")
for i,x in enumerate(isow):
    f.write("%s  %e\n"%(iso.getfilename(i,isos),x))
f.write("-"*60+"\n\n")
Example #7
# adversarial-example hook (disabled) ~~~~~~~~~
#x_adv = fgm(init_glimpse, softmax, xent, eps=.3)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~

#ret = tf.cond(use_adv, core_net(x_adv), lambda: (logits,softmax))

#logits, softmax = ret

pred_labels = tf.argmax(logits, 1)
# 0/1 reward.
reward = tf.cast(tf.equal(pred_labels, labels_ph),
                 tf.float32)  # get r=1 for correct classification, 0 otherwise
rewards = tf.expand_dims(reward, 1)  # [batch_sz, 1]
rewards = tf.tile(rewards, (1, config.num_glimpses))  # [batch_sz, timesteps]
logll = loglikelihood(
    loc_mean_arr, sampled_loc_arr,
    config.loc_std)  # aka log pi(u|s,theta) of stochastic policy
advs = rewards - tf.stop_gradient(baselines)
logllratio = tf.reduce_mean(logll * advs)
reward = tf.reduce_mean(reward)

baselines_mse = tf.reduce_mean(tf.square((rewards - baselines)))
var_list = tf.trainable_variables()
# hybrid loss
loss = -logllratio + xent + baselines_mse  # `-` for minimize
grads = tf.gradients(loss, var_list)
grads, _ = tf.clip_by_global_norm(grads, config.max_grad_norm)

# learning rate
global_step = tf.get_variable('global_step', [],
                              initializer=tf.constant_initializer(0),
                              trainable=False)
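What tf.clip_by_global_norm computes, sketched in numpy: the whole gradient list is rescaled once when its joint L2 norm exceeds the threshold, preserving the relative direction of the update:

import numpy as np

def clip_by_global_norm(grads, max_norm):
    """Rescale all gradients jointly when their combined L2 norm
    exceeds max_norm (mirrors tf.clip_by_global_norm)."""
    global_norm = np.sqrt(sum((g ** 2).sum() for g in grads))
    scale = min(1.0, max_norm / (global_norm + 1e-12))
    return [g * scale for g in grads], global_norm

grads = [np.array([3.0, 4.0]), np.array([12.0])]  # global norm = 13
clipped, norm = clip_by_global_norm(grads, 5.0)
print(norm, [g.tolist() for g in clipped])        # 13.0, scaled by 5/13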
Example #8
  def __init__(self, config, mnist):
    self.loc_mean_arr = []
    self.sampled_loc_arr = []
    self.next_inputs = []
    self.create_placeholders(config)
    self.create_auxiliary_networks(config)

    # 0.0 is the center of the image while -1 and 1 are the extremes
    # when taking glimpses
    init_loc = tf.random_uniform((self.n_examples, 2), minval=-1, maxval=1)
    init_glimpse = self.gl(init_loc)

    # Core network.
    lstm_cell = tf.nn.rnn_cell.LSTMCell(config.cell_size, state_is_tuple=True)
    init_state = lstm_cell.zero_state(self.n_examples, tf.float32)
    inputs = [init_glimpse]
    # Dummy entries; loop_function supplies the real input at each step.
    inputs.extend([0] * config.num_glimpses)
    outputs, _ = tf.nn.seq2seq.rnn_decoder(
      inputs, init_state, lstm_cell, loop_function=self.get_next_input)

    # Time-independent baselines: reward the policy only when the return
    # exceeds the expected (baseline) reward.
    with tf.variable_scope('baseline'):
      w_baseline = weight_variable((config.cell_out_size, 1))
      b_baseline = bias_variable((1,))
    baselines = []
    for t, output in enumerate(outputs[1:]):
      baseline_t = tf.nn.xw_plus_b(output, w_baseline, b_baseline)
      baseline_t = tf.squeeze(baseline_t)
      baselines.append(baseline_t)
    baselines = tf.pack(baselines)  # [timesteps, batch_sz]
    baselines = tf.transpose(baselines)  # [batch_sz, timesteps]

    # Take the last step only.
    output = outputs[-1]
    # Build classification network.
    with tf.variable_scope('cls'):
      w_logit = weight_variable((config.cell_out_size, config.num_classes))
      b_logit = bias_variable((config.num_classes,))
    logits = tf.nn.xw_plus_b(output, w_logit, b_logit)
    self.softmax = tf.nn.softmax(logits)

    # cross-entropy.
    self.xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, self.labels_ph)
    self.xent = tf.reduce_mean(self.xent)
    pred_labels = tf.argmax(logits, 1)
    # 0/1 reward.
    reward = tf.cast(tf.equal(pred_labels, self.labels_ph), tf.float32)
    rewards = tf.expand_dims(reward, 1)  # [batch_sz, 1]
    rewards = tf.tile(rewards, (1, config.num_glimpses))  # [batch_sz, timesteps]
    logll = loglikelihood(self.loc_mean_arr, self.sampled_loc_arr, config.loc_std)
    self.advs = rewards - tf.stop_gradient(baselines)
    self.logllratio = tf.reduce_mean(logll * self.advs)
    self.reward = tf.reduce_mean(reward)

    self.baselines_mse = tf.reduce_mean(tf.square((rewards - baselines)))
    self.var_list = tf.trainable_variables()
    # hybrid loss
    self.loss = -self.logllratio + self.xent + self.baselines_mse  # `-` for minimize
    grads = tf.gradients(self.loss, self.var_list)
    self.grads, _ = tf.clip_by_global_norm(grads, config.max_grad_norm)
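The loglikelihood helper is presumably the log-density of the sampled locations under the Gaussian location policy N(loc_mean, loc_std^2); a numpy sketch under that assumption:

import numpy as np

def loglikelihood(loc_mean, sampled_loc, std):
    # loc_mean, sampled_loc: [timesteps, batch_sz, 2] location arrays
    logp = (-0.5 * ((sampled_loc - loc_mean) / std) ** 2
            - np.log(std * np.sqrt(2.0 * np.pi)))
    return logp.sum(axis=-1).T  # sum over x,y -> [batch_sz, timesteps]

loc_mean = np.zeros((6, 4, 2))                       # 6 glimpses, batch of 4
sampled = loc_mean + 0.22 * np.random.randn(6, 4, 2)
print(loglikelihood(loc_mean, sampled, 0.22).shape)  # (4, 6)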