Example #1
# Assumed imports; module-level constants (SPLINE_DICT, C, F21, KPC, PI,
# KBOLTZMANN, LITTLEH, MAXZ, the k_perp interpolation bounds) and helpers
# (dict2tuple, bias, power_lin, cosmo, RADIO_BACKGROUND_MODELS) are defined
# elsewhere in the surrounding module.
import numpy as np
from scipy import integrate
from scipy import interpolate as interp


def ps_radio(kperp, z21, params):
    '''
    The power spectrum of known radio point sources.
    Args:
        kperp, wavenumber perpendicular to the line of sight (h/Mpc)
        z21, redshift at which the observed wavelength is 21 cm.
        params, dictionary of parameters.
    Returns:
        power spectrum (K^2 h^3/Mpc^3)
    '''
    splkey = ('ps_radio',) + dict2tuple(params)

    if splkey not in SPLINE_DICT:
        # The ARCADE excess term only contributes when it is switched on.
        if params['INCLUDE_ARCADE']:
            arccoeff = 1.
        else:
            arccoeff = 0.
        kperpvals = np.logspace(K_PERP_INTERP_MIN, KPERP_INTERP_MAX,
                                NINTERP_KPERP)
        psqvals = np.zeros_like(kperpvals)
        prefactor = ((C / F21 * (1 + z21) / (1e3 * KPC))**4.
                     / (64. * PI * PI * KBOLTZMANN**2.)
                     * LITTLEH * 1e-3 * C / cosmo.H0)
        freq_obs = F21 / (1. + z21)
        for k, kp in enumerate(kperpvals):
            def g(x):
                # Squared bias-weighted emissivity (ARCADE + AGN + SFG)
                # times the linear matter power spectrum along the sightline.
                emiss = (arccoeff * params['ARCADE_BIAS']
                         * RADIO_BACKGROUND_MODELS['emiss_arc_const'](x, params)
                         * (freq_obs * (1 + x) / 1e9)**(-params['ARCADE_POW'])
                         + RADIO_BACKGROUND_MODELS['emiss_agn_fast'](z21, x)
                         * bias(params['MEFF_AGN'], x)
                         + RADIO_BACKGROUND_MODELS['emiss_sfg_fast'](z21, x)
                         * bias(params['MEFF_SFG'], x))
                return emiss**2. / ((1 + x)**2. * cosmo.Ez(x)) * power_lin(kp, x)
            psqvals[k] = integrate.quad(g, z21, MAXZ)[0] * prefactor
        SPLINE_DICT[splkey] = interp.interp1d(np.log(kperpvals),
                                              np.log(psqvals))
    if isinstance(kperp, np.ndarray):
        # Zero above the interpolation range, clamp below it.
        kmin = kperp < K_PERP_INTERP_MIN
        kint = np.logical_and(kperp >= K_PERP_INTERP_MIN,
                              kperp <= KPERP_INTERP_MAX)
        output = np.zeros_like(kperp)
        output[kmin] = np.exp(SPLINE_DICT[splkey](np.log(K_PERP_INTERP_MIN)))
        output[kint] = np.exp(SPLINE_DICT[splkey](np.log(kperp[kint])))
        return output
    else:
        zf = 1.
        if kperp > KPERP_INTERP_MAX:
            zf = 0.
            kperp = KPERP_INTERP_MAX
        if kperp < K_PERP_INTERP_MIN:
            kperp = K_PERP_INTERP_MIN
        return zf * np.exp(SPLINE_DICT[splkey](np.log(kperp)))
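
A minimal usage sketch, assuming the module constants above are defined and using purely illustrative parameter values (the key names are taken from the function body):

params = {'INCLUDE_ARCADE': True, 'ARCADE_BIAS': 1., 'ARCADE_POW': 2.6,
          'MEFF_AGN': 1e13, 'MEFF_SFG': 1e12}   # illustrative values only
kperp_grid = np.logspace(-2., 0., 50)           # h/Mpc
ps_vals = ps_radio(kperp_grid, 8., params)      # K^2 h^3/Mpc^3 at z21 = 8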
Example #2
# tensorflow (v1-style API) and a project-local utils module are assumed.
import tensorflow as tf


def build_gen1(y, z1):
    # y is of dimension (batch_size, 8, 8, 3)
    gen1_z_embed = utils.batch_normalization(
        tf.nn.relu(
            utils.dense(z1,
                        num_inputs=50,
                        num_units=256,
                        bias=True,
                        name='gen1_z_embed')))

    y_flatten = tf.reshape(y, (-1, 8 * 8 * 3))
    gen1_y_embed = tf.nn.relu(
        utils.bias(utils.batch_normalization(
            utils.dense(y_flatten,
                        num_inputs=192,
                        num_units=512,
                        bias=False,
                        name='gen1_y_embed')), (512, ),
                   name='gen1_y_embed_bias'))

    gen1_in = tf.concat([gen1_z_embed, gen1_y_embed], axis=1)

    gen1_l1 = tf.transpose(
        tf.reshape(
            tf.nn.relu(
                utils.bias(utils.batch_normalization(
                    utils.dense(gen1_in,
                                num_inputs=768,
                                num_units=1024,
                                bias=False,
                                name='gen1_l1')), (1024, ),
                           name='gen1_l1_bias')), (-1, 64, 4, 4)),
        [0, 2, 3, 1])

    gen1_l2 = tf.nn.relu(
        utils.bias(utils.batch_normalization(
            utils.conv2d_transpose(gen1_l1, (4, 4, 64, 64), (100, 11, 11, 64),
                                   bias=False,
                                   padding='VALID',
                                   stride=(1, 2, 2, 1),
                                   name='gen1_l2')), (64, ),
                   name='gen1_l2_bias'))

    gen1_l3 = tf.sigmoid(
        utils.conv2d_transpose(gen1_l2, (6, 6, 3, 64), (100, 16, 16, 3),
                               padding='VALID',
                               name='gen1_l3'))

    return gen1_l3
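
A sketch of wiring this generator into a TF1 graph; the placeholder shapes follow the layer sizes above, and the batch size must be 100 because the transposed convolutions hard-code it in their output shapes:

y = tf.placeholder(tf.float32, shape=(100, 8, 8, 3), name='y')
z1 = tf.placeholder(tf.float32, shape=(100, 50), name='z1')
gen1_out = build_gen1(y, z1)   # (100, 16, 16, 3), values in (0, 1)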
Example #3
def g(x, bp=1.):
    # Integrand over log10 halo mass; zval and params come from the
    # enclosing scope. Returns HI mass over spin temperature, scaled by a
    # volume ratio and weighted by the halo mass function and halo bias.
    rv = hi_helpers.rVir(10.**x, zval)
    rs = (rv / HI_HELPERS['CSHIFUNC'](10.**x, zval, params)
          * (1. + zval) / 1e3)
    rt = params['RT'] * (1. + zval) / 1e3
    rht = rs * rt / (rs + rt)
    volratio = hi_helpers.expvol(rv, rht) / hi_helpers.expvol(rv, rs)
    return (HI_HELPERS['MHIFUNC'](10.**x, zval, params)
            / TS_HELPERS['TSFUNC'](10.**x, zval, params) * volratio
            * massfunc(10.**x, zval) * bias(10.**x, zval)**bp)
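
g is an integrand over log10 halo mass; by analogy with bias_hi in Example #4 below, it would be integrated with scipy over the module's log10-mass bounds:

# Assumes the same M_INTERP_MIN/M_INTERP_MAX bounds used elsewhere.
result = integrate.quad(g, M_INTERP_MIN, M_INTERP_MAX)[0]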
Example #4
import numpy as np
from scipy import integrate
from scipy import interpolate as interp


def bias_hi(z21, params):
    '''
    bias of HI
    Args:
        z21, float, redshift
        params, dictionary of parameters.
    Returns:
        bias of HI (unitless)
    '''
    splkey = ('bias', 'hi') + dict2tuple(params)
    if splkey not in SPLINE_DICT:
        zvals = np.linspace(0, MAXZ, NINTERP_Z)
        biasvals = np.zeros_like(zvals)
        for znum, zval in enumerate(zvals):
            # HI-mass-weighted halo bias, integrated over log10 halo mass.
            g = lambda x: (HI_HELPERS[params['MHIFUNC']](10.**x, zval, params)
                           * massfunc(10.**x, zval) * bias(10.**x, zval))
            biasvals[znum] = (integrate.quad(g, M_INTERP_MIN, M_INTERP_MAX)[0]
                              / rho_hi(zval, params))
        SPLINE_DICT[splkey] = interp.interp1d(zvals, np.log(biasvals))
    return np.exp(SPLINE_DICT[splkey](z21))
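
A short usage sketch, assuming a params dictionary whose 'MHIFUNC' entry names a valid HI_HELPERS key:

zgrid = np.linspace(0., 5., 11)
hi_bias = np.array([bias_hi(z, params) for z in zgrid])   # unitless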
Example #5
# tensorflow (v1-style API) and a project-local utils module are assumed.
import tensorflow as tf


def build_gen0(h1, z0, preload_weights=32 * [None]):
    # h1 is of dimension (batch_size, 16, 16, 3); z0 is (batch_size, 16)
    gen0_z_embed1 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.dense(
                z0,
                num_inputs=16,
                num_units=128,
                bias=False,
                weight_preset=preload_weights[0],
                name='gen0_z_embed1'),
                                      scale_preset=preload_weights[2],
                                      mean_preset=preload_weights[3],
                                      variance_preset=preload_weights[4]),
            (128, ),  # bias shape
            bias_preset=preload_weights[1],
            name='gen0_z_embed1_bias'))

    gen0_z_embed2 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.dense(
                gen0_z_embed1,
                num_inputs=128,
                num_units=128,
                bias=False,
                weight_preset=preload_weights[5],
                name='gen0_z_embed2'),
                                      scale_preset=preload_weights[7],
                                      mean_preset=preload_weights[8],
                                      variance_preset=preload_weights[9]),
            (128, ),  # bias shape
            bias_preset=preload_weights[6],
            name='gen0_z_embed2_bias'))

    h1_flatten = tf.reshape(h1, (-1, 16 * 16 * 3))

    gen0_in = tf.concat([h1_flatten, gen0_z_embed2], axis=1)

    gen0_in_reshaped = tf.transpose(
        tf.reshape(
            tf.nn.relu(
                utils.bias(
                    utils.batch_normalization(
                        utils.dense(gen0_in,
                                    num_inputs=896,
                                    num_units=256 * 5 * 5,
                                    bias=False,
                                    weight_preset=preload_weights[10],
                                    name='gen0_embed'),
                        scale_preset=preload_weights[12],
                        mean_preset=preload_weights[13],
                        variance_preset=preload_weights[14]),
                    (256 * 5 * 5, ),  # bias shape
                    bias_preset=preload_weights[11],
                    name='gen0_embed_bias')),
            [-1, 256, 5, 5]),
        [0, 2, 3, 1])

    gen0_deconv1 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.conv2d_transpose(
                gen0_in_reshaped, (5, 5, 256, 256), (100, 10, 10, 256),
                bias=False,
                weight_preset=preload_weights[15],
                stride=(1, 2, 2, 1),
                padding='SAME',
                name='gen0_deconv1'),
                                      scale_preset=preload_weights[17],
                                      mean_preset=preload_weights[18],
                                      variance_preset=preload_weights[19]),
            (256, ),  # bias shape
            bias_preset=preload_weights[16],
            name='gen0_deconv1_bias'))

    gen0_deconv2 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.conv2d_transpose(
                gen0_deconv1, (5, 5, 128, 256), (100, 14, 14, 128),
                bias=False,
                weight_preset=preload_weights[20],
                padding='VALID',
                name='gen0_deconv2'),
                                      scale_preset=preload_weights[22],
                                      mean_preset=preload_weights[23],
                                      variance_preset=preload_weights[24]),
            (128, ),  # bias shape
            bias_preset=preload_weights[21],
            name='gen0_deconv2_bias'))

    gen0_deconv3 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.conv2d_transpose(
                gen0_deconv2, (5, 5, 128, 128), (100, 28, 28, 128),
                bias=False,
                weight_preset=preload_weights[25],
                stride=(1, 2, 2, 1),
                padding='SAME',
                name='gen0_deconv3'),
                                      scale_preset=preload_weights[27],
                                      mean_preset=preload_weights[28],
                                      variance_preset=preload_weights[29]),
            (128, ),  # bias shape
            bias_preset=preload_weights[26],
            name='gen0_deconv3_bias'))

    gen0_deconv4 = tf.sigmoid(
        utils.conv2d_transpose(gen0_deconv3, (5, 5, 3, 128), (100, 32, 32, 3),
                               weight_preset=preload_weights[30],
                               bias_preset=preload_weights[31],
                               padding='VALID',
                               name='gen0_deconv4'))
    return gen0_deconv4
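
As with build_gen1, a hypothetical TF1 wiring; the transposed convolutions again fix the batch size at 100 via their output shapes:

h1 = tf.placeholder(tf.float32, shape=(100, 16, 16, 3), name='h1')
z0 = tf.placeholder(tf.float32, shape=(100, 16), name='z0')
gen0_out = build_gen0(h1, z0)   # (100, 32, 32, 3), values in (0, 1)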
Example #6
# Assumed imports; model, device, opt, logger, loss_fn, bias, rel_error,
# min_value and max_value are defined earlier in the script.
import numpy as np
import torch

with torch.no_grad():  # no gradients are needed at test time
    for x, y in test_loader:
        if torch.cuda.is_available():
            x = x.to(device)
            y = y.to(device)
        y_pred, _ = model(x)

        y_pred_dnorm = denormalize_data(
            y_pred.view(-1, opt.n_inp, opt.n_points).cpu(),
            min_value, max_value)
        y_dnorm = denormalize_data(
            y.view(-1, opt.n_inp, opt.n_points).cpu(),
            min_value, max_value)

        loss_test = loss_fn(y_pred_dnorm, y_dnorm)

        logs_test['mse'] = loss_test.item()
        logs_test['rmse'] = np.sqrt(loss_test.item())
        logs_test['bias'] = bias(y_pred_dnorm, y_dnorm)
        logs_test['err-rel'] = rel_error(y_pred_dnorm, y_dnorm)

        logger.log('test', logs_test)
print("\n\n================================================")
print(" *  Test MSE: ", logs_test['mse'],
      "\n *  Test RMSE: ", logs_test['rmse'],
      "\n *  Test Bias: ", logs_test['bias'],
      "\n *  Test Rel-Err (%): ", logs_test['err-rel'])
print("================================================\n")


logger.save(model)
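
The loop depends on a denormalize_data helper that is not shown. A minimal sketch, assuming the data were min-max normalized to [0, 1]:

def denormalize_data(x, min_value, max_value):
    # Hypothetical: invert min-max scaling back to the original range.
    return x * (max_value - min_value) + min_value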
Example #7
import math

# tensorflow (v1-style API) and a project-local utils module are assumed.
import tensorflow as tf


def get(x, keep_prob, width, height):
    """
    input:
        x: placeholder for the image batch [None, width, height, depth]
        keep_prob: dropout probability
        width, height: size of the images in the training dataset
    returns:
        unbounded logits
        [summaries]
    """

    summaries = []

    # define weights and biases for first conv layer
    with tf.variable_scope('L1') as l1:
        # input: the placeholder x [None, width ,height]
        # output: 32 features for each 5x5 window
        W1, W1_s = utils.kernels([5, 5, 1, 32], name="W1")
        b1, b1_s = utils.bias([32], name="b1")
        summaries += W1_s
        summaries += b1_s

        h1 = tf.nn.relu(
            tf.nn.conv2d(input=x,
                         filter=W1,
                         strides=[1, 1, 1, 1],
                         padding='SAME') + b1,
            name="ReLU")

        mp1 = tf.nn.max_pool(value=h1,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name=l1.name)
        # now the "image" is (width/2)x(height/2)

    with tf.variable_scope('L2') as l2:
        W2, W2_s = utils.kernels([5, 5, 32, 64], name="W2")
        b2, b2_s = utils.bias([64], name="b2")
        summaries += W2_s
        summaries += b2_s

        h2 = tf.nn.relu(
            tf.nn.conv2d(input=mp1,
                         filter=W2,
                         strides=[1, 1, 1, 1],
                         padding='SAME') + b2,
            name="ReLU")
        mp2 = tf.nn.max_pool(value=h2,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name=l2.name)
        # now the "image" is (width/4)x(height/4) (x 64)

    with tf.variable_scope('FC1') as l_fc1:
        # flatten every image into a row of length (width/4)*(height/4)*64
        # -1 means: infer the number of rows automatically (the batch size)
        components = math.ceil(width / 4) * math.ceil(height / 4) * 64
        mp2_flat = tf.reshape(tensor=mp2, shape=[-1, components])
        W_fc1, W_fc1_s = utils.weight([components, 1024], name="W_fc1")
        b_fc1, b_fc1_s = utils.bias([1024], name="b_fc1")

        summaries += W_fc1_s
        summaries += b_fc1_s

        h_fc1 = tf.nn.relu(tf.matmul(mp2_flat, W_fc1) + b_fc1, name=l_fc1.name)

        # add dropout in training in order to reduce overfitting
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    with tf.variable_scope('FC2'):
        # readout layer
        W_fc2, W_fc2_s = utils.weight([1024, 2], name="W_fc2")
        b_fc2, b_fc2_s = utils.bias([2], name="b_fc2")

        summaries += W_fc2_s
        summaries += b_fc2_s

        unscaled_logits = tf.add(
            tf.matmul(h_fc1_drop, W_fc2),
            b_fc2, name="unscaled_logits")
        return unscaled_logits, summaries
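
A sketch of building the graph; the first convolution kernel ([5, 5, 1, 32]) expects single-channel images, and 28x28 is just an assumed example size:

width, height = 28, 28
x = tf.placeholder(tf.float32, shape=(None, width, height, 1), name='x')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
logits, summaries = get(x, keep_prob, width, height)   # logits: (batch, 2)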