Example #1
def alpha_from_angle(y_reco, y_true):
    from tensorflow.math import sin, cos, acos, abs  # eps is a small module-level constant
    zep, zet, azp, azt = y_reco[:, 1], y_true[:, 1], y_reco[:, 2], y_true[:, 2]
    # spherical dot product: cos(alpha) = sin(z1)sin(z2)cos(a1 - a2) + cos(z1)cos(z2)
    cosalpha = abs(sin(zep)) * cos(azp) * sin(zet) * cos(azt) + abs(
        sin(zep)) * sin(azp) * sin(zet) * sin(azt) + cos(zep) * cos(zet)
    # nudge cos(alpha) away from +/-1 so acos stays finite and differentiable
    cosalpha -= tf.math.sign(cosalpha) * eps
    alpha = acos(cosalpha)
    return alpha
Example #2
def cos_angle(y_reco, y_true):
    from tensorflow.math import sin, cos, abs
    zep, zet, azp, azt = y_reco[:, 1], y_true[:, 1], y_reco[:, 2], y_true[:, 2]
    # expanded form: cosalpha = abs(sin(zep))*cos(azp)*sin(zet)*cos(azt) + abs(sin(zep))*sin(azp)*sin(zet)*sin(azt) + cos(zep)*cos(zet)
    cosalpha = abs(sin(zep)) * abs(sin(zet)) * cos(azp - azt) + cos(zep) * cos(
        zet)  # check for double absolutes
    # pull cos(alpha) slightly toward 0 so a downstream acos is safe at +/-1
    cosalpha -= tf.math.sign(cosalpha) * eps
    return cosalpha
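The collapsed form above is the angle-addition identity applied to the expanded spherical dot product; for zenith angles in [0, pi] the abs(sin(.)) factors are no-ops, which is what the "double absolutes" comment is probing. A minimal NumPy check of the identity (my addition, not from the source project):

# collapsed vs expanded cos(alpha) between two (zenith, azimuth) directions
import numpy as np

rng = np.random.default_rng(0)
ze1, ze2 = rng.uniform(0, np.pi, 5), rng.uniform(0, np.pi, 5)
az1, az2 = rng.uniform(0, 2 * np.pi, 5), rng.uniform(0, 2 * np.pi, 5)

expanded = (np.sin(ze1) * np.cos(az1) * np.sin(ze2) * np.cos(az2)
            + np.sin(ze1) * np.sin(az1) * np.sin(ze2) * np.sin(az2)
            + np.cos(ze1) * np.cos(ze2))
collapsed = np.sin(ze1) * np.sin(ze2) * np.cos(az1 - az2) + np.cos(ze1) * np.cos(ze2)
assert np.allclose(expanded, collapsed)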
Example #3
def sqr_vonMises23D_angle(y_reco, y_true, re=False):
    from tensorflow.math import abs, cos, reduce_mean, subtract

    # energy: squared error on log-energy
    loss_energy = reduce_mean(
        tf.math.squared_difference(y_reco[:, 0], y_true[:, 0]))

    polar_k = abs(y_reco[:, 3]) + eps
    zenth_k = abs(y_reco[:, 4]) + eps

    cos_azi = cos(subtract(y_true[:, 2], y_reco[:, 2]))

    cos_zenth = cos(subtract(y_true[:, 1], y_reco[:, 1]))

    # smooth approximation of log(2*I0(k)), the von Mises log-normalizer up to constants
    lnI0_azi = (polar_k + tf.math.log(1 + tf.math.exp(-2 * polar_k))
                - 0.25 * tf.math.log(1 + 0.25 * tf.square(polar_k))
                + tf.math.log(1 + 0.24273 * tf.square(polar_k))
                - tf.math.log(1 + 0.43023 * tf.square(polar_k)))
    lnI0_zenth = (zenth_k + tf.math.log(1 + tf.math.exp(-2 * zenth_k))
                  - 0.25 * tf.math.log(1 + 0.25 * tf.square(zenth_k))
                  + tf.math.log(1 + 0.24273 * tf.square(zenth_k))
                  - tf.math.log(1 + 0.43023 * tf.square(zenth_k)))

    # von Mises log-likelihoods: k*cos(delta) - log-normalizer
    llh_azi = polar_k * cos_azi - lnI0_azi
    llh_zenith = zenth_k * cos_zenth - lnI0_zenth

    loss_azi = reduce_mean(-llh_azi)
    loss_zenith = reduce_mean(-llh_zenith)

    kappa = tf.math.abs(y_reco[:, 5]) + eps
    cos_alpha = cos_angle(y_reco, y_true)
    # tf.debugging.assert_less_equal(tf.math.abs(cos_alpha), 1, message='cos_alpha problem', summarize=None, name=None)
    tf.debugging.assert_all_finite(tf.math.abs(cos_alpha),
                                   message='cos_alpha problem infinite/nan',
                                   name=None)
    # negative log-normalizer of a 3D von Mises-Fisher (up to an additive constant)
    nlogC = -tf.math.log(kappa) + kappa + tf.math.log(1 -
                                                      tf.math.exp(-2 * kappa))
    tf.debugging.assert_all_finite(nlogC, 'log kappa problem', name=None)

    loss_angle = tf.reduce_mean(-kappa * cos_alpha + nlogC)

    if not re:
        return loss_azi + loss_zenith + loss_energy + loss_angle
    if re:
        return float(loss_azi + loss_zenith + loss_energy + loss_angle), [
            float(loss_energy),
            float(loss_zenith),
            float(loss_azi),
            float(loss_angle)
        ]
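The lnI0 expression above behaves like a smooth approximation of log(2*I0(k)); since the von Mises normalizer is 2*pi*I0(k), the missing log(pi) is a constant that does not affect gradients. A quick numerical comparison against scipy (my addition, under that reading):

# compare with log(2*I0(k)) via the exponentially scaled Bessel function,
# using log(I0(k)) = log(i0e(k)) + k
import numpy as np
from scipy.special import i0e

k = np.linspace(0.01, 100, 1000)
approx_ln2I0 = (k + np.log1p(np.exp(-2 * k))
                - 0.25 * np.log1p(0.25 * k**2)
                + np.log1p(0.24273 * k**2)
                - np.log1p(0.43023 * k**2))
exact_ln2I0 = np.log(2.0) + np.log(i0e(k)) + k
print(np.max(np.abs(approx_ln2I0 - exact_ln2I0)))  # stays small if this reading is right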
Example #4
def mape(y_true, y_pred):
    # channel 0 of y_true holds the targets, channel 1 a 0/1 validity mask;
    # tf_maths / tf_where are module-level aliases for tensorflow.math / tf.where
    mask = y_true[:, :, :, 1]
    y_true = y_true[:, :, :, 0]
    output = tf_maths.abs(y_true - y_pred) / y_true
    # zero targets produce NaN/Inf ratios; fall back to the mask value there
    output = tf_where(tf_maths.is_nan(output), mask, output)
    output = tf_where(tf_maths.is_inf(output), mask, output)

    return tf_maths.reduce_sum(output) / tf_maths.reduce_sum(mask)
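A small illustrative call (my addition; the alias imports mirror what the source module presumably does):

# pack targets and a validity mask into the last axis of y_true
import tensorflow as tf
from tensorflow import math as tf_maths
from tensorflow import where as tf_where

vals = tf.constant([[[2.0, 0.0], [4.0, 4.0]]])   # (1, 2, 2) targets
mask = tf.constant([[[0.0, 0.0], [1.0, 1.0]]])   # (1, 2, 2) validity mask
y_true = tf.stack([vals, mask], axis=-1)         # (1, 2, 2, 2)
y_pred = tf.constant([[[2.0, 1.0], [3.0, 5.0]]])
print(mape(y_true, y_pred))                      # 0.25: mean relative error over masked entries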
Example #5
 def log_ggd(x, p, mu, alpha):
     # log-density of a generalized Gaussian with shape p, location mu, scale
     # alpha, in the exp(-|x - mu|^p / (2 alpha^p)) parameterization
     cp = tf.cast(
         log(p) -
         ((p + 1) / p) * tf.cast(log(2.0), settings.float_type) -
         lgamma(1 / p), settings.float_type)
     res = tf.cast(
         cp - log(alpha) - pow(abs(x - mu), p) / (2 * pow(alpha, p)),
         settings.float_type)
     return res
Example #6
    def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor, sample_weight=None) -> None:
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32)
        y_pred = tf.reshape(y_pred, shape=self.shape)

        # K.maximum guards against division by zero targets
        mape = math.abs(y_true - y_pred) / K.maximum(y_true, 0.001)
        avg_mape = K.mean(mape)

        self.avg_mape = avg_mape
Example #7
def abs_negcos_angle(y_reco, y_true, re=False):
    from tensorflow.math import abs, reduce_mean, subtract
    # Energy loss
    loss_energy = reduce_mean(abs(subtract(y_reco[:,0], y_true[:,0]))) #this works well but could maybe be improved
    # Angle loss
    loss_angle = reduce_mean(1-cos_angle(y_reco, y_true))
    if not re:
        return loss_energy+loss_angle
    else:
        return float(loss_energy+loss_angle), [float(loss_energy), float(loss_angle)]
Example #8
def abs_vM2D_KDE_weak(y_reco, y_true, kdet, re=False):
    from tensorflow.math import abs, cos, reduce_mean, subtract
    # energy: MAE on log-energy
    loss_energy = reduce_mean(abs(subtract(y_reco[:, 0],
                                           y_true[:, 0])))

    polar_k = abs(y_reco[:, 3]) + eps
    zenth_k = abs(y_reco[:, 4]) + eps

    cos_azi = cos(subtract(y_true[:, 2], y_reco[:, 2]))

    cos_zenth = cos(subtract(y_true[:, 1], y_reco[:, 1]))

    # smooth approximation of log(2*I0(k)), the von Mises log-normalizer up to constants
    lnI0_azi = (polar_k + tf.math.log(1 + tf.math.exp(-2 * polar_k))
                - 0.25 * tf.math.log(1 + 0.25 * tf.square(polar_k))
                + tf.math.log(1 + 0.24273 * tf.square(polar_k))
                - tf.math.log(1 + 0.43023 * tf.square(polar_k)))
    lnI0_zenth = (zenth_k + tf.math.log(1 + tf.math.exp(-2 * zenth_k))
                  - 0.25 * tf.math.log(1 + 0.25 * tf.square(zenth_k))
                  + tf.math.log(1 + 0.24273 * tf.square(zenth_k))
                  - tf.math.log(1 + 0.43023 * tf.square(zenth_k)))

    llh_azi = polar_k * cos_azi - lnI0_azi
    llh_zenith = zenth_k * cos_zenth - lnI0_zenth

    loss_azi = reduce_mean(-llh_azi)
    loss_zenith = reduce_mean(-llh_zenith)
    # kde builds a density estimate of the reconstructed zenith; kdet is the
    # target KDE and xkde a module-level evaluation grid
    kder = kde(tf.cast(y_reco[:, 1], tf.float32))
    kdeloss = tf.reduce_mean(
        tf.math.abs(kdet.log_prob(xkde) - kder.log_prob(xkde))) / 10

    if not re:
        return loss_azi + loss_zenith + loss_energy + tf.cast(
            kdeloss, tf.float32)
    if re:
        return float(loss_azi + loss_zenith + loss_energy + kdeloss), [
            float(loss_energy),
            float(loss_zenith),
            float(loss_azi),
            float(kdeloss)
        ]
Example #9
def adaptive_wing_loss(labels, output):
    from tensorflow.math import abs, greater, log, pow, reduce_mean, reduce_sum
    # default constants as in the Adaptive Wing Loss paper (Wang et al., 2019)
    alpha = 2.1
    omega = 14
    epsilon = 1
    theta = 0.5
    with tf.name_scope('adaptive_wing_loss'):
        x = output - labels
        theta_over_epsilon_tensor = tf.fill(tf.shape(labels), theta/epsilon)
        # A and C make the two branches continuous and smooth at |x| = theta
        A = omega*(1/(1+pow(theta_over_epsilon_tensor, alpha-labels)))*(alpha-labels)*pow(theta_over_epsilon_tensor, alpha-labels-1)*(1/epsilon)
        C = theta*A-omega*log(1+pow(theta_over_epsilon_tensor, alpha-labels))
        absolute_x = abs(x)
        # nonlinear branch for small residuals, linear branch for large ones
        losses = tf.where(greater(theta, absolute_x), omega*log(1+pow(absolute_x/epsilon, alpha-labels)), A*absolute_x-C)
        loss = reduce_mean(reduce_sum(losses, axis=[1, 2]), axis=0)
        return loss
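An illustrative call (my addition): the loss expects heatmap-shaped tensors and sums over the two spatial axes before averaging over the batch.

import tensorflow as tf
labels = tf.random.uniform((4, 64, 64))    # target heatmaps in [0, 1]
output = tf.random.uniform((4, 64, 64))    # predicted heatmaps
print(adaptive_wing_loss(labels, output))  # scalar loss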
Example #10
def sqr_vonMises2D_angle(y_reco, y_true, re=False):
    from tensorflow.math import abs, cos, reduce_mean, subtract
    # energy: squared error on log-energy
    loss_energy = reduce_mean(
        tf.math.squared_difference(y_reco[:, 0], y_true[:, 0]))

    polar_k = abs(y_reco[:, 3]) + eps
    zenth_k = abs(y_reco[:, 4]) + eps

    cos_azi = cos(subtract(y_true[:, 2], y_reco[:, 2]))

    cos_zenth = cos(subtract(y_true[:, 1], y_reco[:, 1]))

    # smooth approximation of log(2*I0(k)), the von Mises log-normalizer up to constants
    lnI0_azi = (polar_k + tf.math.log(1 + tf.math.exp(-2 * polar_k))
                - 0.25 * tf.math.log(1 + 0.25 * tf.square(polar_k))
                + tf.math.log(1 + 0.24273 * tf.square(polar_k))
                - tf.math.log(1 + 0.43023 * tf.square(polar_k)))
    lnI0_zenth = (zenth_k + tf.math.log(1 + tf.math.exp(-2 * zenth_k))
                  - 0.25 * tf.math.log(1 + 0.25 * tf.square(zenth_k))
                  + tf.math.log(1 + 0.24273 * tf.square(zenth_k))
                  - tf.math.log(1 + 0.43023 * tf.square(zenth_k)))

    llh_azi = polar_k * cos_azi - lnI0_azi
    llh_zenith = zenth_k * cos_zenth - lnI0_zenth

    loss_azi = reduce_mean(-llh_azi)
    loss_zenith = reduce_mean(-llh_zenith)
    if not re:
        return loss_azi + loss_zenith + loss_energy
    if re:
        return float(loss_azi + loss_zenith + loss_energy), [
            float(loss_energy),
            float(loss_zenith),
            float(loss_azi)
        ]
Example #11
def train_bound(t):
    """Trains the model to equalize values and spatial derivatives at boundaries x=5 
    and x=-5 to enforce periodic boundary condition

    Args:
        t : A tf.Tensor of shape (batch_size,).
    """

    x1 = 5 * tf.ones(t.shape)
    x2 = -5 * tf.ones(t.shape)
    with tf.GradientTape(True, False) as tape:  # persistent=True, watch_accessed_variables=False
        tape.watch(PINN.trainable_weights)
        with tf.GradientTape(True, False) as grtape1:
            grtape1.watch([t, x1, x2])
            #Automatic differentiation of complex functions is awkward in tensorflow,
            #so we differentiate real and imaginary parts separately
            h_real_1 = tfm.real(PINN(tf.stack([t, x1], -1)))
            h_imag_1 = tfm.imag(PINN(tf.stack([t, x1], -1)))
            h_real_2 = tfm.real(PINN(tf.stack([t, x2], -1)))
            h_imag_2 = tfm.imag(PINN(tf.stack([t, x2], -1)))
        #First order derivatives
        h_x1_real = grtape1.gradient(h_real_1, x1)
        h_x1_imag = grtape1.gradient(h_imag_1, x1)
        h_x2_real = grtape1.gradient(h_real_2, x2)
        h_x2_imag = grtape1.gradient(h_imag_2, x2)
        #h1_real and h1_imag have shape (batch_size,2)
        del grtape1
        h1 = tf.complex(h_real_1, h_imag_1)
        h1_x = tf.complex(h_x1_real, h_x1_imag)
        h2 = tf.complex(h_real_2, h_imag_2)
        h2_x = tf.complex(h_x2_real, h_x2_imag)
        MSE = tfm.reduce_mean(
            tfm.pow(tfm.abs(h1 - h2), 2) + tfm.pow(tfm.abs(h1_x - h2_x), 2))
    grads = tape.gradient(MSE, PINN.trainable_weights)
    sgd_opt.apply_gradients(zip(grads, PINN.trainable_weights))
    return MSE
Example #12
def loss_funcxpos2(y_reco, y_true, re=False):
    from tensorflow.math import sin, cos, acos, abs, reduce_mean, subtract, square
    # Energy loss
    loss_energy = reduce_mean(abs(subtract(y_reco[:,0], y_true[:,0]))) #this works well but could maybe be improved

    # compare (cos, sin) embeddings of the true angles with the reco components
    zeni = [cos(y_true[:,1]) - y_reco[:,1],
            sin(y_true[:,1]) - y_reco[:,2]]

    azi  = [cos(y_true[:,2]) - y_reco[:,3],
            sin(y_true[:,2]) - y_reco[:,4]]

    loss_angle = (reduce_mean(square(azi[0])) + reduce_mean(square(azi[1]))
                  + reduce_mean(square(zeni[0])) + reduce_mean(square(zeni[1])))
    if not re:
        return loss_energy+loss_angle
    else:
        return float(loss_energy+loss_angle), [float(loss_energy), float(loss_angle)]
Example #13
def train_init(t, x):
    """Trains the model to have a fixed initial condition when t=0

    Args:
        t: A tf.Tensor of shape (batch_size,)
        x: A tf.Tensor of shape (batch_size,).
    """
    def sech(x):
        return tf.complex(tfm.reciprocal(tfm.cosh(x)), 0)

    with tf.GradientTape() as tape:
        h = PINN(tf.stack([t, x], -1))
        MSE = tfm.reduce_euclidean_norm(tfm.abs(h - sech(x)))
    grads = tape.gradient(MSE, PINN.trainable_weights)
    sgd_opt.apply_gradients(zip(grads, PINN.trainable_weights))
    return MSE
Example #14
def abs_linear_unit(y_reco, y_true, re=False):
    from tensorflow.math import sin, cos, acos, abs, reduce_mean, subtract

    #energy loss
    loss_energy = reduce_mean(abs(subtract(y_reco[:,0], y_true[:,0])))

    #angle loss
    cos_alpha = cos_unit(y_reco,y_true)
    loss_angle = reduce_mean(tf.math.acos(cos_alpha))
    if not re:
        return loss_energy+loss_angle
    else:
        return float(loss_energy+loss_angle), [float(loss_energy), float(loss_angle)]
Example #15
def gradAndSq3D(x):
    """@return |dx|, dx^2"""
    # forward-difference kernels (shape [depth, height, width, in, out])
    # applied as 3D convolutions along z, y and x
    knl = np.zeros([2, 2, 2, 1, 1], dtype=np.float32)
    knl[0, 0, 0] = -1
    dz = np.array(knl)
    dz[1, 0, 0] = 1
    dz = tf.nn.conv3d(x, dz, [1] * 5, "SAME")
    dy = np.array(knl)
    dy[0, 1, 0] = 1
    dy = tf.nn.conv3d(x, dy, [1] * 5, "SAME")
    dx = np.array(knl)
    dx[0, 0, 1] = 1
    dx = tf.nn.conv3d(x, dx, [1] * 5, "SAME")

    gradSq = tfm.abs(dz**2 + dy**2 + dx**2)
    grad = tfm.sqrt(gradSq)
    return grad, gradSq
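An illustrative call (my addition), assuming tfm is the usual tensorflow.math alias: tf.nn.conv3d expects input of shape (batch, depth, height, width, channels).

import numpy as np
import tensorflow as tf
from tensorflow import math as tfm

vol = tf.random.normal((1, 8, 8, 8, 1))  # one single-channel 8x8x8 volume
grad, gradSq = gradAndSq3D(vol)
print(grad.shape)  # (1, 8, 8, 8, 1): spatial shape preserved by "SAME" padding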
Example #16
 def __init__(self,
              pt_scale: float = 1e-2,
              use_pxyz: bool = True,
              bound: Union[List[float], None] = None):
     """
     Args:
         pt_scale (float, optional): Defaults to 1e-2.
         use_pxyz (bool, optional): Defaults to True.
         bound (list(float), optional): Defaults to None.
     """
     self._pt_scale = pt_scale
     self._use_pxyz = use_pxyz
     if bound is None:
         import math
         pi = math.pi
         bound = [-pi, pi]
     self._period = abs(bound[0] - bound[1])
Example #17
def train_colloc(t, x):
    """Trains the model to obey the given PDE at collocation points

    Args:
        t: A tf.Tensor of shape (batch_size,)
        x: A tf.Tensor of shape (batch_size,).
    """
    with tf.GradientTape(True, False) as tape:  # persistent=True, watch_accessed_variables=False
        tape.watch(PINN.trainable_weights)
        #Calculate various derivatives of the output
        with tf.GradientTape(True, False) as grtape0:
            grtape0.watch([t, x])
            with tf.GradientTape(True, False) as grtape1:
                grtape1.watch([t, x])
                #Automatic differentiation of complex functions is awkward in tensorflow,
                #so we differentiate real and imaginary parts separately
                h_real = tfm.real(PINN(tf.stack([t, x], -1)))
                h_imag = tfm.imag(PINN(tf.stack([t, x], -1)))
            #First order derivatives
            h_x_real = grtape1.gradient(h_real, x)
            h_x_imag = grtape1.gradient(h_imag, x)
            h_t_real = grtape1.gradient(h_real, t)
            h_t_imag = grtape1.gradient(h_imag, t)
            #h1_real and h1_imag have shape (batch_size,2)
            del grtape1
        #Second order derivatives
        h_xx_real = grtape0.gradient(h_x_real, x)
        h_xx_imag = grtape0.gradient(h_x_imag, x)
        del grtape0
        h = tf.complex(h_real, h_imag)
        h_t = tf.complex(h_t_real, h_t_imag)
        h_xx = tf.complex(h_xx_real, h_xx_imag)
        j = tf.complex(0.0, 1.0)  # imaginary unit; tf.complex requires float arguments

        # residual of the nonlinear Schroedinger equation: i*h_t + 0.5*h_xx + |h|^2 * h
        MSE = tfm.reduce_euclidean_norm(
            tfm.abs((j * h_t) + (0.5 * h_xx) + (tfm.conj(h) * h * h)))

    grads = tape.gradient(MSE, PINN.trainable_weights)
    sgd_opt.apply_gradients(zip(grads, PINN.trainable_weights))
    del tape
    return MSE
Example #18
0
def AlphaConstraint(w):
    "Constrains w to the range [0, 1]"
    w = tfm.abs(w)
    return tfm.minimum(w, 1.0)
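A possible usage note (my addition): Keras accepts any callable as a weight constraint, so this function can be passed directly, e.g.

import tensorflow as tf
# illustrative: clamp a Dense layer's kernel to [0, 1] after each optimizer step
layer = tf.keras.layers.Dense(8, kernel_constraint=AlphaConstraint)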
Example #19
 def update_state(self, y_true, y_pred, sample_weight=None):
     y_true = tf.cast(y_true, tf.float32)
     y_pred = tf.cast(y_pred, tf.float32)
     # percentage error; note the division is unguarded against zero targets
     error = 100 * math.abs(math.subtract(y_true, y_pred)) / y_true
     return super(MAPE, self).update_state(error,
                                           sample_weight=sample_weight)
Example #20
def _delta_phi_np(x, y):
    from numpy import abs, minimum
    import math
    pi = math.pi
    d = abs(x - y)
    return minimum(d, 2 * pi - d)
Example #21
def _delta_phi_tf(x, y):
    from tensorflow.math import abs, minimum
    import math
    pi = math.pi
    d = abs(x - y)
    return minimum(d, 2 * pi - d)
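A tiny check of the wrap-around behaviour (my addition): angles separated across the 0/2*pi seam should give the short way around.

import numpy as np
print(_delta_phi_np(0.1, 2 * np.pi - 0.1))  # ~0.2, not ~6.08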
Example #22
def sat_damp(u, uth=1.0, b0=1.0):
    # tf tensors have no .pow() method; use the ** operator instead
    return b0 / (1 + math.abs(u / uth) ** 2)
Example #23
def mae(y_true, y_pred):
    # masked MAE: channel 0 holds targets, channel 1 a 0/1 validity mask
    mask = y_true[:, :, :, 1]
    y_true = y_true[:, :, :, 0]
    output = (tf_maths.abs(y_true - y_pred)) * mask
    return tf_maths.reduce_sum(output) / tf_maths.reduce_sum(mask)
Example #24
def zeni_res(y_true, y_reco):
    # fold zenith residuals into [0, pi)
    diffs = tf.minimum(abs(y_true[:, 1] - y_reco[:, 1]),
                       abs(y_true[:, 1] - y_reco[:, 1]) % np.pi)
    return diffs.numpy()
Example #25
def performance_e_alpha(loader, test_step, metrics, save=False, save_path=''):
    '''Function to test and plot the performance of the graph DL model.
    Input should be DOM position x, y, z, time, charge (log10);
    target should be energy (log10), zenith angle, azimuthal angle, NOT unit vectors.
    '''
    loss = 0
    prediction_list, target_list = [], []
    for batch in loader:
        inputs, targets = batch
        predictions, targets, out = test_step(inputs, targets)
        loss += out

        prediction_list.append(predictions)
        target_list.append(targets)

    y_reco = tf.concat(prediction_list, axis=0).numpy()
    y_true = tf.concat(target_list, axis=0)
    y_true = tf.cast(y_true, tf.float32).numpy()

    energy = y_true[:, 0]
    counts, bins = np.histogram(energy, bins=10)

    xs = (bins[1:] + bins[:-1]) / 2

    w_energies, u_angles = [], []
    e_sig, alpha_sig = [], []
    old_energy, old_alpha = [], []
    zenith, azimuth = [], []
    for i in range(len(bins) - 1):
        idx = np.logical_and(energy > bins[i], energy < bins[i + 1])

        w, u_angle, old = metrics(y_reco[idx, :], y_true[idx, :])
        old_energy.append(old[0])
        old_alpha.append(old[1])
        w_energies.append(w[1])
        u_angles.append(u_angle[1])
        e_sig.append([w[0], w[2]])
        alpha_sig.append([u_angle[0], u_angle[2]])
        zeni, azi = zeni_alpha(y_reco[idx, :], y_true[idx, :]), azi_alpha(
            y_reco[idx, :], y_true[idx, :])
        zenith.append(zeni)
        azimuth.append(azi)
    zenith, azimuth = np.array(zenith), np.array(azimuth)
    fig, ax = plt.subplots(ncols=3, nrows=4, figsize=(12, 20))
    axesback = [(0, 0), (1, 0), (2, 0), (3, 0)]
    for i, j in axesback:
        a_ = ax[i][j].twinx()
        a_.step(xs, counts, color="gray", zorder=10, alpha=0.7, where="mid")
        a_.set_yscale("log")
        ax[i][j].set_xlabel("Log(E)")

    #structure: my metrics, old metrics, histogram

    # Energy reconstruction
    ax_top = ax[0]

    ax_top[0].errorbar(xs,
                       w_energies,
                       yerr=np.array(e_sig).T,
                       fmt='k.',
                       capsize=2,
                       linewidth=1,
                       ecolor='r',
                       label='data')
    ax_top[0].plot(xs,
                   old_energy,
                   'bo',
                   label=r"$w(\Delta log(E))$" + '(old metric)')
    ax_top[0].set_title("Energy Performance")
    ax_top[0].set_ylabel(r"$\Delta log(E)$")

    # pull_e=(y_reco[:,0]-tf.reduce_mean(y_reco[:,0]))*np.sqrt(np.abs(y_reco[:,3]))
    # ax_top[1].hist(pull_e, label='Pull plot', bins=50, histtype='step')
    # ax_top[1].set_title("Solid angle pull plot)")
    # ax_top[1].set_title("Energy Performance (old metric)")
    # ax_top[1].set_ylabel(r"$w(\Delta log(E))$")

    ax_top[1].hist2d(y_true[:,0], y_reco[:,0], bins=100,\
                   range=[np.percentile(y_true[:,0],[1,99]), np.percentile(y_reco[:,0],[1,99])])
    ax_top[1].set_title("ML Reco/True")
    ax_top[1].set(xlabel="Truth (log(E))", ylabel="ML Reco (log(E))")
    res_e = abs(y_true[:, 0] - y_reco[:, 0])
    ax_top[2].hist2d(np.abs(y_reco[:,3]), res_e, bins=100, \
                   range=[np.percentile(np.abs(y_reco[:,3]),[1,99]), np.percentile(res_e,[1,99])])
    ax_top[2].set_title("ML Kappa correlation with Energy error")
    ax_top[2].set(xlabel=r"$\kappa$", ylabel=r"$\Delta E$")
    for axi in ax_top:
        axi.legend()

    # Alpha reconstruction
    ax_m = ax[1]

    ax_m[0].errorbar(xs,
                     u_angles,
                     yerr=np.array(alpha_sig).T,
                     fmt='k.',
                     capsize=2,
                     linewidth=1,
                     ecolor='r',
                     label=r'Median $\pm \sigma$')
    ax_m[0].plot(xs, old_alpha, 'bo', label=r"$w(\Omega)$" + '(old metric)')
    ax_m[0].set_title("Angle Performance")
    ax_m[0].set_ylabel(r"$\Delta \Omega$")

    alphas = alpha_from_angle(y_reco, y_true)

    pull_alpha = np.array(alphas - tf.reduce_mean(alphas)) * np.sqrt(
        np.abs(y_reco[:, 3]))
    pull_alpha = np.reshape(pull_alpha, -1)
    vals, x, _ = ax_m[1].hist(pull_alpha,
                              label='Pull plot',
                              bins=50,
                              histtype='step',
                              density=1)
    ax_m[1].plot(x, norm.pdf(x, 0, 1))
    ax_m[1].set_title("Solid angle pull plot)")
    # ax_m[1].set_ylabel(r"$w(\Omega)$")


    ax_m[2].hist2d(np.abs(y_reco[:,3]), alphas, bins=100, \
                   range=[np.percentile(np.abs(y_reco[:,3]),[1,99]), np.percentile(alphas,[1,99])])
    ax_m[2].set_title("ML Kappa correlation with angle error")
    ax_m[2].set(xlabel=r"$\kappa$", ylabel=r"$\Delta \Omega$")
    for axi in ax_m:
        axi.legend()
    #Zenith reconstruction
    ax_z = ax[2]

    ax_z[0].errorbar(xs,
                     zenith[:, 1],
                     yerr=[zenith[:, 0], zenith[:, 2]],
                     fmt='k.',
                     capsize=2,
                     linewidth=1,
                     ecolor='r',
                     label=r'Median $\pm \sigma$')
    ax_z[0].set_title("Zenith Performance")
    ax_z[0].plot(xs, zenith[:, 3], 'bo', label='68th')
    ax_z[0].set_ylabel(r"$\Delta \Theta$")

    reszeni = np.abs(y_reco[:, 1] % (np.pi / 2) - y_true[:, 1])
    ax_z[1].hist(reszeni, label="ML reco - Truth", histtype="step", bins=50)
    ax_z[1].hist(y_reco[:, 1] % (np.pi / 2),
                 label="ML reco",
                 histtype="step",
                 bins=50)
    ax_z[1].hist(y_true[:, 1], label="Truth", histtype="step", bins=50)

    ax_z[1].set_title("Zenith Perfomance")
    ax_z[1].set_ylabel(r"$\Theta$")

    ax_z[2].hist2d(np.abs(y_reco[:,3]), reszeni, bins=100,\
                  range=[np.percentile(np.abs(y_reco[:,3]),[1,99]), np.percentile(reszeni,[1,99])])
    ax_z[2].set_title("ML Kappa correlation with zenith error")
    ax_z[2].set(xlabel=r"$\kappa$", ylabel=r"$\Delta \Theta$")
    for axi in ax_z:
        axi.legend()
    #Azimuth reconstruction

    ax_az = ax[3]

    ax_az[0].errorbar(xs,
                      azimuth[:, 1],
                      yerr=[azimuth[:, 0], azimuth[:, 2]],
                      fmt='k.',
                      capsize=2,
                      linewidth=1,
                      ecolor='r',
                      label=r'Median $\pm \sigma$')
    ax_az[0].set_title("Azimuth Performance")
    ax_az[0].plot(xs, azimuth[:, 3], 'bo', label='68th')
    ax_az[0].set_ylabel(r"$\Delta \phi$")

    resazi = np.abs(y_reco[:, 2] % (2 * np.pi) - y_true[:, 2])
    ax_az[1].hist(resazi, label="ML reco - Truth", histtype="step", bins=50)
    ax_az[1].hist(y_reco[:, 2] % (2 * np.pi),
                  label="ML reco",
                  histtype="step",
                  bins=50)
    ax_az[1].hist(y_true[:, 2], label="Truth", histtype="step", bins=50)

    ax_az[1].set_title("Azimuth Perfomance")
    ax_az[1].set_ylabel(r"$\phi$")

    ax_az[2].hist2d(np.abs(y_reco[:,3]), resazi, bins=100,\
                  range=[np.percentile(np.abs(y_reco[:,3]),[1,99]), np.percentile(resazi,[1,99])])
    ax_az[2].set_title("ML Kappa correlation with azimuth error")
    ax_az[2].set(xlabel=r"$\kappa$", ylabel=r"$\Delta \phi$")
    for axi in ax_az:
        axi.legend()
    fig.tight_layout()
    if save:
        plt.savefig(save_path)
    return fig, ax
Example #26
def kap_to_sig(kappa):
    # circular standard deviation is sqrt(-2 ln(I1(k)/I0(k))); iv is
    # scipy.special.iv, and approx is a module-level large-kappa approximation
    # of the Bessel ratio, used above k = 50. Note the extra outer sqrt(abs(.))
    # kept from the source.
    kappa = np.sqrt(kappa**2) + eps  # = abs(kappa) + eps
    sigs = np.where(
        kappa > 50, np.sqrt(abs(np.sqrt(-2 * np.log(approx(kappa))))),
        np.sqrt(abs(np.sqrt(-2 * np.log(iv(1, kappa) / iv(0, kappa))))))
    return sigs
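The iv(1, k)/iv(0, k) ratio is the standard route from a von Mises concentration to a circular standard deviation. A self-contained variant (my sketch; the source's approx() helper and the extra outer sqrt are not reproduced) using scipy's exponentially scaled Bessel functions, which stay finite even for large kappa:

import numpy as np
from scipy.special import i0e, i1e

def circ_std(kappa):
    r = i1e(kappa) / i0e(kappa)   # = I1/I0; the exponential scaling cancels in the ratio
    return np.sqrt(-2 * np.log(r))

print(circ_std(np.array([1.0, 10.0, 100.0])))  # shrinks as concentration grows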
Example #27
def define_model():

    ##################################Image-1########################################################

    input_1 = Input(shape=np.shape(X[0][0]), batch_size=None, name="Image_1")

    conv_1_1 = Conv2D(filters=5,
                      kernel_size=(13, 13),
                      strides=1,
                      padding="same",
                      activation="relu",
                      use_bias=True,
                      kernel_initializer="glorot_uniform",
                      kernel_regularizer=l2(0.01))(input_1)

    max_pool_1_1 = MaxPool2D(pool_size=(7, 7), strides=1,
                             padding="valid")(conv_1_1)

    conv_2_1 = Conv2D(filters=4,
                      kernel_size=(9, 9),
                      strides=1,
                      padding="valid",
                      activation="relu",
                      use_bias=True,
                      kernel_initializer="glorot_uniform",
                      kernel_regularizer=l2(0.01))(max_pool_1_1)

    max_pool_2_1 = MaxPool2D(pool_size=(5, 5), strides=1,
                             padding="valid")(conv_2_1)

    norm_1_1 = BatchNormalization()(max_pool_2_1)

    conv_3_1 = Conv2D(filters=5,
                      kernel_size=(3, 3),
                      strides=1,
                      padding="valid",
                      activation="relu",
                      use_bias=True,
                      kernel_initializer="glorot_uniform",
                      kernel_regularizer=l2(0.001))(norm_1_1)

    max_pool_3_1 = MaxPool2D(pool_size=(3, 3), strides=1,
                             padding="valid")(conv_3_1)

    ################################Image-2######################################################

    input_2 = Input(shape=np.shape(X[0][0]), batch_size=None, name="Image_2")

    conv_1_2 = Conv2D(filters=5,
                      kernel_size=(13, 13),
                      strides=1,
                      padding="same",
                      activation="relu",
                      use_bias=True,
                      kernel_initializer="glorot_uniform",
                      kernel_regularizer=l2(0.01))(input_2)

    max_pool_1_2 = MaxPool2D(pool_size=(7, 7), strides=1,
                             padding="valid")(conv_1_2)

    conv_2_2 = Conv2D(filters=4,
                      kernel_size=(9, 9),
                      strides=1,
                      padding="valid",
                      activation="relu",
                      use_bias=True,
                      kernel_initializer="glorot_uniform",
                      kernel_regularizer=l2(0.01))(max_pool_1_2)

    max_pool_2_2 = MaxPool2D(pool_size=(5, 5), strides=1,
                             padding="valid")(conv_2_2)

    norm_1_2 = BatchNormalization()(max_pool_2_2)

    conv_3_2 = Conv2D(filters=5,
                      kernel_size=(3, 3),
                      strides=1,
                      padding="valid",
                      activation="relu",
                      use_bias=True,
                      kernel_initializer="glorot_uniform",
                      kernel_regularizer=l2(0.001))(norm_1_2)

    max_pool_3_2 = MaxPool2D(pool_size=(3, 3), strides=1,
                             padding="valid")(conv_3_2)

    ##########################################################################################

    lambda_func = Lambda(lambda x: math.abs(x[0] - x[1]))(
        [max_pool_3_1, max_pool_3_2])  #([norm_1_1,norm_1_2])#

    ##############################################################################
    ##########################Full-Connection#####################################

    flat = Flatten()(lambda_func)

    dense_1 = Dense(units=256,
                    activation="relu",
                    use_bias=True,
                    kernel_initializer="glorot_uniform",
                    bias_initializer="glorot_uniform",
                    kernel_regularizer=l2(0.02),
                    activity_regularizer=l2(0.02))(flat)

    drop1 = Dropout(0.4)(dense_1)

    dense_2 = Dense(units=128,
                    activation="relu",
                    use_bias=False,  # bias_initializer below is inert when use_bias=False
                    kernel_initializer="glorot_uniform",
                    bias_initializer="glorot_uniform",
                    kernel_regularizer=l2(0.02))(drop1)  #(flat)

    drop2 = Dropout(0.4)(dense_2)

    dense_3 = Dense(units=1,
                    activation="sigmoid",
                    use_bias=False,
                    kernel_initializer="glorot_uniform",
                    bias_initializer="glorot_uniform",
                    kernel_regularizer=l2(0.02))(drop2)

    model = Model(inputs=[input_1, input_2], outputs=dense_3)
    #sgd = tensorflow.optimizers.SGD(lr=0.01, momentum=0.9, decay=0.001)
    opt = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, amsgrad=True)

    model.compile(optimizer=opt,
                  loss="binary_crossentropy",
                  metrics=["accuracy"])

    return model
Example #28
import numpy as np
import pandas
from scipy import fft
from tensorflow import math as tf_math
import torch
import ffht


TOP_K_ABS_FUNC = np.array([lambda arr, K: np.argpartition(np.abs(arr), -K)[-K:],
                           lambda arr, K: tf_math.top_k(tf_math.abs(arr), K, sorted=False)[1].numpy(),
                           lambda arr, K: torch.topk(torch.abs(torch.tensor(arr)), K,
                                                     largest=True, sorted=False).indices.numpy()],
                          dtype=object)


def FastIHT_WHT(y, K, Q, d, Sigma, top_k_func=1):
    """
    Fast iterative hard thresholding algorithm with partial Walsh-Hadamard Transform sensing matrices.
    y : numpy.ndarray
        the measurement vector
    K : int
        number of nonzero entries in the recovered signal
    Q : int
        dimension of y
    d : int
        dimension of the recovered signal, must be a power of 2
    Sigma : numpy.ndarray
        a Q-dimensional array consisting of row indices of the partial WHT matrix
    top_k_func : {0, 1, 2}
        indicates the function used for computing the top K indices of a vector
        0 - numpy.argpartition
Example #29
def zeni_alpha(y_true, y_reco):
    diffs = tf.minimum(abs(y_true[:, 1] - y_reco[:, 1]),
                       abs(y_true[:, 1] - y_reco[:, 1]) % np.pi)
    # 16th/50th/84th percentiles plus the 68th, converted to degrees
    u_zen = 180 / np.pi * tfp.stats.percentile(diffs, [50-34, 50, 50+34, 68])
    return u_zen.numpy()
Example #30
def gradAndSq2D(x):
    """@return |dx|, dx^2"""
    dy, dx = tf.image.image_gradients(x)
    gradSq = tfm.abs(dy**2 + dx**2)[:, :-1, :-1]
    grad = tfm.sqrt(gradSq)
    return grad, gradSq