Example #1
import logging

import numpy as np
import numexpr

logger = logging.getLogger(__name__)  # module-level logger assumed by this snippet


def bayesian_blocks(tt, ttstart, ttstop, p0, bkg_integral_distribution=None):
    """
    Divide a series of events, characterized by their arrival times, into blocks
    of perceptibly constant count rate. If the background integral distribution
    is given, divide the series into blocks where the difference with respect to
    the background is perceptibly constant.

    :param tt: arrival times of the events
    :param ttstart: the start of the interval
    :param ttstop: the stop of the interval
    :param p0: the false positive probability. This is used to decide the penalization on the likelihood, so this
    parameter affects the number of blocks
    :param bkg_integral_distribution: (default: None) if given, the algorithm accounts for the presence of the background and
    finds changes in rate with respect to the background
    :return: the np.array containing the edges of the blocks
    """

    # Verify that the input array is one-dimensional
    tt = np.asarray(tt, dtype=float)

    assert tt.ndim == 1

    if bkg_integral_distribution is not None:

        # Transform the inhomogeneous Poisson process into a homogeneous one with rate 1
        # by changing the time axis according to the background rate
        logger.debug("Transforming the inhomogeneous Poisson process to a homogeneous one with rate 1...")
        t = np.array(bkg_integral_distribution(tt))
        logger.debug("done")

        # Now compute the start and stop time in the new system
        tstart = bkg_integral_distribution(ttstart)
        tstop = bkg_integral_distribution(ttstop)

    else:

        t = tt
        tstart = ttstart
        tstop = ttstop

    # Create initial cell edges (Voronoi tessellation)
    edges = np.concatenate([[t[0]],
                            0.5 * (t[1:] + t[:-1]),
                            [t[-1]]])

    # Create the edges also in the original time system
    edges_ = np.concatenate([[tt[0]],
                             0.5 * (tt[1:] + tt[:-1]),
                             [tt[-1]]])


    # Create a lookup table to be able to transform back from the transformed system
    # to the original one
    lookup_table = {key: value for (key, value) in zip(edges, edges_)}

    # The last block length is 0 by definition
    block_length = tstop - edges

    if np.sum((block_length <= 0)) > 1:

        raise RuntimeError("Events appears to be out of order! Check for order, or duplicated events.")

    N = t.shape[0]

    # arrays to store the best configuration
    best = np.zeros(N, dtype=float)
    last = np.zeros(N, dtype=int)

    # eq. 21 from Scargle 2012
    prior = 4 - np.log(73.53 * p0 * (N**-0.478))

    logger.debug("Finding blocks...")

    # This is where the computation happens. Following Scargle et al. 2012.
    # This loop has been optimized for speed:
    # * the expression for the fitness function has been rewritten to
    #  avoid multiple log computations, and to avoid power computations
    # * the use of scipy.weave and numexpr has been evaluated. The latter
    #  gives a big gain (~40%) if used for the fitness function. No other
    #  gain is obtained by using it anywhere else

    # Set numexpr precision to low (more than enough for us), which is
    # faster than high
    oldaccuracy = numexpr.set_vml_accuracy_mode('low')
    numexpr.set_num_threads(1)
    numexpr.set_vml_num_threads(1)

    # Speed tricks: resolve once for all the functions which will be used
    # in the loop
    numexpr_evaluate = numexpr.evaluate
    numexpr_re_evaluate = numexpr.re_evaluate

    # Pre-compute this

    aranges = np.arange(N+1, 0, -1)

    for R in range(N):
        br = block_length[R + 1]
        T_k = block_length[:R + 1] - br  # this looks like it is not used, but it actually is,
                                         # inside the numexpr expression

        # N_k: number of elements in each block
        # This expression has been simplified for the case of
        # unbinned events (i.e., one element in each block)
        # It was:
        #N_k = cumsum(x[:R + 1][::-1])[::-1]
        # Now it is:
        N_k = aranges[N - R:]
        # where aranges has been pre-computed

        # Evaluate fitness function
        # This is the slowest part, which I'm speeding up by using
        # numexpr. It provides a ~40% gain in execution speed.

        # The first time we need to "compile" the expression in numexpr,
        # all the other times we can reuse it

        if R == 0:

            fit_vec = numexpr_evaluate('''N_k * log(N_k/ T_k) ''',
                                       optimization='aggressive', local_dict={'N_k': N_k, 'T_k': T_k})

        else:

            fit_vec = numexpr_re_evaluate(local_dict={'N_k': N_k, 'T_k': T_k})

        A_R = fit_vec - prior  # type: np.ndarray

        A_R[1:] += best[:R]

        i_max = A_R.argmax()

        last[R] = i_max
        best[R] = A_R[i_max]

    numexpr.set_vml_accuracy_mode(oldaccuracy)

    logger.debug("Done\n")

    # Now peel off and find the blocks (see the algorithm in Scargle et al.)
    change_points = np.zeros(N, dtype=int)
    i_cp = N
    ind = N

    while True:

        i_cp -= 1

        change_points[i_cp] = ind

        if ind == 0:

            break

        ind = last[ind - 1]

    change_points = change_points[i_cp:]

    edg = edges[change_points]

    # Transform the found edges back into the original time system

    if bkg_integral_distribution is not None:

        # Build a list explicitly: map() returns an iterator in Python 3,
        # which would break the indexed assignments below
        final_edges = [lookup_table[x] for x in edg]

    else:

        final_edges = edg

    # Now fix the first and last edge so that they are tstart and tstop
    final_edges[0] = ttstart
    final_edges[-1] = ttstop

    return np.asarray(final_edges)
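The evaluate/re_evaluate pairing above is the main speed trick in this example: numexpr compiles the expression once, on the first iteration, and re_evaluate re-runs the cached bytecode with fresh operands. A minimal, self-contained sketch of the same pattern (toy arrays, not the Bayesian-blocks inputs):

import numpy as np
import numexpr

N_k = np.arange(10, 0, -1).astype(float)
T_k = np.linspace(1.0, 2.0, 10)

# The first call compiles and caches the expression...
first = numexpr.evaluate('N_k * log(N_k / T_k)',
                         local_dict={'N_k': N_k, 'T_k': T_k})

# ...later calls reuse the cached bytecode with new operand values
N_k = N_k * 2.0
second = numexpr.re_evaluate(local_dict={'N_k': N_k, 'T_k': T_k})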
Example #2

import numpy as np
import numexpr as ne

# gen_data, RISK_FREE and VOLATILITY come from the surrounding benchmark module

nopt = 100000
price, strike, t = gen_data(nopt)
call = np.zeros(nopt, dtype=np.float64)
put = -np.ones(nopt, dtype=np.float64)


def black_scholes(price, strike, t, rate, vol):
    mr = -rate
    sig_sig_two = vol * vol * 2

    P = price
    S = strike
    T = t

    call = ne.evaluate(
        "P * (0.5 + 0.5 * erf((log(P / S) - T * mr + 0.25 * T * sig_sig_two)"
        " * 1/sqrt(T * sig_sig_two)))"
        " - S * exp(T * mr) * (0.5 + 0.5 * erf((log(P / S) - T * mr"
        " - 0.25 * T * sig_sig_two) * 1/sqrt(T * sig_sig_two)))"
    )
    put = ne.evaluate("call - P + S * exp(T * mr) ")

    return call, put


#ne.set_vml_num_threads(ne.detect_number_of_cores())
ne.set_num_threads(ne.detect_number_of_cores())
ne.set_vml_accuracy_mode('high')

if __name__ == '__main__':
    black_scholes(price, strike, t, RISK_FREE, VOLATILITY)
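set_vml_accuracy_mode trades precision for speed and only has an effect on VML-enabled numexpr builds; 'high' keeps the transcendental functions near full double precision. A sketch for measuring how far a lowered mode drifts from a reference, assuming the same VML-enabled build the example above needs (erf inside ne.evaluate is a VML extension) and SciPy for the reference erf:

import numpy as np
import numexpr as ne
from scipy.special import erf  # reference implementation

x = np.random.uniform(0.1, 2.0, 100000)
ref = 0.5 + 0.5 * erf(x)

ne.set_vml_accuracy_mode('high')
hi = ne.evaluate("0.5 + 0.5 * erf(x)")

ne.set_vml_accuracy_mode('low')
lo = ne.evaluate("0.5 + 0.5 * erf(x)")

print("max |high - ref|:", np.abs(hi - ref).max())
print("max |low  - ref|:", np.abs(lo - ref).max())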
Example #3
import numpy as np
import numexpr as ne
import cupy as cp
from .layer import Layer
import batch_norm_stats_cy
ne.set_vml_accuracy_mode('low')
profile = lambda x: x


class BatchNormLayer(Layer):
    """
    https://arxiv.org/pdf/1502.03167.pdf
    """
    def __init__(self,
                 layer_name,
                 input_dimension=4,
                 incoming_chans=None,
                 run_momentum=0.95,
                 is_on_gpu=True):
        """
        : input_dimension: should be 4 if following eg. convolution, 2 for eg. dense layer
        : incoming_chans: is number of feature maps (channels) for conv layer, 
        or features (cols) for dense layer 
        """
        super().__init__(layer_name)
        self.eps = 1e-5  # Fuzz factor for numerical stability
        self.input_dimension = input_dimension
        self.non_learned_params = {"running_mean": None, "running_std": None}
        self.run_momentum = run_momentum
        if self.input_dimension not in {2, 4}:
            raise ValueError(
                "input_dimension must be 2 or 4")  # message reconstructed from the check above; the listing truncates here
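# The forward pass is not part of this snippet. For reference, the batch
# normalization transform from the paper cited in the docstring, written with
# numexpr; a sketch only, with illustrative names that are not the class's API:
import numpy as np
import numexpr as ne

x = np.random.randn(64, 32).astype(np.float32)  # (batch, features) activations
gamma = np.ones(32, dtype=np.float32)           # learned scale
beta = np.zeros(32, dtype=np.float32)           # learned shift
mean = x.mean(axis=0)
var = x.var(axis=0)
eps = 1e-5                                      # fuzz factor, as in __init__

y = ne.evaluate("gamma * (x - mean) / sqrt(var + eps) + beta")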
Example #4
import logging

import numpy as np
import numexpr

logger = logging.getLogger(__name__)  # module-level logger assumed by this snippet


def bayesian_blocks_not_unique(tt, ttstart, ttstop, p0):

    # Verify that the input array is one-dimensional
    tt = np.asarray(tt, dtype=float)

    assert tt.ndim == 1

    # Now create the array of unique times

    unique_t = np.unique(tt)

    t = tt
    tstart = ttstart
    tstop = ttstop

    # Create initial cell edges (Voronoi tessellation) using the unique time stamps

    edges = np.concatenate([[tstart],
                            0.5 * (unique_t[1:] + unique_t[:-1]),
                            [tstop]])

    # The last block length is 0 by definition
    block_length = tstop - edges

    if np.sum((block_length <= 0)) > 1:

        raise RuntimeError("Events appear to be out of order! Check the ordering, or look for duplicate events.")

    N = unique_t.shape[0]

    # arrays to store the best configuration
    best = np.zeros(N, dtype=float)
    last = np.zeros(N, dtype=int)

    # Pre-computed priors (for speed)
    # eq. 21 from Scargle 2012

    priors = 4 - np.log(73.53 * p0 * np.power(np.arange(1, N + 1), -0.478))

    # Count how many events are in each Voronoi cell

    x, _ = np.histogram(t, edges)

    # Speed tricks: resolve once for all the functions which will be used
    # in the loop
    cumsum = np.cumsum
    log = np.log
    argmax = np.argmax
    numexpr_evaluate = numexpr.evaluate
    arange = np.arange

    # Decide the step for reporting progress
    incr = max(int(float(N) / 100.0 * 10), 1)

    logger.debug("Finding blocks...")

    # This is where the computation happens. Following Scargle et al. 2012.
    # This loop has been optimized for speed:
    # * the expression for the fitness function has been rewritten to
    #  avoid multiple log computations, and to avoid power computations
    # * the use of scipy.weave and numexpr has been evaluated. The latter
    #  gives a big gain (~40%) if used for the fitness function. No other
    #  gain is obtained by using it anywhere else

    # Set numexpr precision to low (more than enough for us), which is
    # faster than high
    oldaccuracy = numexpr.set_vml_accuracy_mode('low')
    numexpr.set_num_threads(1)
    numexpr.set_vml_num_threads(1)

    for R in range(N):
        br = block_length[R + 1]
        T_k = block_length[:R + 1] - br

        # N_k: number of elements in each block.
        # Events here may be duplicated, so we cannot use the simplified
        # arange expression valid for unbinned (unique) events; instead we
        # accumulate the per-cell counts computed with np.histogram above:
        N_k = cumsum(x[:R + 1][::-1])[::-1]

        # Evaluate fitness function
        # This is the slowest part, which I'm speeding up by using
        # numexpr. It provides a ~40% gain in execution speed.

        fit_vec = numexpr_evaluate('''N_k * log(N_k/ T_k) ''',
                                   optimization='aggressive',
                                   local_dict={'N_k': N_k, 'T_k': T_k})

        p = priors[R]

        A_R = fit_vec - p

        A_R[1:] += best[:R]

        i_max = argmax(A_R)

        last[R] = i_max
        best[R] = A_R[i_max]

    numexpr.set_vml_accuracy_mode(oldaccuracy)

    logger.debug("Done\n")

    # Now find blocks
    change_points = np.zeros(N, dtype=int)
    i_cp = N
    ind = N
    while True:
        i_cp -= 1
        change_points[i_cp] = ind

        if ind == 0:
            break

        ind = last[ind - 1]

    change_points = change_points[i_cp:]

    finalEdges = edges[change_points]

    return np.asarray(finalEdges)
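The cumsum expression above reduces to the arange shortcut used in the unique-events variants whenever each Voronoi cell holds exactly one event, since the reversed cumulative sum of ones is just a descending range; a two-line check:

import numpy as np

x = np.ones(8)  # one event per cell, the unbinned case
R = 5
assert np.array_equal(np.cumsum(x[:R + 1][::-1])[::-1],
                      np.arange(R + 1, 0, -1))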
Example #5
import sys

import numpy

# compare_times, expressions and the *_ttime lists are defined earlier in
# this benchmark script


def compare(expression=False):
    if expression:
        compare_times(expression, 1)
        sys.exit(0)
    nexpr = 0
    for expr in expressions:
        nexpr += 1
        compare_times(expr, nexpr)
    print()


if __name__ == "__main__":
    import numexpr

    numexpr.print_versions()

    numexpr.set_vml_accuracy_mode("low")
    numexpr.set_vml_num_threads(2)

    if len(sys.argv) > 1:
        expression = sys.argv[1]
        print "expression-->", expression
        compare(expression)
    else:
        compare()

    tratios = numpy.array(numpy_ttime) / numpy.array(numexpr_ttime)
    stratios = numpy.array(numpy_sttime) / numpy.array(numexpr_sttime)
    ntratios = numpy.array(numpy_nttime) / numpy.array(numexpr_nttime)

    print "*************** Numexpr vs NumPy speed-ups *******************"
    #     print "numpy total:", sum(numpy_ttime)/iterations
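compare_times and the *_ttime lists live elsewhere in this benchmark module. A minimal self-contained version of the same numexpr-vs-NumPy measurement, assuming nothing beyond the two libraries:

import timeit

import numpy
import numexpr

a = numpy.random.rand(1000000)
b = numpy.random.rand(1000000)

# Best of three runs, ten evaluations each
t_np = min(timeit.repeat(lambda: 2 * a + 3 * b, number=10, repeat=3))
t_ne = min(timeit.repeat(lambda: numexpr.evaluate("2*a + 3*b"),
                         number=10, repeat=3))
print("numpy: %.4fs  numexpr: %.4fs  speed-up: %.2fx" % (t_np, t_ne, t_np / t_ne))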
Example #6
from basemodel import BaseModel
import numpy as np
import numexpr as ne


if ne.use_vml:
    ne.set_vml_accuracy_mode('fast')

class SIE(BaseModel):
    def __init__(self, b, x0, y0, e, te, s):
        super(SIE, self).__init__(x0, y0, e, te)
        self.b = b
        # Replace a core radius of s == 0 with 1e-4 to avoid division-by-zero
        # in the potential calculation
        self.s = s if s != 0.0 else 1e-4

    def modelargs(self):
        return [self.b, self.x0, self.y0, self.e, self.te, self.s]

    @BaseModel.standard_frame_rotation
    def phiarray(self, x, y, numexpr=True, *args, **kwargs):
        modelargs = self.modelargs()

        if self.e == 0:
            return spherical(x, y, modelargs, numexpr=numexpr)
        else:
            return elliptical(x, y, modelargs, numexpr=numexpr)

def elliptical(x, y, modelargs, numexpr=True):
    b, x0, y0, e, te, s = modelargs[:6]

    x2 = x * x
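# Note: the elliptical() body above is truncated by the listing. The
# ne.use_vml guard at the top of this example is the portable pattern for
# VML-only tuning; a sketch of the same guard with save-and-restore:
import numexpr as ne

if ne.use_vml:
    # set_vml_accuracy_mode returns the previous mode, so it can be restored
    old_mode = ne.set_vml_accuracy_mode('fast')
    # ... run the VML-accelerated evaluations here ...
    ne.set_vml_accuracy_mode(old_mode)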
Example #7
import logging

import numpy as np
import numexpr

logger = logging.getLogger(__name__)  # module-level logger assumed by this snippet


def bayesian_blocks(tt, ttstart, ttstop, p0, bkgIntegralDistr=None, myLikelihood=None):
    """Divide a series of events, characterized by their arrival times, into blocks
    of perceptibly constant count rate. If the background integral distribution
    is given, divide the series into blocks where the difference with respect to
    the background is perceptibly constant.


    Args:
      tt (iterable): An iterable (list, numpy.array...) containing the arrival
                     time of the events.
                     NOTE: the input array MUST be time-ordered, and without
                     duplicated entries. To ensure this, you may execute the
                     following code:

                     tt_array = numpy.asarray(tt)
                     tt_array = numpy.unique(tt_array)
                     tt_array.sort()

                     before running the algorithm.

      p0 (float): The probability of finding a variation (i.e., creating a new
                  block) when there is none. In other words, the probability of
                  a Type I error, i.e., rejecting the null hypothesis when it is
                  true. All found variations will have a post-trial significance
                  larger than p0.

      bkgIntegralDistr (function, optional): the integral distribution for the
                  background counts. It must be a function of the form f(x),
                  which must return the integral number of counts expected from
                  the background component between time 0 and x.

    Returns:
      numpy.array: the edges of the blocks found

    """

    # Verify that the input array is one-dimensional
    tt = np.asarray(tt, dtype=float)

    assert tt.ndim == 1

    if bkgIntegralDistr is not None:
        # Transform the inhomogeneous Poisson process into a homogeneous one with rate 1
        # by changing the time axis according to the background rate
        logger.debug("Transforming the inhomogeneous Poisson process to a homogeneous one with rate 1...")
        t = np.array(bkgIntegralDistr(tt))
        logger.debug("done")

        # Now compute the start and stop time in the new system
        tstart = bkgIntegralDistr(ttstart)
        tstop = bkgIntegralDistr(ttstop)
    else:
        t = tt
        tstart = ttstart
        tstop = ttstop

    # Create initial cell edges (Voronoi tessellation)
    edges = np.concatenate([[tstart],
                            0.5 * (t[1:] + t[:-1]),
                            [tstop]])

    # Create the edges also in the original time system
    edges_ = np.concatenate([[ttstart],
                             0.5 * (tt[1:] + tt[:-1]),
                             [ttstop]])


    # Create a lookup table to be able to transform back from the transformed system
    # to the original one
    lookupTable = {key: value for (key, value) in zip(edges, edges_)}

    # The last block length is 0 by definition
    block_length = tstop - edges

    if np.sum((block_length <= 0)) > 1:

        raise RuntimeError("Events appear to be out of order! Check the ordering, or look for duplicate events.")

    N = t.shape[0]

    # arrays to store the best configuration
    best = np.zeros(N, dtype=float)
    last = np.zeros(N, dtype=int)
    best_new = np.zeros(N, dtype=float)
    last_new = np.zeros(N, dtype=int)

    # Pre-computed priors (for speed)

    if myLikelihood:

        priors = myLikelihood.getPriors(N, p0)

    else:

        # eq. 21 from Scargle 2012
        #priors = 4 - np.log(73.53 * p0 * np.power(np.arange(1, N + 1), -0.478))

        priors = [4 - np.log(73.53 * p0 * N**(-0.478))] * N

    x = np.ones(N)

    # Speed tricks: resolve once for all the functions which will be used
    # in the loop
    cumsum = np.cumsum
    log = np.log
    argmax = np.argmax
    numexpr_evaluate = numexpr.evaluate
    arange = np.arange

    # Decide the step for reporting progress
    incr = max(int(float(N) / 100.0 * 10), 1)

    logger.debug("Finding blocks...")

    # This is where the computation happens. Following Scargle et al. 2012.
    # This loop has been optimized for speed:
    # * the expression for the fitness function has been rewritten to
    #  avoid multiple log computations, and to avoid power computations
    # * the use of scipy.weave and numexpr has been evaluated. The latter
    #  gives a big gain (~40%) if used for the fitness function. No other
    #  gain is obtained by using it anywhere else

    times = []
    TSs = []

    # Set numexpr precision to low (more than enough for us), which is
    # faster than high
    oldaccuracy = numexpr.set_vml_accuracy_mode('low')
    numexpr.set_num_threads(1)
    numexpr.set_vml_num_threads(1)

    for R in range(N):
        br = block_length[R + 1]
        T_k = block_length[:R + 1] - br

        # N_k: number of elements in each block
        # This expression has been simplified for the case of
        # unbinned events (i.e., one element in each block)
        # It was:
        # N_k = cumsum(x[:R + 1][::-1])[::-1]
        # Now it is:
        N_k = arange(R + 1, 0, -1)

        # Evaluate fitness function
        # This is the slowest part, which I'm speeding up by using
        # numexpr. It provides a ~40% gain in execution speed.

        fit_vec = numexpr_evaluate('''N_k * log(N_k/ T_k) ''',
                                   optimization='aggressive')

        p = priors[R]

        A_R = fit_vec - p

        A_R[1:] += best[:R]

        i_max = argmax(A_R)

        last[R] = i_max
        best[R] = A_R[i_max]

        # if(myLikelihood):
        #  logger.debug("Maximum old: %i, Maximum new: %i" %(i_max,i_max_new))
        #  logger.debug("Best old: %s, Best new: %s" %(best[R],best_new[R]))

    numexpr.set_vml_accuracy_mode(oldaccuracy)

    # if(myLikelihood):
    #   from operator import itemgetter
    #   index, element = max(enumerate(TSs), key=itemgetter(1))
    #   t1,t2 = times[index]
    #   print("Maximum TS is %s in time interval %s-%s" %(element,t1,t2))
    #
    #   best = best_new
    #   last = last_new

    # map(oneLoop,range(N))

    logger.debug("Done\n")

    # Now find blocks
    change_points = np.zeros(N, dtype=int)
    i_cp = N
    ind = N
    while True:
        i_cp -= 1
        change_points[i_cp] = ind

        if ind == 0:
            break

        ind = last[ind - 1]

    change_points = change_points[i_cp:]

    edg = edges[change_points]

    # Transform the found edges back into the original time system
    if bkgIntegralDistr is not None:
        # Build a list explicitly: map() returns an iterator in Python 3
        finalEdges = [lookupTable[x] for x in edg]
    else:
        finalEdges = edg

    return np.asarray(finalEdges)
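The per-block penalty (eq. 21 of Scargle et al. 2012) is what p0 actually controls: lowering p0 raises the penalty subtracted from the fitness, and so yields fewer blocks. A quick computation with the constant-prior variant used above:

import numpy as np

N = 1000
for p0 in (0.05, 0.01, 0.001):
    prior = 4 - np.log(73.53 * p0 * N ** (-0.478))
    print("p0=%-6g prior=%.2f" % (p0, prior))
# Smaller p0 -> larger penalty -> fewer change points survive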
Example #8
import sys

import numpy

# compare_times, expressions and the *_ttime lists are defined earlier in
# this benchmark script


def compare(expression=False):
    if expression:
        compare_times(expression, 1)
        sys.exit(0)
    nexpr = 0
    for expr in expressions:
        nexpr += 1
        compare_times(expr, nexpr)
    print()


if __name__ == '__main__':
    import numexpr
    numexpr.print_versions()

    numexpr.set_vml_accuracy_mode('low')
    numexpr.set_vml_num_threads(2)

    if len(sys.argv) > 1:
        expression = sys.argv[1]
        print "expression-->", expression
        compare(expression)
    else:
        compare()

    tratios = numpy.array(numpy_ttime) / numpy.array(numexpr_ttime)
    stratios = numpy.array(numpy_sttime) / numpy.array(numexpr_sttime)
    ntratios = numpy.array(numpy_nttime) / numpy.array(numexpr_nttime)

    print "*************** Numexpr vs NumPy speed-ups *******************"
    #     print "numpy total:", sum(numpy_ttime)/iterations
Example #9
import logging

import numpy as np
import numexpr

logger = logging.getLogger(__name__)  # module-level logger assumed by this snippet


def bayesian_blocks(tt, ttstart, ttstop, p0, bkg_integral_distribution=None):
    """
    Divide a series of events, characterized by their arrival times, into blocks
    of perceptibly constant count rate. If the background integral distribution
    is given, divide the series into blocks where the difference with respect to
    the background is perceptibly constant.

    :param tt: arrival times of the events
    :param ttstart: the start of the interval
    :param ttstop: the stop of the interval
    :param p0: the false positive probability. This is used to decide the penalization on the likelihood, so this
    parameter affects the number of blocks
    :param bkg_integral_distribution: (default: None) if given, the algorithm accounts for the presence of the background and
    finds changes in rate with respect to the background
    :return: the np.array containing the edges of the blocks
    """

    # Verify that the input array is one-dimensional
    tt = np.asarray(tt, dtype=float)

    assert tt.ndim == 1

    if bkg_integral_distribution is not None:

        # Transform the inhomogeneous Poisson process into a homogeneous one with rate 1
        # by changing the time axis according to the background rate
        logger.debug(
            "Transforming the inhomogeneous Poisson process to a homogeneous one with rate 1..."
        )
        t = np.array(bkg_integral_distribution(tt))
        logger.debug("done")

        # Now compute the start and stop time in the new system
        tstart = bkg_integral_distribution(ttstart)
        tstop = bkg_integral_distribution(ttstop)

    else:

        t = tt
        tstart = ttstart
        tstop = ttstop

    # Create initial cell edges (Voronoi tessellation)
    edges = np.concatenate([[t[0]], 0.5 * (t[1:] + t[:-1]), [t[-1]]])

    # Create the edges also in the original time system
    edges_ = np.concatenate([[tt[0]], 0.5 * (tt[1:] + tt[:-1]), [tt[-1]]])

    # Create a lookup table to be able to transform back from the transformed system
    # to the original one
    lookup_table = {key: value for (key, value) in zip(edges, edges_)}

    # The last block length is 0 by definition
    block_length = tstop - edges

    if np.sum((block_length <= 0)) > 1:

        raise RuntimeError(
            "Events appear to be out of order! Check the ordering, or look for duplicate events."
        )

    N = t.shape[0]

    # arrays to store the best configuration
    best = np.zeros(N, dtype=float)
    last = np.zeros(N, dtype=int)

    # eq. 21 from Scargle 2012
    prior = 4 - np.log(73.53 * p0 * (N**-0.478))

    logger.debug("Finding blocks...")

    # This is where the computation happens. Following Scargle et al. 2012.
    # This loop has been optimized for speed:
    # * the expression for the fitness function has been rewritten to
    #  avoid multiple log computations, and to avoid power computations
    # * the use of scipy.weave and numexpr has been evaluated. The latter
    #  gives a big gain (~40%) if used for the fitness function. No other
    #  gain is obtained by using it anywhere else

    # Set numexpr precision to low (more than enough for us), which is
    # faster than high
    oldaccuracy = numexpr.set_vml_accuracy_mode('low')
    numexpr.set_num_threads(1)
    numexpr.set_vml_num_threads(1)

    # Speed tricks: resolve once for all the functions which will be used
    # in the loop
    numexpr_evaluate = numexpr.evaluate
    numexpr_re_evaluate = numexpr.re_evaluate

    # Pre-compute this

    aranges = np.arange(N + 1, 0, -1)

    for R in range(N):
        br = block_length[R + 1]
        # T_k looks unused, but it is referenced inside the numexpr expression
        T_k = block_length[:R + 1] - br

        # N_k: number of elements in each block
        # This expression has been simplified for the case of
        # unbinned events (i.e., one element in each block)
        # It was:
        #N_k = cumsum(x[:R + 1][::-1])[::-1]
        # Now it is:
        N_k = aranges[N - R:]
        # where aranges has been pre-computed

        # Evaluate fitness function
        # This is the slowest part, which I'm speeding up by using
        # numexpr. It provides a ~40% gain in execution speed.

        # The first time we need to "compile" the expression in numexpr,
        # all the other times we can reuse it

        if R == 0:

            fit_vec = numexpr_evaluate('''N_k * log(N_k/ T_k) ''',
                                       optimization='aggressive',
                                       local_dict={
                                           'N_k': N_k,
                                           'T_k': T_k
                                       })

        else:

            fit_vec = numexpr_re_evaluate(local_dict={'N_k': N_k, 'T_k': T_k})

        A_R = fit_vec - prior  # type: np.ndarray

        A_R[1:] += best[:R]

        i_max = A_R.argmax()

        last[R] = i_max
        best[R] = A_R[i_max]

    numexpr.set_vml_accuracy_mode(oldaccuracy)

    logger.debug("Done\n")

    # Now peel off and find the blocks (see the algorithm in Scargle et al.)
    change_points = np.zeros(N, dtype=int)
    i_cp = N
    ind = N

    while True:

        i_cp -= 1

        change_points[i_cp] = ind

        if ind == 0:

            break

        ind = last[ind - 1]

    change_points = change_points[i_cp:]

    edg = edges[change_points]

    # Transform the found edges back into the original time system

    if bkg_integral_distribution is not None:

        # Build a list explicitly: map() returns an iterator in Python 3,
        # which would break the indexed assignments below
        final_edges = [lookup_table[x] for x in edg]

    else:

        final_edges = edg

    # Now fix the first and last edge so that they are tstart and tstop
    final_edges[0] = ttstart
    final_edges[-1] = ttstop

    return np.asarray(final_edges)
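Examples #1 and #9 pin numexpr to a single thread because the per-iteration arrays are small and threading overhead would dominate. Like set_vml_accuracy_mode, set_num_threads returns the previous value, so both settings can be restored afterwards; a sketch:

import numexpr

old_threads = numexpr.set_num_threads(1)             # returns the old count
old_accuracy = numexpr.set_vml_accuracy_mode('low')  # returns the old mode
try:
    pass  # ... run the small, latency-sensitive evaluations here ...
finally:
    numexpr.set_num_threads(old_threads)
    numexpr.set_vml_accuracy_mode(old_accuracy)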
Example #10
import logging

import numpy as np
import numexpr

logger = logging.getLogger(__name__)  # module-level logger assumed by this snippet
# progress_bar is a context-manager utility assumed to come from the
# surrounding package


def bayesian_blocks_not_unique(tt, ttstart, ttstop, p0):
    # Verify that the input array is one-dimensional
    tt = np.asarray(tt, dtype=float)

    assert tt.ndim == 1

    # Now create the array of unique times

    unique_t = np.unique(tt)

    t = tt
    tstart = ttstart
    tstop = ttstop

    # Create initial cell edges (Voronoi tessellation) using the unique time stamps

    edges = np.concatenate([[tstart],
                            0.5 * (unique_t[1:] + unique_t[:-1]),
                            [tstop]])

    # The last block length is 0 by definition
    block_length = tstop - edges

    if np.sum((block_length <= 0)) > 1:
        raise RuntimeError("Events appear to be out of order! Check the ordering, or look for duplicate events.")

    N = unique_t.shape[0]

    # arrays to store the best configuration
    best = np.zeros(N, dtype=float)
    last = np.zeros(N, dtype=int)

    # Pre-computed priors (for speed)
    # eq. 21 from Scargle 2012

    priors = 4 - np.log(73.53 * p0 * np.power(np.arange(1, N + 1), -0.478))

    # Count how many events are in each Voronoi cell

    x, _ = np.histogram(t, edges)

    # Speed tricks: resolve once for all the functions which will be used
    # in the loop
    cumsum = np.cumsum
    log = np.log
    argmax = np.argmax
    numexpr_evaluate = numexpr.evaluate
    arange = np.arange

    # Decide the step for reporting progress
    incr = max(int(float(N) / 100.0 * 10), 1)

    logger.debug("Finding blocks...")

    # This is where the computation happens. Following Scargle et al. 2012.
    # This loop has been optimized for speed:
    # * the expression for the fitness function has been rewritten to
    #  avoid multiple log computations, and to avoid power computations
    # * the use of scipy.weave and numexpr has been evaluated. The latter
    #  gives a big gain (~40%) if used for the fitness function. No other
    #  gain is obtained by using it anywhere else

    # Set numexpr precision to low (more than enough for us), which is
    # faster than high
    oldaccuracy = numexpr.set_vml_accuracy_mode('low')
    numexpr.set_num_threads(1)
    numexpr.set_vml_num_threads(1)

    with progress_bar(N) as progress:

        for R in range(N):
            br = block_length[R + 1]
            T_k = block_length[:R + 1] - br

            # N_k: number of elements in each block.
            # Events here may be duplicated, so we cannot use the simplified
            # arange expression valid for unbinned (unique) events; instead we
            # accumulate the per-cell counts computed with np.histogram above:
            N_k = cumsum(x[:R + 1][::-1])[::-1]

            # Evaluate fitness function
            # This is the slowest part, which I'm speeding up by using
            # numexpr. It provides a ~40% gain in execution speed.

            fit_vec = numexpr_evaluate('''N_k * log(N_k/ T_k) ''',
                                       optimization='aggressive',
                                       local_dict={'N_k': N_k, 'T_k': T_k})

            p = priors[R]

            A_R = fit_vec - p

            A_R[1:] += best[:R]

            i_max = argmax(A_R)

            last[R] = i_max
            best[R] = A_R[i_max]

            progress.increase()

    numexpr.set_vml_accuracy_mode(oldaccuracy)

    logger.debug("Done\n")

    # Now find blocks
    change_points = np.zeros(N, dtype=int)
    i_cp = N
    ind = N
    while True:
        i_cp -= 1
        change_points[i_cp] = ind

        if ind == 0:
            break

        ind = last[ind - 1]

    change_points = change_points[i_cp:]

    finalEdges = edges[change_points]

    return np.asarray(finalEdges)
Example #11
import sys

import numpy

# compare_times, expressions, eval_method and the *_ttime lists are defined
# earlier in this benchmark script


def compare(expression=False):
    if expression:
        compare_times(expression, 1)
        sys.exit(0)
    nexpr = 0
    for expr in expressions:
        nexpr += 1
        compare_times(expr, nexpr)
    print()

if __name__ == '__main__':
    import numexpr
    numexpr.print_versions()

    numpy.seterr(all='ignore')

    numexpr.set_vml_accuracy_mode('low')
    numexpr.set_vml_num_threads(2)

    if len(sys.argv) > 1:
        expression = sys.argv[1]
        print("expression-->", expression)
        compare(expression)
    else:
        compare()

    tratios = numpy.array(numpy_ttime) / numpy.array(numexpr_ttime)
    stratios = numpy.array(numpy_sttime) / numpy.array(numexpr_sttime)
    ntratios = numpy.array(numpy_nttime) / numpy.array(numexpr_nttime)


    print("eval method: %s" % eval_method)
Example #12
## The position of smoke is a tuple (x, y, z). Remember that x, y and z are all floats, not integers.
## The met-condition is a tuple (u, v, z, stab).
## The z-speed is ignored in this version of the puff model, because the diffusion coefficients do not account for the z-wind.
## The bundled WRF processor paths are HARDCODED; please modify them if needed. "wrf_processor.py" is the only script file licensed under the MIT License.
## Some comments were written in Chinese; they will be translated into English in the next release.

import threading

import os

import math
import datetime

import numpy
import numexpr
numexpr.set_vml_accuracy_mode("fast")

import logging, sys

from smoke_def import smoke_def
from global_settings import *
CoreMode = CORE.strip().lower()
if CoreMode == 'gpu':
    try:
        import gpu_core
        from gpu_core import run_gpu_conc
    except Exception as ex:
        print(ex)
        cuda_disabled = True
    else:
        cuda_disabled = False