Example #1
from itertools import permutations


def duplicate48Points(x0, y0, z0):
    # Find the 48 equivalent points by permuting the indices and adding negative signs.
    x0, y0, z0 = ary([x0, y0, z0])
    x, y, z = [], [], []

    for n in range(8):  # the three bits of n select one of the 8 sign combinations
        for px, py, pz in permutations([x0, y0, z0]):  # the 6 orderings of the coordinates
            x.append((-1)**(n >> 2) * px)
            y.append((-1)**(n >> 1) * py)
            z.append((-1)**(n >> 0) * pz)

    return ary([x, y, z]).T
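A quick usage sketch (assuming `ary` is the `numpy.array` alias used throughout these snippets): for a generic point, the returned array is the full 48-point symmetry orbit.

import numpy as np
ary = np.array  # assumed alias

points = duplicate48Points(0.1, 0.2, 0.3)
print(points.shape)                    # (48, 3)
print(len(np.unique(points, axis=0)))  # 48 distinct points for a generic (x0, y0, z0)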
Example #2
    def wall_repel(self):
        force = ary([0., 0.])
        # the tangential walls' repulsion
        if quadrature(self.pos) < radius_min + self.wall_effect_thickness:
            # close to the centre of the cable
            direc = self.pos / quadrature(self.pos)
            dist = quadrature(self.pos) - radius_min  # distance from the inner radius wall
            force += direc * self.wall_kernel(dist)
        elif quadrature(self.pos) > radius_max - self.wall_effect_thickness:
            direc = -self.pos / quadrature(self.pos)
            dist = radius_max - quadrature(self.pos)  # distance from the outer radius wall
            force += direc * self.wall_kernel(dist)
        # the radial walls' repulsion
        if self.pos[1] > 1 / sqrt(3) * self.pos[0] - 2 / sqrt(3) * self.wall_effect_thickness:
            direc = 0.5 * ary([1, -sqrt(3)])
            dist = direc.dot(self.pos)
            force += direc * self.wall_kernel(dist)
        elif self.pos[1] < -1 / sqrt(3) * self.pos[0] + 2 / sqrt(3) * self.wall_effect_thickness:
            direc = 0.5 * ary([1, sqrt(3)])
            dist = direc.dot(self.pos)
            force += direc * self.wall_kernel(dist)
        return force
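Neither `quadrature` nor `wall_kernel` is defined in the snippet. A minimal sketch of plausible stand-ins (the kernel's shape is purely an assumption; `quadrature` is inferred from its use as a radius above):

import numpy as np
ary = np.array

def quadrature(v):
    # Euclidean norm, i.e. sqrt(x**2 + y**2): the distance from the cable's axis
    return np.sqrt(np.sum(np.square(v)))

def example_wall_kernel(dist, thickness=0.1, strength=1.0):
    # hypothetical repulsion kernel: decays linearly to zero over the wall-effect thickness
    return strength * max(0.0, 1.0 - dist / thickness)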
Example #3
def merge_identical_parent_products(object_with_key):
    # Can I speed this up by reducing the number of (hidden) for-loops?
    # (matching_reactions is a sort of for-loop.)
    # I worry that it can only be sped up using Fortran, not Python, IMO.
    # Get the parent-product string and the MT number string as two lists,
    # corresponding to each row in the sigma_df.
    parent_product_list, mt_list = [], []
    for parent_product_mt in object_with_key.index:
        parent_product_list.append("-".join(parent_product_mt.split("-")[:2]))
        mt_list.append(parent_product_mt.split("=")[1])
    # make them into arrays to make them indexable
    parent_product_list, mt_list = ary(parent_product_list), ary(mt_list)

    partial_reaction_array = object_with_key.values
    parent_product_all = ordered_set(parent_product_list)

    sigma_unique = {}
    tprint("Condensing the sigma_xs dataframe to merge together reactions with identical (parent, product) pairs:")
    for parent_product in tqdm(parent_product_all):
        matching_reactions = parent_product_list == parent_product
        mt_name = "-MT=({})".format(",".join(mt_list[matching_reactions]))
        sigma_unique[parent_product + mt_name] = partial_reaction_array[matching_reactions].sum(axis=0)
    del object_with_key
    del partial_reaction_array
    gc.collect()
    return pd.DataFrame(sigma_unique).T
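The comments above ask whether the hidden loops can be avoided: a pandas groupby performs the same row-merging in vectorised form. A sketch, assuming the index has the "parent-product-MT=n" shape that the string splitting above implies (the aggregation of the MT numbers into the new key is omitted for brevity):

import pandas as pd

def merge_identical_parent_products_groupby(sigma_df):
    # group the rows by their "parent-product" prefix and sum their cross-sections
    keys = ["-".join(idx.split("-")[:2]) for idx in sigma_df.index]
    return sigma_df.groupby(keys).sum()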
Example #4
    def __init__(self, x, y, interpolation):
        assert np.shape(x) == np.shape(y)
        assert np.ndim(x) == 1
        assert len(interpolation) == len(x) - 1
        self.x = ary(x)
        self.y = ary(y)
        self.interpolation = ary(interpolation)
Example #5
    def definite_integral(self, a, b):
        """
        Definite integral that handles an array of (a, b) and a scalar pair of (a, b) in different manners.
        The main difference is that (a, b) will be clipped back into range if it exceeds the recorded
        x-values' range in the array case, while no such treatment happens in the scalar case.
        We might change this later to remove this inconsistency.
        """
        assert (np.diff([a, b], axis=0) >= 0).all(), "Can only integrate in the positive direction."
        if np.not_equal(np.clip(a, self.func.x.min(), self.func.x.max()), a).any():
            if self.verbose:
                print("Integration limit is below recorded range of x values! Clipping it back into range...")
            a = np.clip(a, self.func.x.min(), self.func.x.max())
        if np.not_equal(np.clip(b, self.func.x.min(), self.func.x.max()), b).any():
            if self.verbose:
                print("Integration limit is above recorded range of x values! Clipping it back into range...")
            b = np.clip(b, self.func.x.min(), self.func.x.max())

        if isinstance(a, Iterable):
            assert np.shape(a) == np.shape(b), "The dimension of (a) must match that of (b)"
            assert ary(a).ndim == 1, "Must be a flat 1D array"
            return self._definite_integral_array(ary(a), ary(b))
        else:
            return self._definite_integral_array(ary([a]), ary([b]))[0]
Example #6
def decay_mat_exp_num_decays(branching_ratios,
                             decay_constants,
                             a,
                             b,
                             c,
                             decay_constant_threshold=1E-23):
    """
    branching_ratios : a list of branching ratios along the decay chain
    decay_constants : a list of decay constants
    a : the end time of irradiation (irradiation starts at t=0), a > 0
    b : the start time of measurement, b > a
    c : the end time of measurement, c > b
    decay_constant_threshold : the threshold below which nuclides are considered stable.
    """
    if any(ary(decay_constants) <= decay_constant_threshold):
        return 0
    matrix = create_lambda_matrix(decay_constants)
    iden = np.identity(len(decay_constants))
    multiplier = 1 / a * (_expm(matrix * (b - a))
                          @ (_expm(matrix * (c - b)) - iden)
                          @ (_expm(matrix * a) - iden))
    inv = np.linalg.inv(matrix)
    # initial population of all nuclides = 0, except for the very first isotope, which has 1.0
    initial_population_vector = ary([1.0] + [0.0 for _ in decay_constants[1:]])
    # result of the (population * dt) integral
    final_fractions = multiplier @ inv @ inv @ initial_population_vector
    # total number of decays = product of branching ratios * the integral * the decay constant
    # of the last isotope, i.e. multiplying by its own decay rate gives the number of decays
    # over the time period b to c
    return np.prod(branching_ratios[1:]) * final_fractions[-1] * decay_constants[-1]
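`create_lambda_matrix` and `_expm` are external; `_expm` is presumably `scipy.linalg.expm`, and for a linear decay chain the lambda matrix is presumably the usual bidiagonal Bateman matrix. A sketch under that assumption:

import numpy as np

def create_lambda_matrix(decay_constants):
    # Bateman matrix of a linear chain, such that dN/dt = matrix @ N:
    # -lambda_i on the diagonal (decay losses), +lambda_{i-1} on the subdiagonal (feeding from the parent).
    lams = np.asarray(decay_constants, dtype=float)
    matrix = -np.diag(lams)
    matrix[np.arange(1, len(lams)), np.arange(len(lams) - 1)] = lams[:-1]
    return matrix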
Example #7
    def _definite_integral_array(self, a, b):
        n = len(self._area)

        x_array_2d = np.broadcast_to(self.func.x, [len(a), n + 1]).T
        # find the completely enveloped cells using l_bounds and u_bounds
        l_bounds = np.broadcast_to(self.func.x[:-1], [len(a), n]).T  # we don't care whether or not a is larger than the last x
        u_bounds = np.broadcast_to(self.func.x[1:], [len(b), n]).T  # we don't care whether or not b is smaller than the first x

        # calculate the area
        ge_a = np.greater_equal(l_bounds, a).T  # 2D boolean array marking bins whose lower bound is >= a
        le_b = np.less_equal(u_bounds, b).T  # 2D boolean array marking bins whose upper bound is <= b
        # use <= and >= instead of < and > to allow the left edge and right edge to have zero dx

        area_2d = np.broadcast_to(self._area, [len(a), n])
        central_area = (area_2d * (-1 + ge_a + le_b)).sum(axis=1)
        # -1 + False + False = -1 (cell envelopes the entire [a, b] interval);
        # -1 + True + False = 0, -1 + False + True = 0 (cell lies to the right/left of the entire [a, b] interval respectively);
        # -1 + True + True = +1 ([a, b] interval envelopes the entire cell).

        # left-edge half-cell
        l_ind = n - ge_a.sum(axis=1)
        l_edge_x = ary([a, self.func.x[l_ind]])
        l_edge_y = ary([self.func(a), self.func.y[l_ind]])
        # clip so the index doesn't go below zero when ge_a sums to n (i.e. when a is less than the second x)
        l_edge_scheme = self._interpolation[np.clip(l_ind - 1, 0, None, dtype=int)]

        # right-edge half-cell
        r_ind = le_b.sum(axis=1)
        r_edge_x = ary([self.func.x[r_ind], b])
        r_edge_y = ary([self.func.y[r_ind], self.func(b)])
        r_edge_scheme = self._interpolation[np.clip(r_ind, None, n - 1, dtype=int)]

        # calculate the left-edge and right-edge half-cell areas,
        # looping over the interpolation schemes (two area calculations per loop)
        l_edge_area, r_edge_area = np.zeros(len(a)), np.zeros(len(a))
        for scheme_number in INTERPOLATION_SCHEME.keys():
            matching_l = l_edge_scheme == scheme_number
            matching_r = r_edge_scheme == scheme_number
            area_func = getattr(self, "area_scheme_" + str(scheme_number))
            l_edge_area[matching_l] = area_func(*l_edge_x.T[matching_l].T, *l_edge_y.T[matching_l].T)
            r_edge_area[matching_r] = area_func(*r_edge_x.T[matching_r].T, *r_edge_y.T[matching_r].T)

        return l_edge_area + central_area + r_edge_area
Example #8
def cartesian_spherical(x, y, z):
    # Cartesian unit-vector input, spherical (Theta, Phi) output.
    x, y, z = ary(np.clip([x, y, z], -1, 1), dtype=float)  # change the data type to the desired format

    Theta = arccos(z)
    Phi = arctan(np.divide(y, x))  # this division gives nan if (x, y, z) = (0, 0, 1)
    Phi = np.nan_to_num(Phi)  # therefore set phi = 0 if (x, y) = (0, 0)
    Phi += ary((np.sign(x) - 1), dtype=bool) * pi  # if x is positive, then phi is on the RHS of the circle; vice versa
    return ary([Theta, Phi])
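A quick check (assuming the usual numpy aliases, i.e. ary = np.array, arccos/arctan from numpy, and pi = np.pi):

import numpy as np
ary, arccos, arctan, pi = np.array, np.arccos, np.arctan, np.pi

print(cartesian_spherical(1, 0, 0))   # [pi/2, 0]: on the equator, phi on the RHS
print(cartesian_spherical(-1, 0, 0))  # [pi/2, pi]: the sign correction flips phi to the LHS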
Example #9
def apply_mask_on_custom(vel_vec, mask):
    """
    Keep each vector where the mask is True; replace it with an empty (0, 2) array otherwise.
    """
    vel_vec_out = []
    for this_slice, mask_line in zip(vel_vec, mask):
        vel_vec_out.append([vector if required else ary([[], []]).T
                            for vector, required in zip(this_slice, mask_line)])
    return ary(vel_vec_out)
Example #10
    def picFast(picNum, x, y, xDim, yDim):
        # xDim, yDim = np.shape(picNum)
        if (x in range(xDim)) and (y in range(yDim)):
            pixelValue = ary(picNum[x, y][:3])  # take only the first three (RGB) values
            pixelValue = ary([255, 255, 255]) - pixelValue  # invert the colour
            return np.append(pixelValue, int(np.mean(pixelValue)))
        else:
            return transparent()
Example #11
def append_axis(x, y):
    if np.ndim(x) == 1:  # assume x and y are both 1D arrays of equal shape
        assert np.shape(x) == np.shape(y)
        xy = ary([x, y]).T
    elif np.ndim(x) == 2:  # assume y supplies one extra column to be appended onto x
        assert np.shape(x)[:1] == np.shape(y)
        xy = [axis for axis in ary(x).T]
        xy.append(ary(y))
        xy = ary(xy).T
    return xy
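A short usage sketch (assuming ary = np.array): the 1D branch pairs two flat arrays into columns, and the 2D branch appends one more column.

import numpy as np
ary = np.array

xy = append_axis(ary([1, 2]), ary([10, 20]))
print(xy)   # [[ 1 10]
            #  [ 2 20]]
xyz = append_axis(xy, ary([100, 200]))
print(xyz)  # [[  1  10 100]
            #  [  2  20 200]]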
Example #12
def top_n_sums_of_dict(un_sorted_dict, target_chosen_length, n, largest=True):
    descending_sorted_dict = OrderedDict(sorted(un_sorted_dict.items(), key=itemgetter(1), reverse=True))
    top_n_combos = top_n_sums(list(descending_sorted_dict.values()), target_chosen_length, n, largest)
    selectable_keys = ary(list(descending_sorted_dict.keys()))

    sort_result_as_return_dict = OrderedDict()
    for boolean_mask, sum_value in top_n_combos.items():
        sort_result_as_return_dict[tuple(selectable_keys[ary(boolean_mask)])] = sum_value

    return sort_result_as_return_dict
Example #13
def populate_with_vector(mask, vector):
    """
    Fill in a copy of the vector wherever the mask is True (and an empty (0, 2) array otherwise).
    Two runs of this function (which involves the copying operation)
    on real_data (600x417 points) took only 0.8 seconds,
    so the copying was deemed not to introduce much inefficiency,
    and is therefore not a point to be optimized.
    """
    vec = ary(vector.copy())
    return ary([[vec.copy() if required else ary([[], []]).T for required in mask_line]
                for mask_line in mask])
Example #14
def RemoveData(xz, Flux, FluxErr, z_llim, z_ulim=200):
    xz = ary(xz)
    Flux = ary(Flux)
    FluxErr = ary(FluxErr)
    assert xz.ndim == 2 and xz.shape[1] == 2
    assert xz.shape[0] == Flux.shape[0]
    assert xz.shape[0] == FluxErr.shape[0]
    x, z = xz.T
    ind_ary = ary([n for n in range(len(xz)) if z_llim <= z[n] < z_ulim])
    assert len(ind_ary) > 0, "no z fits these limits!"
    return xz[ind_ary], Flux[ind_ary], FluxErr[ind_ary]
Example #15
def ToDataFrame(xz, Flux, FluxErr):
    xz = ary(xz)
    Flux = ary(Flux)
    FluxErr = ary(FluxErr)
    assert xz.ndim == 2 and xz.shape[1] == 2
    assert xz.shape[0] == Flux.shape[0]
    assert xz.shape[0] == FluxErr.shape[0]
    x, z = xz.T
    df = pd.DataFrame(ary([x, z, Flux, FluxErr]).T,
                      columns=["x", "z", "flux", "err"])
    return df
Example #16
def turn_white_to_color(image_array, target_color):
    """
    Takes in a PNG in RGBA format, keeping the transparency of each pixel the same,
    and rescales the white (FF, FF, FF) into the target_color.
    This function works best when the image is only black and white,
    where all of the elements that you want to highlight are white.
    """
    decimal_array = ary(image_array, dtype='float')
    decimal_array[:, :, 0] *= target_color[0]
    decimal_array[:, :, 1] *= target_color[1]
    decimal_array[:, :, 2] *= target_color[2]
    return ary(decimal_array, dtype='int64')
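A minimal demonstration on a 1x2-pixel RGBA array (assuming target_color is given as RGB fractions in [0, 1], which the channel-wise multiplication implies):

import numpy as np
ary = np.array

image = ary([[[255, 255, 255, 255],    # an opaque white pixel
              [  0,   0,   0, 128]]])  # a half-transparent black pixel
print(turn_white_to_color(image, (1.0, 0.2, 0.2)))
# white -> (255, 51, 51); black and both alpha values are unchanged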
Example #17
def RemoveDatax(xz, Flux, FluxErr, x_llim, x_ulim):
    xz = ary(xz)
    Flux = ary(Flux)
    FluxErr = ary(FluxErr)
    assert xz.ndim == 2 and xz.shape[1] == 2
    assert xz.shape[0] == Flux.shape[0]
    assert xz.shape[0] == FluxErr.shape[0]
    x, z = xz.T
    ind_ary = ary([n for n in range(len(xz)) if x_llim <= x[n] < x_ulim])
    assert len(ind_ary) > 0, "no x fits these limits!"
    return xz[ind_ary], Flux[ind_ary], FluxErr[ind_ary]
Example #18
def area_between_2_pts(xy1, xy2, xi, scheme):
    x1, y1, x2, y2, xi = np.hstack([xy1, xy2, xi]).flatten()
    assert x1 <= xi <= x2, "xi must be between x1 and x2"
    x_ = xi - x1
    if x1 == x2:
        return 0.0  # catch all cases with zero-width bins
    if y1 == y2 or scheme == 1:  # histogramic/flat interpolation
        return y1 * x_  # would cause problems in all log(y) interpolation schemes if not caught

    dy, dx = y2 - y1, x2 - x1
    # decode the scheme number (2..5) into the (log-y?, log-x?) flags from its two bits
    logy, logx = [bool(int(i)) for i in bin(scheme - 2)[2:].zfill(2)]
    if logx:
        assert all(ary([x1, x2, xi]) > 0), "Must use non-zero x values for these interpolation schemes"
        lnx1, lnx2, lnxi = ln(x1), ln(x2), ln(xi)
        dlnx = lnx2 - lnx1
    if logy:
        assert all(ary([y1, y2]) > 0), "Must use non-zero y values for these interpolation schemes"
        lny1, lny2 = ln(y1), ln(y2)
        dlny = lny2 - lny1

    if scheme == 2:  # linx, liny
        m = dy / dx
        if xi == x2:
            return dy * dx / 2 + y1 * dx
        else:
            return y1 * x_ + m * x_**2 / 2
    elif scheme == 3:  # logx, liny
        m = dy / dlnx
        if xi == x2:
            return y1 * dx + m * (x2 * dlnx - dx)
        else:
            return y1 * x_ + m * (xi * (lnxi - lnx1) - x_)
    elif scheme == 4:  # linx, logy
        m = dlny / dx
        if xi == x2:
            return 1 / m * dy
        else:
            return 1 / m * y1 * (exp(x_ * m) - 1)
    elif scheme == 5:  # logx, logy
        m = dlny / dlnx
        if m == -1:
            return y1 * x1 * (lnxi - lnx1)
        if xi == x2:
            return y1 / (m + 1) * (x2 * (x2 / x1)**m - x1)
        else:
            return y1 / (m + 1) * (xi * (1 + x_ / x1)**m - x1)
    else:
        raise AssertionError("an invalid interpolation scheme {0} was provided".format(scheme))
Example #19
def rotateColorSpace(theta):
    # Given a vector pointing at (1, 0, 0), rotate it around the (1/sqrt(3))*[1, 1, 1] axis by theta radians.
    w = cos(theta / 2)
    x = sin(theta / 2) / sqrt(3)
    y = sin(theta / 2) / sqrt(3)
    z = sin(theta / 2) / sqrt(3)

    # rotation matrix R = I - 2A, built from the quaternion (w, x, y, z)
    A = ary([[y * y + z * z, w * z - x * y, -w * y - x * z],
             [-w * z - x * y, x * x + z * z, w * x - y * z],
             [w * y - x * z, -w * x - y * z, x * x + y * y]])
    R = np.identity(3) - 2 * A
    return np.round(R @ ary([1, 0, 0]))
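For instance (with cos, sin, sqrt and pi taken from numpy), a rotation of 2*pi/3 about the body diagonal cycles the colour axes, sending red onto green:

import numpy as np
from numpy import cos, sin, sqrt, pi
ary = np.array

print(rotateColorSpace(2 * pi / 3))  # [0. 1. 0.]: the red axis lands on the green axis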
Example #20
    def calculate_convolved_population(t):
        """
        Calculates the population at any given time t when a non-flash irradiation schedule is used,
        generated using an irradiation duration of a={} seconds.
        """.format(a)
        vector_uncollapsed = ary([
            +unpy.exp(-ary([l * np.clip(t - a, 0, None) for l in decay_constants])),
            -unpy.exp(-ary([l * np.clip(t, 0, None) for l in decay_constants]))
        ], dtype=object)
        vector = np.sum(vector_uncollapsed, axis=0)
        return premultiplying_factor * (multiplying_factors @ vector)
Example #21
def readData(inFile, y_val):
    df = pd.read_csv(inFile, delimiter="\t", header=1)
    matching_y = df["y"] == y_val  # keep only the rows with the required y value
    x = ary(df["x"][matching_y])
    z = ary(df["z"][matching_y])
    count = ary(df["counts"][matching_y])
    time = ary(df["time"][matching_y])

    count_err = ary([sqrt(cnt) for cnt in count])  # Poisson counting error
    CntRate = count / time
    CntRateErr = count_err / time
    return x, z, CntRate, CntRateErr
Example #22
def plot_chi2_line(ax, anchor, singular_dir, shift=[], chi2_mark=[1], sigma_N_meas=None, R=None):
    """
    Plot the singular directions within the viewing cube, fixed at an anchor.
    """
    lims = ary(ax.get_w_lims())
    lower_bounds, upper_bounds = lims[::2], lims[1::2]
    min_lambda_point, max_lambda_point = parametric_within_bounds(anchor, unit_vec(singular_dir), lower_bounds, upper_bounds)
    ax.plot(*ary([min_lambda_point, max_lambda_point]).T, label='chi^2=0')
    # restore the viewing cube to its original size
    ax.set_xlim3d(lims[0], lims[1])
    ax.set_ylim3d(lims[2], lims[3])
    ax.set_zlim3d(lims[4], lims[5])
    for chi2_value in chi2_mark:
        equichi_circle = rotate_around_axis(singular_dir, sigma_N_meas, R, num_points=120)
        ax.plot(*(equichi_circle * sqrt(chi2_value) + anchor).T, label='chi^2=' + str(chi2_value) + ' circle')
    return ax
Example #23
def RemoveDataPoint(xz, Flux, FluxErr, x_rm, z_rm):
    xz = ary(xz)
    Flux = ary(Flux)
    FluxErr = ary(FluxErr)
    assert xz.ndim == 2 and xz.shape[1] == 2
    assert xz.shape[0] == Flux.shape[0]
    assert xz.shape[0] == FluxErr.shape[0]
    x, z = xz.T
    ind_ary = ary([n for n in range(len(xz)) if (x[n] != x_rm or z[n] != z_rm)])
    assert len(ind_ary) == (len(xz) - 1), "more than 1 point was removed from the xz data!"
    return xz[ind_ary], Flux[ind_ary], FluxErr[ind_ary]
Example #24
def ReadR(fileName):  # rotation matrix reader
    with open(str(fileName)) as f:
        Matrices = f.readlines()
    Matrices = np.reshape(Matrices, [-1, 3])  # three lines per matrix
    Matrix = []
    for n in range(len(Matrices)):
        Matrix.append([
            ary(Matrices[n][0].split(), dtype=float),
            ary(Matrices[n][1].split(), dtype=float),
            ary(Matrices[n][2].split(), dtype=float)
        ])
    # np.shape(Matrix) == (n, 3, 3)
    return Matrix
Example #25
def decay_mat_exp_population_convolved(branching_ratios,
                                       decay_constants,
                                       a,
                                       t,
                                       decay_constant_threshold: float = 1E-23):
    """
    Separates out the cases that would cause a singular matrix or a divide-by-zero.
    """
    if any(ary(decay_constants[:-1]) <= decay_constant_threshold):
        # any stable isotope before the last in the chain:
        return 0
    elif len(decay_constants) == 1 and decay_constants[0] <= decay_constant_threshold:
        # single stable isotope in the chain:
        return 1.0
    elif decay_constants[-1] <= decay_constant_threshold:
        # last isotope is stable; the rest of the chain is unstable:
        if t < a:
            raise NotImplementedError("The formula for population during irradiation hasn't been properly derived yet.")
        matrix = create_lambda_matrix(decay_constants[:-1])
        iden = np.identity(len(decay_constants) - 1)
        inv = np.linalg.inv(matrix)
        initial_population_vector = ary([1.0] + [0.0 for _ in decay_constants[1:-1]])

        # during_irradiation_production_matrix = -1/a * inv @ (a*iden - inv @ (_expm(-matrix*a) - iden))
        during_irradiation_production_matrix = -inv + 1 / a * (_expm(matrix * a) - iden) @ inv @ inv
        during_irradiation_production = (
            (during_irradiation_production_matrix @ initial_population_vector)[-1]
            * decay_constants[-2] * np.prod(branching_ratios)
        )
        post_irradiation_production = decay_mat_exp_num_decays(branching_ratios, decay_constants[:-1], a, a, t)
        return during_irradiation_production + post_irradiation_production
    else:  # all unstable:
        matrix = create_lambda_matrix(decay_constants)
        iden = np.identity(len(decay_constants))
        inv = np.linalg.inv(matrix)
        initial_population_vector = ary([1.0] + [0.0 for _ in decay_constants[1:]])

        transformation = 1 / a * _expm(matrix * np.clip(t - a, 0, None)) @ (_expm(matrix * np.clip(t, 0, a)) - iden) @ inv
        final_fractions = transformation @ initial_population_vector
        return np.prod(branching_ratios[1:]) * final_fractions[-1]
Example #26
    def test_exclude_transect_with_dataset_2(self):
        """
        Test that _exclude_transect excludes only the specified transects.
        """
        excludeA = ('Jantang', 1)
        excludeB = [('Jantang', 1), ('Kuala Merisi', 2)]
        slKey = {1: "Jantang", 2: "Kuala Merisi"}
        t2A = tdb.Transect(self.x2, self.slc2, self.tsc2, self.dts2,
                           exclude=excludeA, slKey=slKey)
        t2B = tdb.Transect(self.x2, self.slc2, self.tsc2, self.dts2,
                           exclude=excludeB, slKey=slKey)
        assert_array_equal(t2A.sds, ary([10, 2, 3, 3, 0, 20]))
        assert_array_equal(t2A.x, ary([3., 4, 6, 7, 8, 9]))
        assert_array_equal(t2B.sds, ary([10, 2, 3, 3]))
        assert_array_equal(t2B.ind, ary([5, 4, 7, 8]))
Example #27
def tessellate_circle(num_sample):
    # create a hexagonal packing of circles
    r = get_radius_given_number(num_sample)
    xspace = 2 * r
    yspace = 2 * sqrt(3) * r
    full_rows = ary(np.meshgrid(np.arange(-1 + r, 1, xspace),
                                np.arange(-1 + r, 1, yspace))).T.reshape([-1, 2])
    in_between_rows = ary(np.meshgrid(np.arange(-1 + 2 * r, 1, xspace),
                                      np.arange(-1 + (1 + sqrt(3)) * r, 1, yspace))).T.reshape([-1, 2])
    tessellated_square = np.concatenate([full_rows, in_between_rows])
    # keep only the points that fall safely inside the unit circle
    mask = [quadrature(point) < 1 - 0.8 * r for point in tessellated_square]
    return tessellated_square[mask]
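Both helpers are external: `quadrature` was sketched after Example #2, and `get_radius_given_number` plausibly inverts the hexagonal packing density so that roughly num_sample circles of radius r fill the unit circle. A sketch of the latter (an assumption, not the original):

from math import sqrt, pi

def get_radius_given_number(num_sample):
    # hexagonal packing covers pi/(2*sqrt(3)) ~ 0.9069 of the plane,
    # so num_sample * pi * r**2 ~ 0.9069 * pi * 1**2 inside the unit circle
    return sqrt(pi / (2 * sqrt(3)) / num_sample)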
Example #28
def fit(x, y, yerr, cov=False, verbose=False):
    (m, c), residual, _rank, _sv, _rcond = np.polyfit(x, y, w=1 / ary(yerr), deg=1, full=True)
    if verbose:
        print("rank =", _rank)
        print("singular values =", _sv)
        print("condition number =", _rcond)
    if cov:
        m_c, cov_matr = np.polyfit(x, y, w=1 / ary(yerr), deg=1, cov=cov)
        return m_c, cov_matr
    fit_func = lambda x: m * ary(x) + c
    return fit_func, get_rms_residuals(fit_func, x, y, yerr)
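A short usage sketch (assuming ary = np.array; the cov=True path is used here because get_rms_residuals is an external helper not shown above):

import numpy as np
ary = np.array

x = ary([0., 1., 2., 3.])
y = ary([0.1, 1.9, 4.1, 5.9])     # roughly y = 2x
yerr = ary([0.1, 0.1, 0.1, 0.1])  # equal weights
(m, c), cov_matr = fit(x, y, yerr, cov=True)
print(m, c)  # slope close to 2, intercept close to 0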
Example #29
def ReadSurfTall(NameOfTally):
    # read from file
    TallyStats = ReadBlockOfText(
        fname,
        trigger1="1tally fluc",
        length=12,
        ScrollMore=1,
    )

    # separate the name row of the first block from the body
    tallynames = TallyStats[0][1::2]  # get the names of the tallies in the first row
    TallyStats = TallyStats[1:]  # cut out the first row

    # separate the header row from the rest
    namelist = TallyStats[0][1:]  # ignore the first element, "nps"
    TallyStats = ary(TallyStats[1:]).T  # reshape it by transposing it

    # create a namelist without duplicates, preserving order
    cutlist = []
    for s in namelist:
        if s not in cutlist:
            cutlist.append(s)

    # separate the x values (nps) from the y values (everything else)
    nps = ary(TallyStats[0], dtype=int)
    TallyStats = ary(TallyStats[1:], dtype=float)  # cut out the first row
    NumTallies = len(namelist) / len(cutlist)  # check how many times namelist repeats itself
    assert int(NumTallies) == NumTallies, "this tally table outputs more columns than expected"
    # reshape it, while preserving the last dimension
    TallyStats = np.reshape(TallyStats, [int(NumTallies), -1, np.shape(TallyStats)[1]])

    # convert each tally's block into a dictionary
    ListOfTallyDict = []
    for tal in TallyStats:
        ListOfTallyDict.append(ConvToDict(tal, cutlist))
    for i in range(len(ListOfTallyDict)):
        ListOfTallyDict[i]["nps"] = nps
        ListOfTallyDict[i]["tally"] = tallynames[i]
    for D in ListOfTallyDict:
        if D["tally"] == NameOfTally:
            return D
Example #30
def uglyAverage(qList):  # simply take the renormalized average
    qList = ary(qList)  # turn into an array if not already one

    average = np.zeros(4)
    for n in range(4):
        average[n] = np.average(qList[:, n])  # find the average of each column
    return normalize(average)
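Since qList is cast to a 2-D array, the column-by-column loop above is equivalent to the vectorised np.average(qList, axis=0).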
Example #31
def read_tab2(in_file_path):
    """Takes in a TAB2 output file
    and sorts its rows according to the activities.
    """
    # sniff the file for the point where the transition happens
    nuclides, activities, doses = [], [], []
    with open(in_file_path) as tab2:
        skiprows = 0
        for line in tab2:
            if line.startswith("  TIME"):
                skiprows += 1
            else:
                nuclides.append(line[2:8])            # fixed-width columns:
                activities.append(float(line[8:22]))  # nuclide, activity,
                doses.append(float(line[22:36]))      # and dose
    table = pd.DataFrame(
        ary([activities, doses]).T,
        columns=["activities(Bq)", "doses(Sv/hr)"],
        index=nuclides,
    )
    table.sort_values("activities(Bq)", inplace=True, ascending=False)
    # append a "total" row at the bottom
    total = table.sum(axis=0)
    total.name = "total"
    table = pd.concat([table, total.to_frame().T])
    return table
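A usage sketch on a fabricated two-line input whose layout matches the column slices above (the real TAB2 layout may differ):

sample = (
    "  TIME 1.0 HOURS\n"
    "  Co60    1.000000E+06  2.000000E-03\n"
    "  Fe59    5.000000E+05  1.000000E-03\n"
)
with open("tab2_sample.txt", "w") as f:
    f.write(sample)
print(read_tab2("tab2_sample.txt"))  # rows sorted by activity, with a "total" row appended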