Example #1
  def _matrix_conv(self, m1, m2):
    """Matrix convolution.

    Args:
      m1: a k x k x k dictionary; each element is an n x n matrix.
      m2: an l x l x l dictionary; each element is an n x n matrix.

    Returns:
      A (k + l - 1) x (k + l - 1) x (k + l - 1) dictionary; each
      element is an n x n matrix.

    Raises:
      ValueError: if the entries of m1 and m2 are of different dimensions.
    """

    n = (m1[0, 0, 0]).shape.as_list()[0]
    if n != (m2[0, 0, 0]).shape.as_list()[0]:
      raise ValueError("The entries in matrices m1 and m2 "
                       "must have the same dimensions!")
    k = int(np.cbrt(len(m1)))
    l = int(np.cbrt(len(m2)))
    result = {}
    size = k + l - 1
    # Compute matrix convolution between m1 and m2.
    for i in range(size):
      for j in range(size):
        for r in range(size):
          result[i, j, r] = array_ops.zeros([n, n], self.dtype)
          for index1 in range(min(k, i + 1)):
            for index2 in range(min(k, j + 1)):
              for index3 in range(min(k, r + 1)):
                if (i - index1) < l and (j - index2) < l and (r - index3) < l:
                  result[i, j, r] += math_ops.matmul(m1[index1, index2, index3],
                                                     m2[i - index1, j - index2,
                                                        r - index3])
    return result
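The helper recovers the cubic side length of each dictionary from its entry count with int(np.cbrt(len(m1))). A minimal standalone check of that idiom (the dictionary below is illustrative, not taken from the original class):

import numpy as np

# A k x k x k dictionary has k**3 entries, so the side length is one cube root away,
# which is how _matrix_conv recovers k and l above (round() just guards against the
# cube root landing a hair below the integer).
k = 4
m = {(a, b, c): None for a in range(k) for b in range(k) for c in range(k)}
assert int(round(np.cbrt(len(m)))) == k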
Example #2
File: mc1.py Project: desoo40/mai_labs
def main():
    funcs = [lambda x: 1 / (x + np.cbrt(x)),
             lambda x: 1 / math.sqrt(x ** 2 + 3.22),
             lambda x: math.sin(x) ** 3,
             lambda x: 1 / (1 + math.sin(x)),
             lambda x: math.log(x + 2) / x,
             lambda x: math.log(x) / (x ** 2)
    ]
    monte_funcs = [funcs[0],
                   lambda x: funcs[0](x[0]) * funcs[1](x[1]),
                   lambda x: funcs[0](x[0]) * funcs[1](x[1]) * funcs[2](x[2]),
                   lambda x: funcs[0](x[0]) * funcs[1](x[1]) * funcs[2](x[2]) * funcs[3](x[3]),
                   lambda x: funcs[0](x[0]) * funcs[1](x[1]) * funcs[2](x[2]) * funcs[3](x[3]) * funcs[4](x[4]),
                   lambda x: funcs[0](x[0]) * funcs[1](x[1]) * funcs[2](x[2]) * funcs[3](x[3]) * funcs[4](x[4]) * funcs[5](x[5])
    ]
    anal_funcs = [lambda x: 3 * math.log(np.cbrt(x ** 2) + 1) / 2,
                  lambda x: math.log(np.abs(x + math.sqrt(x ** 2 + 3.22))),
                  lambda x: (math.cos(3 * x) - 9 * math.cos(x)) / 12,
                  lambda x: -2 / (math.tan(x / 2) + 1),
                  lambda x: 2 * 0.648437 if x == 2 else 0.648437,
                  lambda x: -(math.log(x) + 1) / x
    ]
    intervals = [(-3, -1), (1, 3), (3, 5), (5, 7), (1.2, 2), (14, 88)]

    print("Format in methods is (value, time in seconds)")

    for h, experiments_amount in [(0.1, 100), (0.01, 1000)]:
        bench_methods(funcs, monte_funcs, anal_funcs, intervals, h, experiments_amount)
Example #3
	def countOptimalBins(self):
		num = self.dist.shape[0]
		if num < 100:
			if num % 2 == 0:
				num_b = np.sqrt(num) - 1
			else:
				num_b = np.sqrt(num)
		else:
			if num % 2 == 0:
				num_b = np.cbrt(num) - 1
			else:
				num_b = np.cbrt(num)
		return int(np.floor(num_b))
Example #4
    def calc_prior_n(self, damping_factor=5.):
        n_infer = np.zeros((self.students, self.subtopics))
        k_infer = np.zeros((self.students, self.subtopics))
        for i, node in enumerate(self.tree.nodes):
            n = self.nodes[node][1, :]
            p = self.nodes[node][2, :] / (n + 1e-12)
            for j in range(self.subtopics):
                nmeasure = n * self.tree.damp_factor[i, j]
                pmeasure = p * self.tree.pair_cond_true[i, j] + (1 - p) * self.tree.pair_cond_false[i, j]
                n_infer[:, j] += nmeasure
                k_infer[:, j] += nmeasure * pmeasure

        alpha, beta = k_infer + 1., n_infer - k_infer + 1.
        means = alpha / (alpha + beta)
        weights = (n_infer + 1.)
        EX2 = np.sum(weights * (alpha * beta / (alpha + beta) ** 2 / (alpha + beta + 1) + means ** 2), axis=0) / np.sum(
            weights, axis=0)
        EX = np.sum(weights * means, axis=0) / np.sum(weights, axis=0)
        variance = EX2 - EX ** 2

        p = np.array([node.prob for node in self.tree.nodes])
        # Solve ns**3 + b*ns**2 + c*ns + d = 0 with Cardano's closed-form cubic root;
        # D0 and D1 are the discriminant helpers and C the cube-root term.
        b = 7. - p * (1. - p) / variance
        c = 16. - 1. / variance
        d = 12. - 1. / variance
        D0 = b ** 2 - 3. * c
        D1 = 2. * b ** 3 - 9. * b * c + 27. * d
        C = np.cbrt((D1 + np.sqrt(D1 ** 2 - 4. * D0 ** 3)) / 2.)
        ns = -(b + C + D0 / C) / 3.

        ns /= damping_factor
        for i, node in enumerate(self.tree.nodes):
            node.n_prior = 0 if np.isnan(ns[i]) else ns[i]
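The block above is Cardano's closed-form cubic root. A standalone check (with illustrative coefficients of the same shape, corresponding to the 1/variance terms going to zero) that the formula indeed returns a root of n**3 + b*n**2 + c*n + d = 0:

import numpy as np

b, c, d = 7.0, 16.0, 12.0
D0 = b ** 2 - 3. * c
D1 = 2. * b ** 3 - 9. * b * c + 27. * d
C = np.cbrt((D1 + np.sqrt(D1 ** 2 - 4. * D0 ** 3)) / 2.)
n = -(b + C + D0 / C) / 3.
print(n, n ** 3 + b * n ** 2 + c * n + d)  # -3.0, ~0  (x**3 + 7x**2 + 16x + 12 = (x+2)**2 (x+3))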
Example #5
    def test_unary_ufuncs(self):

        v = BlockVector(2)
        a = np.ones(3) * 0.5
        b = np.ones(2) * 0.8
        v[0] = a
        v[1] = b

        v2 = BlockVector(2)

        unary_funcs = [np.log10, np.sin, np.cos, np.exp, np.ceil,
                       np.floor, np.tan, np.arctan, np.arcsin,
                       np.arccos, np.sinh, np.cosh, np.abs,
                       np.tanh, np.arcsinh, np.arctanh,
                       np.fabs, np.sqrt, np.log, np.log2,
                       np.absolute, np.isfinite, np.isinf, np.isnan,
                       np.log1p, np.logical_not, np.exp2, np.expm1,
                       np.sign, np.rint, np.square, np.positive,
                       np.negative, np.rad2deg, np.deg2rad,
                       np.conjugate, np.reciprocal]

        for fun in unary_funcs:
            v2[0] = fun(v[0])
            v2[1] = fun(v[1])
            res = fun(v)
            self.assertIsInstance(res, BlockVector)
            self.assertEqual(res.nblocks, 2)
            for i in range(2):
                self.assertTrue(np.allclose(res[i], v2[i]))

        other_funcs = [np.cumsum, np.cumprod, np.cumproduct]

        for fun in other_funcs:
            res = fun(v)
            self.assertIsInstance(res, BlockVector)
            self.assertEqual(res.nblocks, 2)
            self.assertTrue(np.allclose(fun(v.flatten()), res.flatten()))

        with self.assertRaises(Exception) as context:
            np.cbrt(v)
Example #6
    def geostationary(
        cls, attractor, angular_velocity=None, period=None, hill_radius=None
    ):
        """Return the geostationary orbit for the given attractor and its rotational speed.

        Parameters
        ----------
        attractor : Body
            Main attractor.
        angular_velocity : ~astropy.units.Quantity
            Rotational angular velocity of the attractor.
        period : ~astropy.units.Quantity
            Attractor's rotational period, ignored if angular_velocity is passed.
        hill_radius : ~astropy.units.Quantity
            Radius of the Hill sphere of the attractor (optional). The Hill sphere radius (in
            contrast with Laplace's SOI) is used here to validate the stability of the
            geostationary orbit, that is, to make sure that the orbital radius required
            for the geostationary orbit is not outside the gravitational sphere of
            influence of the attractor.
            The Hill SOI of the attractor's parent (if it exists) is ignored if hill_radius is not provided.
        """

        if angular_velocity is None and period is None:
            raise ValueError(
                "At least one among angular_velocity or period must be passed"
            )

        if angular_velocity is None:
            angular_velocity = 2 * np.pi / period

        # Find out geostationary radius using r = cube_root(GM/(angular
        # velocity)^2)
        with u.set_enabled_equivalencies(u.dimensionless_angles()):
            geo_radius = np.cbrt(attractor.k / np.square(angular_velocity.to(1 / u.s)))

        if hill_radius is not None and geo_radius > hill_radius:
            raise ValueError(
                "Geostationary orbit for the given parameters doesn't exist"
            )

        altitude = geo_radius - attractor.R
        return cls.circular(attractor, altitude)
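As a sanity check of the formula in the comment above, plugging in standard values for Earth (well-known constants, not part of the poliastro example) reproduces the familiar geostationary radius of about 42,164 km:

import numpy as np

GM_earth = 3.986004418e14            # m**3 / s**2
omega = 2 * np.pi / 86164.0905       # rad/s, one sidereal day
geo_radius = np.cbrt(GM_earth / omega ** 2)
print(geo_radius / 1e3)              # ~42164 km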
Example #7
 def discretize(self, column):
     print("DEB Discretize column " + column)
     sorted_col = sorted(column)
     l = len(column)
     n = int(numpy.floor(l / 2))
     if l % 2 == 0:
         median_1 = numpy.median(sorted_col[0:n])
         median_2 = numpy.median(sorted_col[n:])
     else:
         median_1 = numpy.median(sorted_col[0:(n + 1)])
         median_2 = numpy.median(sorted_col[(n + 1):])
     iqr = median_2 - median_1
     # Freedman-Diaconis-style bin width: h = 2 * IQR / cbrt(n)
     h = 2 * iqr * (1 / numpy.cbrt(l))
     if h > 0:
         bins_number = int(numpy.ceil((column.max() - column.min()) / h))
         new_col, bins = pandas.cut(column, bins_number, labels=False, retbins=True, include_lowest=False)
     else:
         new_col = column
         bins = []
     return new_col, bins
Example #8
File: main.py Project: xavi1989/cs231n
def form_initial_voxels(xlim, ylim, zlim, num_voxels):
    # TODO: Implement this method!
    x_dim = xlim[-1] - xlim[0]
    y_dim = ylim[-1] - ylim[0]
    z_dim = zlim[-1] - zlim[0]
    total_volume = x_dim * y_dim * z_dim

    voxel_volume = float(total_volume / num_voxels)
    voxel_size = np.cbrt(voxel_volume)

    # np.linspace below needs integer point counts
    x_voxel_num = int(np.round(x_dim / voxel_size))
    y_voxel_num = int(np.round(y_dim / voxel_size))
    z_voxel_num = int(np.round(z_dim / voxel_size))

    x_coor = np.linspace(xlim[0]+0.5*voxel_size, xlim[0]+(0.5+x_voxel_num-1)*voxel_size, x_voxel_num)
    y_coor = np.linspace(ylim[0]+0.5*voxel_size, ylim[0]+(0.5+y_voxel_num-1)*voxel_size, y_voxel_num)
    z_coor = np.linspace(zlim[0]+0.5*voxel_size, zlim[0]+(0.5+z_voxel_num-1)*voxel_size, z_voxel_num)

    XX, YY, ZZ = np.meshgrid(x_coor, y_coor, z_coor)
    voxels = np.vstack((XX.reshape(-1), YY.reshape(-1), ZZ.reshape(-1))).reshape(3, -1).T

    return voxels, voxel_size
Example #9
    def sample(number_factors, n_time_steps=50, scenario_type=Collisive, kernel="SingleCPU"):
        """Helper function to sample a scenario for varying number of particles at constant density

        :param number_factors: array of factors that will be applied to the scenarios, the length of this determines the number of samples/simulations
        :param n_time_steps: how many time steps shall be performed for each sample/simulation
        :param scenario_type: determines which scenario shall be run
        :param kernel: string determining the compute kernel of the simulation
        :return: performance results (times, counts) and unit-less system variables, these are all dictionaries
        """
        n_samples = len(number_factors)
        times = get_empty_result_container(n_samples)
        counts = get_empty_result_container(n_samples)
        system_vars = get_empty_system_variables(n_samples)
        box_length_factors = np.cbrt(number_factors)
        for i, _ in enumerate(number_factors):
            factors = get_identity_factors()
            factors["n_particles"] = number_factors[i]
            factors["box_length"] = box_length_factors[i]
            scenario = scenario_type(kernel, factors)
            scenario.run(n_time_steps)
            scenario.set_system_vars(system_vars, i)
            scenario.set_times(times, i)
            scenario.set_counts(counts, i)
        return times, counts, system_vars
Example #10
    columns=X_train.columns,
    index=trafo_names)  # Anderson Darling for all feature transformations
KSstats = pd.DataFrame(
    np.zeros((len(trafo_names), X_train.shape[1])),
    columns=X_train.columns,
    index=trafo_names)  # Kolmogorov Smirnov feature transformations

# again: only use training data for the tests!

for i in range(X_train.shape[1]):
    feat = X_train.iloc[:, i]
    natlog = np.log(feat + 1)
    log2 = np.log2(feat + 1)
    log10 = np.log10(feat + 1)
    sqrt = np.sqrt(feat)
    cbrt = np.cbrt(feat)

    df_list = list([feat, natlog, log2, log10, sqrt, cbrt])

    for j, trafo in zip(range(len(trafo_names)), df_list):
        kurtstats.iloc[j, i] = trafo.kurt()
        skewstats.iloc[j, i] = trafo.skew()
        stdstats.iloc[j, i] = trafo.std()

        stat1, pval1 = stats.shapiro(trafo)
        stat2, pval2 = stats.jarque_bera(trafo)
        AD, crit, sig = stats.anderson(trafo, dist='norm')
        stat4, pval4 = stats.kstest(trafo, 'norm')

        AD_adj = AD * (1 + (.75 / 50) + 2.25 / (50**2))
Example #11
    def __init__(self, c, Y_b, L_A, whitepoint=whitepoints_cie1931["D65"]):
        # step0: Calculate all values/parameters which are independent of input
        #        samples
        Y_w = whitepoint[1]

        # Nc and F are modelled as a function of c, and can be linearly interpolated.
        c_vals = [0.525, 0.59, 0.69]
        F_Nc_vals = [0.8, 0.9, 1.0]
        assert 0.535 <= c <= 0.69
        F = np.interp(c, c_vals, F_Nc_vals)
        self.c = c
        self.N_c = F

        self.M_cat02 = np.array(
            [
                [+0.7328, +0.4296, -0.1624],
                [-0.7036, +1.6975, +0.0061],
                [+0.0030, +0.0136, +0.9834],
            ]
        )
        RGB_w = np.dot(self.M_cat02, whitepoint)

        D = F * (1 - 1 / 3.6 * np.exp((-L_A - 42) / 92))
        D = min(D, 1.0)
        D = max(D, 0.0)

        self.D_RGB = D * Y_w / RGB_w + 1 - D

        k = 1 / (5 * L_A + 1)
        k4 = k * k * k * k
        l4 = 1 - k4
        self.F_L = k4 * L_A + 0.1 * l4 * l4 * np.cbrt(5 * L_A)

        self.n = Y_b / Y_w
        self.z = 1.48 + np.sqrt(self.n)
        self.N_bb = 0.725 / self.n ** 0.2
        self.N_cb = self.N_bb

        RGB_wc = self.D_RGB * RGB_w

        self.M_hpe = np.array(
            [
                [+0.38971, +0.68898, -0.07868],
                [-0.22981, +1.18340, +0.04641],
                [+0.00000, +0.00000, +1.00000],
            ]
        )
        RGB_w_ = np.dot(self.M_hpe, np.linalg.solve(self.M_cat02, RGB_wc))

        alpha = (self.F_L * RGB_w_ / 100) ** 0.42
        RGB_aw_ = 400 * alpha / (alpha + 27.13)
        self.A_w = np.dot([2, 1, 1 / 20], RGB_aw_) * self.N_bb

        self.h = np.array([20.14, 90.00, 164.25, 237.53, 380.14])
        self.e = np.array([0.8, 0.7, 1.0, 1.2, 0.8])
        self.H = np.array([0.0, 100.0, 200.0, 300.0, 400.0])

        # Merge a bunch of matrices together here.
        self.M_ = np.dot(
            self.M_hpe,
            np.linalg.solve(self.M_cat02, (self.M_cat02.T * self.D_RGB).T),
        )
        # Alternative: LU decomposition. That introduces a scipy dependency
        # though and lusolve is slower than dot() as well.
        self.invM_ = np.linalg.inv(self.M_)
Example #12
    def test_scafacos(self):
        s = self.system
        rho = 0.3

        # This is only for box size calculation. The actual particle number is
        # lower, because particles are removed from the mdlc gap region
        n_particle = 100

        particle_radius = 0.5

        box_l = np.cbrt(4 * n_particle * np.pi / (3 * rho)) * particle_radius
        s.box_l = 3 * [box_l]

        for dim in (2, 1):
            print("Dimension", dim)

            # Read reference data
            if dim == 2:
                file_prefix = "data/mdlc"
                s.periodicity = [1, 1, 0]
            else:
                s.periodicity = [1, 0, 0]
                file_prefix = "data/scafacos_dipoles_1d"

            ref_E_path = abspath(file_prefix + "_reference_data_energy.dat")
            ref_E = float(np.genfromtxt(ref_E_path))

            # Particles
            data = np.genfromtxt(
                abspath(file_prefix + "_reference_data_forces_torques.dat"))
            s.part.add(pos=data[:, 1:4], dip=data[:, 4:7])
            s.part[:].rotation = (1, 1, 1)

            if dim == 2:
                scafacos = magnetostatics.Scafacos(
                    prefactor=1,
                    method_name="p2nfft",
                    method_params={
                        "p2nfft_verbose_tuning": 0,
                        "pnfft_N": "80,80,160",
                        "pnfft_window_name": "bspline",
                        "pnfft_m": "4",
                        "p2nfft_ignore_tolerance": "1",
                        "pnfft_diff_ik": "0",
                        "p2nfft_r_cut": "6",
                        "p2nfft_alpha": "0.8",
                        "p2nfft_epsB": "0.05"
                    })
                s.actors.add(scafacos)
                # change box geometry in x,y direction to ensure that
                # scafacos survives it
                s.box_l = np.array((1, 1, 1.3)) * box_l

            else:
                # 1d periodic in x
                scafacos = magnetostatics.Scafacos(
                    prefactor=1,
                    method_name="p2nfft",
                    method_params={
                        "p2nfft_verbose_tuning": 1,
                        "pnfft_N": "32,128,128",
                        "pnfft_direct": 0,
                        "p2nfft_r_cut": 2.855,
                        "p2nfft_alpha": "1.5",
                        "p2nfft_intpol_order": "-1",
                        "p2nfft_reg_kernel_name": "ewald",
                        "p2nfft_p": "16",
                        "p2nfft_ignore_tolerance": "1",
                        "pnfft_window_name": "bspline",
                        "pnfft_m": "8",
                        "pnfft_diff_ik": "1",
                        "p2nfft_epsB": "0.125"
                    })
                s.box_l = np.array((1, 1, 1)) * box_l
                s.actors.add(scafacos)
            s.integrator.run(0)

            # Calculate errors

            err_f = self.vector_error(s.part[:].f, data[:, 7:10])
            err_t = self.vector_error(s.part[:].torque_lab, data[:, 10:13])
            err_e = s.analysis.energy()["dipolar"] - ref_E

            tol_f = 2E-3
            tol_t = 2E-3
            tol_e = 1E-3

            self.assertLessEqual(abs(err_e), tol_e,
                                 "Energy difference too large")
            self.assertLessEqual(abs(err_t), tol_t,
                                 "Torque difference too large")
            self.assertLessEqual(abs(err_f), tol_f,
                                 "Force difference too large")

            s.part.clear()
            s.actors.clear()
Example #13
File: mt.py Project: tranmartin45/elowitz
def freedman_diaconis_bins(data):
    """Number of bins based on Freedman-Diaconis rule."""
    h = 2 * (np.percentile(data, 75) - np.percentile(data, 25)) / np.cbrt(
        len(data))
    return int(np.ceil((data.max() - data.min()) / h))
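This implements the Freedman-Diaconis rule, bin width h = 2 * IQR / n**(1/3). A hypothetical usage sketch, assuming the function above is available and numpy is imported as np:

import numpy as np

rng = np.random.default_rng(0)
sample = rng.normal(size=1000)
print(freedman_diaconis_bins(sample))  # roughly 25 bins for a 1000-point standard-normal sample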
Example #14
    def __init__(self, vol_bnds, voxel_size, use_gpu=True):
        """Constructor.

    Args:
      vol_bnds (ndarray): An ndarray of shape (3, 2). Specifies the
        xyz bounds (min/max) in meters.
      voxel_size (float): The volume discretization in meters.
    """
        vol_bnds = np.asarray(vol_bnds)
        assert vol_bnds.shape == (
            3, 2), "[!] `vol_bnds` should be of shape (3, 2)."

        # Define voxel volume parameters
        self._vol_bnds = vol_bnds
        self._voxel_size = float(voxel_size)
        self._trunc_margin = 5 * self._voxel_size  # truncation on SDF
        self._color_const = 256 * 256

        # Adjust volume bounds and ensure C-order contiguous
        self._vol_dim = np.ceil((self._vol_bnds[:, 1] - self._vol_bnds[:, 0]) /
                                self._voxel_size).copy(order='C').astype(int)
        self._vol_bnds[:,
                       1] = self._vol_bnds[:,
                                           0] + self._vol_dim * self._voxel_size
        self._vol_origin = self._vol_bnds[:,
                                          0].copy(order='C').astype(np.float32)

        print("Voxel volume size: {} x {} x {} - # points: {:,}".format(
            self._vol_dim[0], self._vol_dim[1], self._vol_dim[2],
            self._vol_dim[0] * self._vol_dim[1] * self._vol_dim[2]))

        # Initialize pointers to voxel volume in CPU memory
        self._tsdf_vol_cpu = np.ones(self._vol_dim).astype(np.float32)
        # for computing the cumulative moving average of observations per voxel
        self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
        self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)

        self.gpu_mode = use_gpu and FUSION_GPU_MODE

        # Copy voxel volumes to GPU
        if self.gpu_mode:
            self._tsdf_vol_gpu = cuda.mem_alloc(self._tsdf_vol_cpu.nbytes)
            cuda.memcpy_htod(self._tsdf_vol_gpu, self._tsdf_vol_cpu)
            self._weight_vol_gpu = cuda.mem_alloc(self._weight_vol_cpu.nbytes)
            cuda.memcpy_htod(self._weight_vol_gpu, self._weight_vol_cpu)
            self._color_vol_gpu = cuda.mem_alloc(self._color_vol_cpu.nbytes)
            cuda.memcpy_htod(self._color_vol_gpu, self._color_vol_cpu)

            # Cuda kernel function (C++)
            self._cuda_src_mod = SourceModule("""
        __global__ void integrate(float * tsdf_vol,
                                  float * weight_vol,
                                  float * color_vol,
                                  float * vol_dim,
                                  float * vol_origin,
                                  float * cam_intr,
                                  float * cam_pose,
                                  float * other_params,
                                  float * color_im,
                                  float * depth_im) {
          // Get voxel index
          int gpu_loop_idx = (int) other_params[0];
          int max_threads_per_block = blockDim.x;
          int block_idx = blockIdx.z*gridDim.y*gridDim.x+blockIdx.y*gridDim.x+blockIdx.x;
          int voxel_idx = gpu_loop_idx*gridDim.x*gridDim.y*gridDim.z*max_threads_per_block+block_idx*max_threads_per_block+threadIdx.x;
          int vol_dim_x = (int) vol_dim[0];
          int vol_dim_y = (int) vol_dim[1];
          int vol_dim_z = (int) vol_dim[2];
          if (voxel_idx > vol_dim_x*vol_dim_y*vol_dim_z)
              return;
          // Get voxel grid coordinates (note: be careful when casting)
          float voxel_x = floorf(((float)voxel_idx)/((float)(vol_dim_y*vol_dim_z)));
          float voxel_y = floorf(((float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z))/((float)vol_dim_z));
          float voxel_z = (float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z-((int)voxel_y)*vol_dim_z);
          // Voxel grid coordinates to world coordinates
          float voxel_size = other_params[1];
          float pt_x = vol_origin[0]+voxel_x*voxel_size;
          float pt_y = vol_origin[1]+voxel_y*voxel_size;
          float pt_z = vol_origin[2]+voxel_z*voxel_size;
          // World coordinates to camera coordinates
          float tmp_pt_x = pt_x-cam_pose[0*4+3];
          float tmp_pt_y = pt_y-cam_pose[1*4+3];
          float tmp_pt_z = pt_z-cam_pose[2*4+3];
          float cam_pt_x = cam_pose[0*4+0]*tmp_pt_x+cam_pose[1*4+0]*tmp_pt_y+cam_pose[2*4+0]*tmp_pt_z;
          float cam_pt_y = cam_pose[0*4+1]*tmp_pt_x+cam_pose[1*4+1]*tmp_pt_y+cam_pose[2*4+1]*tmp_pt_z;
          float cam_pt_z = cam_pose[0*4+2]*tmp_pt_x+cam_pose[1*4+2]*tmp_pt_y+cam_pose[2*4+2]*tmp_pt_z;
          // Camera coordinates to image pixels
          int pixel_x = (int) roundf(cam_intr[0*3+0]*(cam_pt_x/cam_pt_z)+cam_intr[0*3+2]);
          int pixel_y = (int) roundf(cam_intr[1*3+1]*(cam_pt_y/cam_pt_z)+cam_intr[1*3+2]);
          // Skip if outside view frustum
          int im_h = (int) other_params[2];
          int im_w = (int) other_params[3];
          if (pixel_x < 0 || pixel_x >= im_w || pixel_y < 0 || pixel_y >= im_h || cam_pt_z<0)
              return;
          // Skip invalid depth
          float depth_value = depth_im[pixel_y*im_w+pixel_x];
          if (depth_value == 0)
              return;
          // Integrate TSDF
          float trunc_margin = other_params[4];
          float depth_diff = depth_value-cam_pt_z;
          if (depth_diff < -trunc_margin)
              return;
          float dist = fmin(1.0f,depth_diff/trunc_margin);
          float w_old = weight_vol[voxel_idx];
          float obs_weight = other_params[5];
          float w_new = w_old + obs_weight;
          weight_vol[voxel_idx] = w_new;
          tsdf_vol[voxel_idx] = (tsdf_vol[voxel_idx]*w_old+obs_weight*dist)/w_new;
          // Integrate color
          float old_color = color_vol[voxel_idx];
          float old_b = floorf(old_color/(256*256));
          float old_g = floorf((old_color-old_b*256*256)/256);
          float old_r = old_color-old_b*256*256-old_g*256;
          float new_color = color_im[pixel_y*im_w+pixel_x];
          float new_b = floorf(new_color/(256*256));
          float new_g = floorf((new_color-new_b*256*256)/256);
          float new_r = new_color-new_b*256*256-new_g*256;
          new_b = fmin(roundf((old_b*w_old+obs_weight*new_b)/w_new),255.0f);
          new_g = fmin(roundf((old_g*w_old+obs_weight*new_g)/w_new),255.0f);
          new_r = fmin(roundf((old_r*w_old+obs_weight*new_r)/w_new),255.0f);
          color_vol[voxel_idx] = new_b*256*256+new_g*256+new_r;
        }""")

            self._cuda_integrate = self._cuda_src_mod.get_function("integrate")

            # Determine block/grid size on GPU
            gpu_dev = cuda.Device(0)
            self._max_gpu_threads_per_block = gpu_dev.MAX_THREADS_PER_BLOCK
            n_blocks = int(
                np.ceil(
                    float(np.prod(self._vol_dim)) /
                    float(self._max_gpu_threads_per_block)))
            #print "Total number of Voxels:",float(np.prod(self._vol_dim))
            #print "Max threads per block:",self._max_gpu_threads_per_block
            #print "Number of blocks Required:",n_blocks
            grid_dim_x = min(gpu_dev.MAX_GRID_DIM_X,
                             int(np.floor(np.cbrt(n_blocks))))
            grid_dim_y = min(gpu_dev.MAX_GRID_DIM_Y,
                             int(np.floor(np.sqrt(n_blocks / grid_dim_x))))
            grid_dim_z = min(
                gpu_dev.MAX_GRID_DIM_Z,
                int(np.ceil(float(n_blocks) / float(grid_dim_x * grid_dim_y))))
            self._max_gpu_grid_dim = np.array(
                [grid_dim_x, grid_dim_y, grid_dim_z]).astype(int)
            self._n_gpu_loops = int(
                np.ceil(
                    float(np.prod(self._vol_dim)) / float(
                        np.prod(self._max_gpu_grid_dim) *
                        self._max_gpu_threads_per_block)))
            #print "Grid dimension",self._max_gpu_grid_dim
            #print "No of GPU loops",self._n_gpu_loops
        else:
            # Get voxel grid coordinates
            xv, yv, zv = np.meshgrid(range(self._vol_dim[0]),
                                     range(self._vol_dim[1]),
                                     range(self._vol_dim[2]),
                                     indexing='ij')
            self.vox_coords = np.concatenate(
                [xv.reshape(1, -1),
                 yv.reshape(1, -1),
                 zv.reshape(1, -1)],
                axis=0).astype(int).T
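The grid-sizing logic near the top of the GPU branch spreads n_blocks over a 3-D launch grid whose edges are near the cube root of the block count. A standalone illustration of that idea (plain NumPy, no PyCUDA required; the numbers are made up):

import numpy as np

n_blocks = 1200
grid_x = int(np.floor(np.cbrt(n_blocks)))
grid_y = int(np.floor(np.sqrt(n_blocks / grid_x)))
grid_z = int(np.ceil(float(n_blocks) / float(grid_x * grid_y)))
print(grid_x, grid_y, grid_z)                  # 10 10 12
assert grid_x * grid_y * grid_z >= n_blocks    # every block gets a slot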
Example #15
    q = rand_q()
    v = rand_v()

    J = r.jacob0(q)
    Jt = J[:3, :]
    # H = r.hessian0(q)
    # Ht = H[:3, :, :]

    q_n = [10000, 0]
    q_m = [10000, 0]

    # cond = np.linalg.cond(J[:3, :])
    m = r.manipulability(J=J, axes='trans')
    # infn = np.linalg.norm(Jt, 2)
    psi = (np.cbrt(np.linalg.det(Jt @ np.transpose(Jt)))) / \
        (np.trace(Jt @ np.transpose(Jt)) / 3)

    for j in range(1000):
        qd = np.linalg.pinv(J) @ v

        if np.max(qd) > q_m[1]:
            q_m[1] = np.max(qd)

        if np.min(qd) < q_m[0]:
            q_m[0] = np.min(qd)

        if np.linalg.norm(qd) > q_n[1]:
            q_n[1] = np.linalg.norm(qd)
        elif np.linalg.norm(qd) < q_n[0]:
            q_n[0] = np.linalg.norm(qd)
Example #16
import numpy as np
import sys
import os
sys.path.append(os.path.abspath('..'))
from two_d_graphs.myfunctions import *
from two_d_graphs.posdef import *
from two_d_graphs.MatrixMaxProj import *
from scipy import io
from statsmodels.stats.moment_helpers import cov2corr

p = 50
d = 2
n_change = 3
len_t = 201
n = 1
h = 5.848 / np.cbrt(len_t - 1)
sigma = 0.1
phi = 0.5

gG = getGraph(p, d)
S, A = gG
A_T = getGraphatT_Shuheng(S, A, n_change)[1]
A_T_list = [lam * A_T + (1 - lam) * A
            for lam in np.linspace(0, 1, len_t)]  # Omega
C_T_list = [getCov(item) for item in A_T_list]  # Cov: 0+class time

gG0 = getGraph(p, d)
S0, A0 = gG0
C0 = getCov(A0)

X = np.random.multivariate_normal(mean=np.zeros(p), cov=C0, size=n)
Example #17
p = X_list[0][0].shape[1]

set_length_alpha = 51
set_length_beta = 51
indexOfPenalty = 3

alpha_upper = alpha_max(genEmpCov(X_list[3][10].T))
alpha_lower = alpha_max(genEmpCov(X_list[0][0].T))

alpha_set = np.logspace(np.log10(alpha_lower * 5e-2), np.log10(alpha_upper),
                        set_length_alpha)
beta_set = np.logspace(-2, 0.5, set_length_beta)
parameters_product = itertools.product(alpha_set, beta_set)
mesh_parameters = list(parameters_product)

h = 5.848 / np.cbrt(ni * len_t)
#-------------------------------------------------------------------------------------------------------------------------------

X_array = np.array(X_list)  # class by time by ni by p
X_array = np.reshape(X_array,
                     (len_class, len_t * ni, p))  # class by ni*len_t by p

pool = NoDaemonProcessPool(processes=10)
results_mymethod = [
    pool.apply(mtvgl.myTVGL,
               args=(X_array, ni, parameters[0], parameters[1], indexOfPenalty,
                     True, h)) for parameters in mesh_parameters
]

# mtvgl.myTVGL(X_array, ni, 0.5, 1, indexOfPenalty, True, h)
Example #18
File: make_cc.py Project: Micket/CCBuilder
    ccb.write_hdf5('testfile_cleanup.hdf5', 3*[M], 3*[delta_x], trunc_triangles, grain_ids_3, phases_3, good_voxels_3, euler_angles_3, surface_voxels_3, gb_voxels_3, interface_voxels_3, overlaps_0)

    # Compute actual volume fraction:
    print("generated volume fraction of Co (after tweaks):", vol_frac_Co_3)

#         final_frac.append(vol_frac_Co_2)
#
#     print('************************************* average frac *************************** ', np.mean(final_frac))
#     mean_frac.append(np.mean(final_frac))
# print(mean_frac)

# Saving grain volume data
if False:
    np.savetxt('d_eq_orig.txt', [t.d_eq for t in trunc_triangles])
    np.savetxt('d_eq_0.txt', np.cbrt(6./np.pi * grain_volumes_0 * ((L/M)**3)))
    np.savetxt('d_eq_2.txt', np.cbrt(6./np.pi * grain_volumes_2 * ((L/M)**3)))
    # Plot initial and final distributions
    plt.hist(np.array([t.d_eq for t in trunc_triangles]), alpha=0.5, bins=15, normed=True, label='Initial')
    plt.hist(np.cbrt(6./np.pi * grain_volumes_2 * ((L/M)**3)), alpha=0.5, bins=15, normed=True, label='Final')
    plt.legend(loc='upper right')
    plt.show()

# Misorientation:
if False:
    angles = compute_all_misorientation_voxel(trunc_triangles, grain_ids_2, [M]*3)
    all_angles = np.array(list(angles.values()))
    np.savetxt('misorientation.txt', all_angles)
    theoretical = np.loadtxt('D3.txt')
    plt.hist(np.array(all_angles), bins=20, normed=True)
    plt.plot(theoretical[:,0], theoretical[:,1])
Example #19
def funkcja1(x):
    return np.cbrt(-np.exp(-x) -
                   x)  # Transform the function into the form y = ...
Example #20
 def calc_der(self, x):
     return 1. / 3 / (np.cbrt(np.log(x + 1e-20)) ** 2) / (x + 1e-20)
Example #21
 def calc(self, x):
     return np.cbrt(np.log(x + 1e-20))
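Examples #20 and #21 look like a function/derivative pair: d/dx cbrt(log(x)) = 1/(3 * log(x)**(2/3) * x). A quick standalone check of that, comparing the closed-form expression from calc_der with a central finite difference of the expression from calc:

import numpy as np

x, eps = 3.0, 1e-6
analytic = 1. / 3 / (np.cbrt(np.log(x + 1e-20)) ** 2) / (x + 1e-20)
numeric = (np.cbrt(np.log(x + eps + 1e-20)) - np.cbrt(np.log(x - eps + 1e-20))) / (2 * eps)
print(analytic, numeric)  # both ~0.1044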
Example #22
        print("Prepared", len(trunc_triangles), "triangles")
        if use_potential:
            ccb.optimize_midpoints(L, trunc_triangles)
            grain_ids_0, overlaps_0, voxel_indices_0 = ccb_c.populate_voxels(M, L, trunc_triangles, 1, 0, 1.0)
        else:
            grain_ids_0, overlaps_0, voxel_indices_0 = ccb_c.populate_voxels(M, L, trunc_triangles, nr_tries, M, 1.0)
        phases_0, good_voxels_0, euler_angles_0, phase_volumes_0, grain_volumes_0 = ccb_c.calc_grain_prop(M, grain_ids_0, trunc_triangles)
        surface_voxels_0, gb_voxels_0, interface_voxels_0 = ccb_c.calc_surface_prop(M, grain_ids_0)

        vol_frac_WC_0 = phase_volumes_0[1]/np.float(np.sum(phase_volumes_0))
        vol_frac_Co_0.append( 1 - vol_frac_WC_0 )

        sum_gb_voxels_0 = np.sum(gb_voxels_0)
        contiguity_0.append( sum_gb_voxels_0 / np.float(sum_gb_voxels_0 + np.sum(interface_voxels_0)) )

        d_eq_0.append(np.mean(np.cbrt(6./np.pi * grain_volumes_0 * ((L/M)**3))))

        print("********************** Final vol fraction", vol_frac_Co_0[-1])

        # Do Potts on coarse grid first for an improved initial guess.
        grain_ids_2 = grain_ids_0.copy()
        gb_voxels_2 = gb_voxels_0.copy()

        if mc_steps > 0:
            M_coarse = M//2
            print("creating coarse grid")
            grain_ids_coarse, overlaps_coarse, voxel_indices_coarse = ccb_c.populate_voxels(M_coarse, L, trunc_triangles, 0, M_coarse, 1.0)
            print("coarse grid for potts done")
            surface_voxels_coarse, gb_voxels_coarse, interface_voxels_coarse = ccb_c.calc_surface_prop(M_coarse, grain_ids_coarse)
            print("starting coarse potts")
            ccb_c.make_mcp_bound(M_coarse, grain_ids_coarse, gb_voxels_coarse, overlaps_coarse, voxel_indices_coarse, np.int(mc_steps*M_coarse**4), kBT)
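The d_eq quantities above (and in Example #18) are volume-equivalent sphere diameters: inverting V = pi * d**3 / 6 gives d = cbrt(6*V/pi), with (L/M)**3 presumably converting voxel counts to physical volume. A one-line standalone check for a unit-diameter sphere:

import numpy as np

d = 1.0
V = np.pi * d ** 3 / 6
assert np.isclose(np.cbrt(6. / np.pi * V), d)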
Example #23
Lsuns = data[:, 1]  # data in Lsun units
makesubdir('output')  # create file out dir

# conversions and precalculations
radii = np.zeros(Rsuns.size)  # convert Rsun to AU
for i, r in enumerate(Rsuns):
    radii[i] = r * 0.00465047  # 215 Rsun ~ 1 AU
watts = np.zeros(Lsuns.size)  # convert Lsun to W (MKS units)
for i, l in enumerate(Lsuns):
    watts[i] = l * 3.828e26  # IAU Resolution B3 conversion
lumins = np.zeros(watts.size)  # convert W to sim units
for i, w in enumerate(watts):
    lumins[i] = (w * ((6.7e-12)**2) * (5e-31)) / ((3.2e-8)**3)
t_fs = np.zeros(lumins.size)  # precalculate t_f (Eq. 1)
for i, l in enumerate(lumins):
    t_fs[i] = np.cbrt(masses[i] * radii[i]**2 / l)
taus = np.zeros(t_fs.size)  # precalc tau (Eq. 2)
G = 4 * np.pi**2  # units of AU, yr, and Msun
for i, t_f in enumerate(t_fs):
    taus[i] = 2. * radii[i]**3 / G / masses[i] / t_f

# main loop
for i, init_a in enumerate(init_as):
    # initialize sim and create Interpolator objects
    timer_start = time.perf_counter()
    sim, rebx, tides = makesim(init_a)
    starmass = reboundx.Interpolator(rebx, mtimes, masses, 'spline')
    starradius = reboundx.Interpolator(rebx, rtimes, radii, 'spline')
    startau = reboundx.Interpolator(rebx, ltimes, taus, 'spline')

    # update Sun's mass and radius accordingly
Example #24
def train_model(model: Model,
                dataset_id,
                dataset_prefix,
                epochs=50,
                batch_size=128,
                val_subset=None,
                cutoff=None,
                normalize_timeseries=False,
                learning_rate=1e-3):
    """
    Trains a provided Model, given a dataset id.

    Args:
        model: A Keras Model.
        dataset_id: Integer id representing the dataset index contained in
            `utils/constants.py`.
        dataset_prefix: Name of the dataset. Used for weight saving.
        epochs: Number of epochs to train.
        batch_size: Size of each batch for training.
        val_subset: Optional integer id to subset the test set. To be used if
            the test set evaluation time significantly surpasses training time
            per epoch.
        cutoff: Optional integer which slices off the first `cutoff` timesteps
            from the input signal.
        normalize_timeseries: Bool / Integer. Determines whether to normalize
            the timeseries.

            If False, does not normalize the time series.
            If True / int not equal to 2, performs standard sample-wise
                z-normalization.
            If 2: Performs full dataset z-normalization.
        learning_rate: Initial learning rate.
    """
    X_train, y_train, X_test, y_test, is_timeseries = load_dataset_at(
        dataset_id, normalize_timeseries=normalize_timeseries)
    max_nb_words, sequence_length = calculate_dataset_metrics(X_train)

    if sequence_length != MAX_SEQUENCE_LENGTH_LIST[dataset_id]:
        if cutoff is None:
            choice = cutoff_choice(dataset_id, sequence_length)
        else:
            assert cutoff in [
                'pre', 'post'
            ], 'Cutoff parameter value must be either "pre" or "post"'
            choice = cutoff

        if choice not in ['pre', 'post']:
            return
        else:
            X_train, X_test = cutoff_sequence(X_train, X_test, choice,
                                              dataset_id, sequence_length)

    if not is_timeseries:
        X_train = pad_sequences(X_train,
                                maxlen=MAX_SEQUENCE_LENGTH_LIST[dataset_id],
                                padding='post',
                                truncating='post')
        X_test = pad_sequences(X_test,
                               maxlen=MAX_SEQUENCE_LENGTH_LIST[dataset_id],
                               padding='post',
                               truncating='post')

    classes = np.unique(y_train)
    le = LabelEncoder()
    y_ind = le.fit_transform(y_train.ravel())
    recip_freq = len(y_train) / (len(le.classes_) *
                                 np.bincount(y_ind).astype(np.float64))
    class_weight = recip_freq[le.transform(classes)]

    print("Class weights : ", class_weight)

    y_train = to_categorical(y_train, len(np.unique(y_train)))
    y_test = to_categorical(y_test, len(np.unique(y_test)))

    if is_timeseries:
        factor = 1. / np.cbrt(2)
    else:
        factor = 1. / np.sqrt(2)

    path_splits = os.path.split(dataset_prefix)
    if len(path_splits) > 1:
        base_path = os.path.join('weights', *path_splits)

        if not os.path.exists(base_path):
            os.makedirs(base_path)

        base_path = os.path.join(base_path, path_splits[-1])

    else:
        all_weights_path = os.path.join('weights', dataset_prefix)

        if not os.path.exists(all_weights_path):
            os.makedirs(all_weights_path)

    model_checkpoint = ModelCheckpoint("./weights/%s_weights.h5" %
                                       dataset_prefix,
                                       verbose=1,
                                       monitor='loss',
                                       save_best_only=True,
                                       save_weights_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='loss',
                                  patience=100,
                                  mode='auto',
                                  factor=factor,
                                  cooldown=0,
                                  min_lr=1e-4,
                                  verbose=2)

    callback_list = [model_checkpoint, reduce_lr]

    optm = Adam(lr=learning_rate)

    model.compile(optimizer=optm,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if val_subset is not None:
        X_test = X_test[:val_subset]
        y_test = y_test[:val_subset]

    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              callbacks=callback_list,
              class_weight=class_weight,
              verbose=2,
              validation_data=(X_test, y_test))
Example #25
 def test_cbrt_array(self):
     assert np.all(np.cbrt(np.array([1., 8., 64.]) * u.m**3)
                   == np.array([1., 2., 4.]) * u.m)
Example #26
from mpi4py import MPI
from lammps import lammps
import numpy as np
import sys

radius = int(sys.argv[1])
ratio = int(sys.argv[2])

length = radius * ratio * 2 * np.pi
box_sides = radius * np.cbrt(1.5 * ratio * np.pi) + 2

seed = int(sys.argv[3])

A = int(sys.argv[4])
B = 25
density = float(sys.argv[5])

save_dir = sys.argv[6]

lmp = lammps()

initial_commands = [
    "units lj", "dimension 3", "boundary p p p", "neighbor 0.3 bin",
    "neigh_modify every 1 check yes", "atom_style  mdpd"
]

create_commands = [
    f"region  mdpd  block -{box_sides} {box_sides} -{box_sides} {box_sides} 0 {length} units box",
    "create_box 1 mdpd", f"lattice  fcc {density}",
    f"region tube cylinder z 0 0 {radius} INF INF",
    "create_atoms  1 region tube", "mass 1 1.0"
Example #27
    def addParticles(self, n, method, mass = 0.0):
        """
        Adds n particles to the simulation box using two methods: random
        and lattice. The first one inserts particles randomly. The second
        inserts them in a lattice.

        Parameters
        ----------
        method : string
        Method to insert particles. Values can be "random" or "lattice".

        n : integer
        Number of molecules to be inserted in the box.

        Returns
        ----------
        None


        Raises
        ----------
        None


        Notes
        ----------
        None

        """

        self.box.mass = mass / 6.023e23 * 10.0**-3
        self.box.numParticles = n

        if method == "random":
            self.box.coordinates = (0.5 - np.random.rand(n,3)) * self.box.length

        elif method == "lattice":
        #    nSide = 1
        #    self.box.coordinates = np.zeros((n,3))
        #    while np.power(nSide, 3) < n:
        #        nSide += 1
        #    counterPosition = np.zeros((3, ))
        #    for iParticle in range(0, n):
        #        self.box.coordinates[iParticle] = \
        #            (counterPosition + 0.5)*self.box.length / nSide \
        #            - 0.5*self.box.length 
        #        counterPosition[0] += 1
        #        if counterPosition[0] == nSide:
        #            counterPosition[0] = 0
        #            counterPosition[1] += 1
        #            if counterPosition[1] == nSide:
        #                counterPosition[1] = 0
        #                counterPosition[2] += 1

        #    for iParticle in range(0, self.box.numParticles):
        #        self.box.coordinates[iParticle] -= 0.5


            # np.linspace needs an integer point count; use int(cbrt(n)) + 1 points per axis
            n_side = int(np.cbrt(self.box.numParticles)) + 1
            xVector = np.linspace(0.0, self.box.length, n_side)
            yVector = np.linspace(0.0, self.box.length, n_side)
            zVector = np.linspace(0.0, self.box.length, n_side)
            self.box.coordinates = np.vstack(np.meshgrid(xVector, yVector, zVector)).reshape(3, -1).T

            excess = len(self.box.coordinates) - self.box.numParticles

            # Guard against excess == 0, where coordinates[:-0] would drop every row
            if excess > 0:
                self.box.coordinates = self.box.coordinates[:-excess]

            self.box.coordinates *= 0.98
Example #28
def generate_lattice(sg,
                     volume,
                     minvec=tol_m,
                     minangle=pi / 6,
                     max_ratio=10.0,
                     maxattempts=100):
    """
    generate the lattice according to the space group symmetry and number of atoms
    if the space group has centering, we will transform to conventional cell setting
    If the generated lattice does not meet the minimum angle and vector requirements,
    we try to generate a new one, up to maxattempts times

    args:
        sg: International number of the space group
        volume: volume of the lattice
        minvec: minimum allowed lattice vector length (among a, b, and c)
        minangle: minimum allowed lattice angle (among alpha, beta, and gamma)
        max_ratio: largest allowed ratio of two lattice vector lengths
    """
    maxangle = pi - minangle
    for n in range(maxattempts):
        #Triclinic
        if sg <= 2:
            #Derive lattice constants from a random matrix
            mat = random_shear_matrix(width=0.2)
            a, b, c, alpha, beta, gamma = matrix2para(mat)
            x = sqrt(1 - cos(alpha)**2 - cos(beta)**2 - cos(gamma)**2 + 2 *
                     (cos(alpha) * cos(beta) * cos(gamma)))
            vec = random_vector()
            abc = volume / x
            xyz = vec[0] * vec[1] * vec[2]
            a = vec[0] * np.cbrt(abc) / np.cbrt(xyz)
            b = vec[1] * np.cbrt(abc) / np.cbrt(xyz)
            c = vec[2] * np.cbrt(abc) / np.cbrt(xyz)
        #Monoclinic
        elif sg <= 15:
            alpha, gamma = pi / 2, pi / 2
            beta = gaussian(minangle, maxangle)
            x = sin(beta)
            vec = random_vector()
            xyz = vec[0] * vec[1] * vec[2]
            abc = volume / x
            a = vec[0] * np.cbrt(abc) / np.cbrt(xyz)
            b = vec[1] * np.cbrt(abc) / np.cbrt(xyz)
            c = vec[2] * np.cbrt(abc) / np.cbrt(xyz)
        #Orthorhombic
        elif sg <= 74:
            alpha, beta, gamma = pi / 2, pi / 2, pi / 2
            x = 1
            vec = random_vector()
            xyz = vec[0] * vec[1] * vec[2]
            abc = volume / x
            a = vec[0] * np.cbrt(abc) / np.cbrt(xyz)
            b = vec[1] * np.cbrt(abc) / np.cbrt(xyz)
            c = vec[2] * np.cbrt(abc) / np.cbrt(xyz)
        #Tetragonal
        elif sg <= 142:
            alpha, beta, gamma = pi / 2, pi / 2, pi / 2
            x = 1
            vec = random_vector()
            c = vec[2] / (vec[0] * vec[1]) * np.cbrt(volume / x)
            a = b = sqrt((volume / x) / c)
        #Trigonal/Rhombohedral/Hexagonal
        elif sg <= 194:
            alpha, beta, gamma = pi / 2, pi / 2, pi / 3 * 2
            x = sqrt(3.) / 2.
            vec = random_vector()
            c = vec[2] / (vec[0] * vec[1]) * np.cbrt(volume / x)
            a = b = sqrt((volume / x) / c)
        #Cubic
        else:
            alpha, beta, gamma = pi / 2, pi / 2, pi / 2
            s = (volume)**(1. / 3.)
            a, b, c = s, s, s
        #Check that lattice meets requirements
        maxvec = (a * b * c) / (minvec**2)
        if minvec < maxvec:
            #Check minimum Euclidean distances
            smallvec = min(a * cos(max(beta, gamma)),
                           b * cos(max(alpha, gamma)),
                           c * cos(max(alpha, beta)))
            if (a > minvec and b > minvec and c > minvec and a < maxvec
                    and b < maxvec and c < maxvec and smallvec < minvec
                    and alpha > minangle and beta > minangle
                    and gamma > minangle and alpha < maxangle
                    and beta < maxangle and gamma < maxangle
                    and a / b < max_ratio and a / c < max_ratio
                    and b / c < max_ratio and b / a < max_ratio
                    and c / a < max_ratio and c / b < max_ratio):
                return np.array([a, b, c, alpha, beta, gamma])
            #else:
            #print([a, b, c, maxvec, minvec, maxvec*minvec*minvec])
    #If maxattempts tries have been made without success
    print(
        "Error: Could not generate lattice after " + str(n + 1) +
        " attempts for volume ", volume)
    return
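In the triclinic, monoclinic, and orthorhombic branches the cube-root scaling guarantees the requested cell volume: with abc = volume / x and a_i = vec_i * cbrt(abc) / cbrt(xyz), the product a*b*c equals abc, so a*b*c*x = volume. A standalone check with made-up numbers:

import numpy as np

volume, x = 120.0, 0.8           # illustrative target volume and angle factor
vec = np.array([0.7, 1.1, 1.3])  # stand-in for random_vector()
xyz = vec.prod()
abc = volume / x
a, b, c = vec * np.cbrt(abc) / np.cbrt(xyz)
assert np.isclose(a * b * c * x, volume)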
Example #29
    def fit(self, text_list):
        """
        The fit method.

        :param text_list: List of input texts
        
        """

        if not type(text_list) == list:
            text_list = text_list.values.tolist()

        # Subsample the document space to reduce graph size.
        if len(text_list) > self.doc_limit:
            if self.targets is None:
                if not self.doc_limit is None:
                    text_list = text_list[:self.doc_limit]

            else:
                unique_targets = np.unique(self.targets)
                utx = defaultdict(list)
                for utarget in unique_targets:
                    indices = np.where(self.targets == utarget)[0]
                    utx[utarget] = indices.tolist()

                sampled_docs = []
                while len(sampled_docs) < self.doc_limit:
                    for k, v in utx.items():
                        if len(v) > 0:
                            relevant_index = v.pop()
                            sampled_docs.append(text_list[relevant_index])
                            self.subsample_classes.append(k)

                assert len(sampled_docs) == self.doc_limit
                text_list = sampled_docs
                del sampled_docs

        t_tokens = OrderedDict()

        for a in text_list:
            t_tokens[a] = set(
                [x.lower()[:self.ed_cutoff] for x in a.strip().split(" ")])

        nlist = {}

        for a in tqdm.tqdm(range(len(text_list))):
            for b in range(a, len(text_list)):
                set1 = t_tokens[text_list[a]]
                set2 = t_tokens[text_list[b]]
                jaccard = self.jaccard_index(set1, set2)
                nlist[(a, b)] = jaccard

        self.core_documents = t_tokens
        self.G = self.get_graph(nlist, len(text_list))
        G = nx.to_scipy_sparse_matrix(self.G,
                                      nodelist=list(range(len(text_list))))

        if self.verbose:
            logging.info("Graph normalization in progress.")

        laplacian = csgraph.laplacian(G, normed=True)

        if self.verbose:
            logging.info("SVD of the graph relation space in progress.")

        if self.ndim >= len(text_list): self.ndim = len(text_list) - 1

        svd = TruncatedSVD(n_components=self.ndim,
                           random_state=self.random_seed)

        self.node_embeddings = svd.fit_transform(laplacian)
        if self.neigh_size is None:
            self.neigh_size = int(np.cbrt(self.node_embeddings.shape[0]))
Example #30
# Number of client processes
NUM_OF_CLIENTS = 2

# Global number of particles
GLOBAL_NUM_PART = args.N
assert (GLOBAL_NUM_PART % 2) == 0, "The number of particles must be even"

# Init particle density
init_density = args.density

# Global volume
GLOBAL_VOLUME = GLOBAL_NUM_PART / init_density

# Init box _length
init_box_length = np.cbrt(GLOBAL_VOLUME / NUM_OF_CLIENTS)

# Temperature
kT = args.T

# Displacement
DX_MAX = 0.4
DV_MAX = 0.2

# Number of steps, warmup steps
steps = args.steps
warmup = args.warmup
# Perform about 100 system checks during the simulation
check = int(steps / 1000)

# Trial move probabilities
Example #31
def inner_distance_transform_3d(mask,
                                bins=None,
                                erosion_width=None,
                                alpha=0.1,
                                beta=1,
                                sampling=[0.5, 0.217, 0.217]):
    """Transform a label mask for a z-stack with an inner distance transform.

    .. code-block:: python

        inner_distance = 1 / (1 + beta * alpha * distance_to_center)

    Args:
        mask (numpy.array): A label mask (``y`` data).
        bins (int): The number of transformed distance classes.
        erosion_width (int): Number of pixels to erode edges of each label.
        alpha (float, str): Coefficient to reduce the magnitude of the distance
            value. If ``'auto'``, determines alpha for each cell based on the
            cell area.
        beta (float): Scale parameter that is used when ``alpha`` is "auto".
        sampling (list): Spacing of pixels along each dimension.

    Returns:
        numpy.array: A mask of same shape as input mask,
        with each label being a distance class from 1 to ``bins``.

    Raises:
        ValueError: ``alpha`` is a string but not set to "auto".
    """
    # Check input to alpha
    if isinstance(alpha, str):
        if alpha.lower() != 'auto':
            raise ValueError('alpha must be set to "auto"')

    mask = np.squeeze(mask)
    mask = erode_edges(mask, erosion_width)

    distance = ndimage.distance_transform_edt(mask, sampling=sampling)
    distance = distance.astype(K.floatx())

    label_matrix = label(mask)

    inner_distance = np.zeros(distance.shape, dtype=K.floatx())
    for prop in regionprops(label_matrix, distance):
        coords = prop.coords
        center = prop.weighted_centroid
        distance_to_center = (coords - center) * np.array(sampling)
        distance_to_center = np.sum(distance_to_center**2, axis=1)

        # Determine alpha to use
        if str(alpha).lower() == 'auto':
            _alpha = 1 / np.cbrt(prop.area)
        else:
            _alpha = float(alpha)

        center_transform = 1 / (1 + beta * _alpha * distance_to_center)
        coords_z = coords[:, 0]
        coords_x = coords[:, 1]
        coords_y = coords[:, 2]
        inner_distance[coords_z, coords_x, coords_y] = center_transform

    if bins is None:
        return inner_distance

    # divide into bins
    min_dist = np.amin(inner_distance.flatten())
    max_dist = np.amax(inner_distance.flatten())
    distance_bins = np.linspace(min_dist - K.epsilon(),
                                max_dist + K.epsilon(),
                                num=bins + 1)
    inner_distance = np.digitize(inner_distance, distance_bins, right=True)
    return inner_distance - 1  # minimum distance should be 0, not 1
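A worked check of the docstring formula with alpha set to 'auto' (illustrative numbers only): for a 1000-voxel region, _alpha = 1 / cbrt(1000) = 0.1, so a voxel whose squared distance to the weighted centroid is 25 maps to 1 / (1 + 1 * 0.1 * 25) ≈ 0.286:

import numpy as np

beta, region_area, sq_dist = 1.0, 1000, 25.0
_alpha = 1 / np.cbrt(region_area)
print(1 / (1 + beta * _alpha * sq_dist))  # ~0.2857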
Example #32
                     y,
                     nd2,
                     cmap=matplotlib.colors.ListedColormap(clrs),
                     alpha=1.0,
                     shading='gouraud',
                     zorder=10)

# Plot the stripes
ax = fig.add_axes([0, 0, 1, 1],
                  facecolor='black',
                  xlim=((start + datetime.timedelta(days=1)).timestamp(),
                        (end - datetime.timedelta(days=1)).timestamp()),
                  ylim=(0, 1))
ax.set_axis_off()

ndata = numpy.transpose(ndata)
s = ndata.shape
y = numpy.linspace(0, 1, s[0] + 1)
x = [(a - datetime.timedelta(days=15)).timestamp() for a in dts]
x.append((dts[-1] + datetime.timedelta(days=15)).timestamp())
img = ax.pcolorfast(x,
                    y,
                    numpy.cbrt(ndata),
                    cmap='RdYlBu_r',
                    alpha=1.0,
                    vmin=-1.7,
                    vmax=1.7,
                    zorder=100)

fig.savefig('20CRv3.png')
Example #33
import numpy as np
import matplotlib.pyplot as plt

a = 0.1
n = int(1e7)

# Inverse-transform sampling: for the density p(r) = 3*a**3 / r**4 on [a, inf),
# the CDF is 1 - (a/r)**3, so solving F(r) = t gives r = a / cbrt(1 - t).
t = np.random.random(n)
r = a / np.cbrt(1 - t)

x = np.linspace(a, 10 * a, num=100)
y = 3 * a**3 / x**4

hist, edge = np.histogram(r, x)
hist = hist / hist[0] * y[0]

plt.figure()
plt.plot(x, y, linewidth=3, color='b')
plt.draw()

# plt.figure()
# plt.hist(r, bins=100, edgecolor="w")
plt.plot(edge[:-1], hist, linewidth=3, color='r')
plt.show()
Example #34
def gen_each(inlist):
	"""
	Function for generating synthetic data
	inlist[0] - dist of x
	inlist[1] - f1
	inlist[2] - dist of noise
	inlist[3] - f2
	inlist[4] - number of pts
	"""
	if len(inlist) != 5:
		print('not enough parameters!')
		return -1

	n = inlist[4]
	# ----- dist of x -----
	if inlist[0] == 1:
		x = np.random.rand(n, 1)
	elif inlist[0] == 2:
		x = np.random.randn(n, 1)
	elif inlist[0] == 3:
		x = np.random.exponential(0.5, [n, 1])
	elif inlist[0] == 4:
		x = np.random.laplace(0, 1, [n, 1])
	elif inlist[0] == 5:
		x = np.random.lognormal(0, 1, [n, 1])

	# ----- f_1 -----
	if inlist[1] == 0:
		f1 = -x 
	elif inlist[1] == 1:
		f1 = np.exp( -(np.random.rand() * 0.1 + 1) * x )
	elif inlist[1] == 2:
		f1 = np.exp( -(np.random.rand() * 0.1 + 3) * x )

	# ----- noise -----
	if inlist[2] == 0:
		t = f1
	elif inlist[2] == 1:
		t = f1 + 0.2 * np.random.rand(n, 1)
	elif inlist[2] == 2:
		t = f1 + 0.05*np.random.randn(n, 1)
	elif inlist[2] == 3:
		t = f1 + np.random.exponential(0.5, [n, 1])
	elif inlist[2] == 4:
		t = f1 + np.random.laplace(0, 1, [n, 1])
	elif inlist[2] == 5:
		t = f1 + np.random.lognormal(0, 1, [n, 1])

	# ----- f_2 -----
	if inlist[3] == 0:
		y = t
	elif inlist[3] == 1:
		y = 1 / (t**2 + 1)
	elif inlist[3] == 2:
		y = t**2
	elif inlist[3] == 3:
		y = np.sin(t)
	elif inlist[3] == 4:
		y = np.cos(t)
	elif inlist[3] == 5:
		y = np.cbrt(t)

	xy = np.hstack((x, y))
	return xy
Example #35
for n in range(0,len(axs)):
    axs[n].hist(white_wine.iloc[:,n],bins=50)
    axs[n].set_title('name={}'.format(cols[n])) 
# Obs 4 = "volatile acidity" and "residual sugar" are right skewed

###-----Since this is imbalanced class dataset we will try to achieve higher recall and use roc_auc as evaluation metric----###
    
#%%
    
#-- removing features which are highly correlated or zero correlated:  ('citric acid','free sulfur dioxide')
col = ['fixed_acidity','volatile_acidity','residual_sugar','chlorides','total_sulfur_dioxide','density','sulphates', 'alcohol']    
white_wine.head()
# Working with right skewed data--> cube root 
white_wine_mod = pd.DataFrame()
for n in col:
    white_wine_mod[n] = white_wine[n].apply(lambda x: np.cbrt(x))
white_wine_mod['pH']=white_wine['pH']
white_wine_mod['quality']=white_wine['quality']

# Working with normal data
white_wine_nor = white_wine[['fixed_acidity','volatile_acidity','residual_sugar','chlorides','total_sulfur_dioxide','density','sulphates', 'alcohol','pH','quality']]  

#%%
#-- detecting outliers, transforming data can also help in handling outliers.
z = np.abs(stats.zscore(white_wine_mod)) # Calculating Z-score
threshold = 3
np.where(z > 3)

white_wine_mod_o = white_wine_mod[(z < 3).all(axis=1)]
white_wine_mod_o.shape # (4616, 10) # without outliers
white_wine_mod.shape #(4898, 10)  # with outliers
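To verify that the cube-root transform actually tames the right skew noted above, scipy.stats.skew gives a quick before/after comparison; a minimal sketch assuming the white_wine DataFrame from the preceding cells:

import numpy as np
from scipy.stats import skew

for c in ['volatile_acidity', 'residual_sugar']:
    print(c, 'raw skew:', round(skew(white_wine[c]), 3),
          '-> cbrt skew:', round(skew(np.cbrt(white_wine[c])), 3))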
Example #36
0
def post_process(data, calib, M):
    import time
    started = time.time()
    mask, Fs, quality = [], [], []
    for i,d in enumerate(data):
        if i % 100 == 0:
            print(".", end="", flush=True)
        xy, uv, q = d[:,:2], d[:,2:4], d[:,4:]

        # represent as matrix [[a, b], [b, c]]
        # flip signs since maximum is negative definite
        a, b, c = -q[:,3], -q[:,4]/2, -q[:,5]

        # assess quality by determinant
        d = a*c-b*b
        m = d > 1e-6
        quality.append(np.median(d))

        if m.sum() > 10:
            F = algebraic_fit(xy[m], (xy+uv)[m], calib)
            mask.append(True)
            Fs.append(F)
        else:
            mask.append(False)
    print()

    F = np.asarray(Fs)
    mask = np.asarray(mask)
    quality = np.asarray(quality)

    # camera to sample coordinates
    #F = F.dot(M.T)

    # set volumetric scale to 1
    scale = np.cbrt(np.linalg.det(F))
    F /= scale[...,np.newaxis,np.newaxis]

    #
    # Finite strain theory
    #
    # Polar decomposition
    U, S, V = np.linalg.svd(F)
    R = U @ V
    #P = (V_T*S[:,np.newaxis,:]) @ V # == V.T @ diag(S) @ V

    V_T = V.transpose(0,2,1)
    R_T = R.transpose(0,2,1)
    epsilon = (V_T*np.log(S[:,np.newaxis,:])) @ V # == logm(P)

    k = (R - R_T)/2 # inverse of Rodrigues' rotation formula

    sintheta = np.sqrt((k*k).sum(axis=(1,2))/2)
    #costheta = (R.trace(0,1,2)-1)/2
    #theta = np.arctan2(sintheta, costheta)
    #omega = k*(theta/np.sin(theta))[:,np.newaxis,np.newaxis] # == logm(R)
    omega = k*(np.arcsin(sintheta)/sintheta)[:,np.newaxis,np.newaxis] # == logm(R)

    #
    # Infinitesimal strain theory
    #
    #F_T = F.transpose(0,2,1)
    #epsilon = (F+F_T)/2 - np.eye(3)[np.newaxis,:,:]
    #omega = (F-F_T)/2
    print("post_processed in", time.time()-started, "s")
    return mask, F, epsilon, omega, quality
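For readers unfamiliar with the finite-strain block above, here is a small self-contained sketch (synthetic F and variable names are ours) showing that dividing by cbrt(det F) makes the deformation gradient volume-preserving, and that R = U @ Vh from the SVD together with P = Vh.T @ diag(S) @ Vh reproduces the polar decomposition F = R P:

import numpy as np

rng = np.random.default_rng(1)
F = np.eye(3) + 0.05 * rng.standard_normal((3, 3))   # a mild deformation gradient

F = F / np.cbrt(np.linalg.det(F))                    # set volumetric scale to 1
U, S, Vh = np.linalg.svd(F)
R = U @ Vh                                           # rotation part
P = Vh.T @ np.diag(S) @ Vh                           # right stretch tensor

print(np.linalg.det(F))                  # ~1.0
print(np.allclose(R @ R.T, np.eye(3)))   # True: R is orthogonal
print(np.allclose(R @ P, F))             # True: F = R P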
Example #37
0
#Import Data and remove outliers
model = pd.read_csv(r"E:\TAMIDS 2019\Data\pred.csv", low_memory=False, parse_dates=["Time"])
model = model[np.abs(model["Count"]-model["Count"].mean())<=(model["Count"].std()*3)]


# In[11]:


#Data Distribution and Probability Plots: Count
fig,ax = plt.subplots(ncols=2,nrows=3)
fig.set_size_inches(15,15)
sns.distplot(model['Count'], ax=ax[0][0])
stats.probplot(model["Count"], dist='norm', fit=True, plot=ax[0][1])
sns.distplot(stats.boxcox(model['Count'], 0), ax=ax[1][0])
stats.probplot(stats.boxcox(model["Count"], .5), dist='norm', fit=True, plot=ax[1][1])
sns.distplot(np.cbrt(model['Count']), ax=ax[2][0])
stats.probplot(np.cbrt(model["Count"]), dist='norm', fit=True, plot=ax[2][1])


# In[12]:


#Data Distribution and Probability Plots: Total Cost
fig,ax = plt.subplots(ncols=2,nrows=2)
fig.set_size_inches(15,15)
sns.distplot(model['total_cost'], ax=ax[0][0])
stats.probplot(model["total_cost"], dist='norm', fit=True, plot=ax[0][1])
sns.distplot(np.cbrt(model['total_cost']), ax=ax[1][0])
stats.probplot(np.cbrt(model["total_cost"]), dist='norm', fit=True, plot=ax[1][1])

Example #38
0
def scale_const_density(scale_factor):
    factors = ps.get_identity_factors()
    factors["n_particles"] = scale_factor
    factors["box_length"] = np.cbrt(scale_factor)
    return factors
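The cbrt here keeps the number density fixed: if the particle count grows by a factor s, the box edge must grow by s**(1/3) so that N / L**3 is unchanged. A one-line check of the arithmetic (values illustrative):

import numpy as np

s = 8.0                   # put 8x more particles in the box
L_scale = np.cbrt(s)      # edge grows by 2x ...
print(s / L_scale**3)     # ... so N / L**3 stays 1: density unchanged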
Example #39
0
ax2 = fig.add_axes([0,0,1,1],facecolor='green')
ax2.set_axis_off() # Don't want surrounding x and y axis
nd2=numpy.random.rand(s[1],s[0])
clrs=[]
for shade in numpy.linspace(.42+.01,.36+.01):
    clrs.append((shade,shade,shade,1))

y = numpy.linspace(0,1,s[1])
x = numpy.linspace(0,1,s[0])
img = ax2.pcolormesh(x,y,nd2,
                        cmap=matplotlib.colors.ListedColormap(clrs),
                        alpha=1.0,
                        shading='gouraud',
                        zorder=10)

ax = fig.add_axes([0,0,1,1],facecolor='black',xlim=(0,1),ylim=(0,1))
ax.set_axis_off() # Don't want surrounding x and y axis

ndata=numpy.transpose(ndata)
s=ndata.shape
y = numpy.linspace(0,1,s[0])
x = numpy.linspace(0,1,s[1])
img = ax.pcolorfast(x,y,numpy.cbrt(ndata),
                        cmap='RdYlBu_r',
                        alpha=1.0,
                        vmin=-1.7,
                        vmax=1.7,
                        zorder=100)

fig.savefig('ensemble.pdf')
Example #40
0
 def test_cbrt_array(self):
     # Calculate cbrt on both sides since on Windows the cube root of 64
     # does not exactly equal 4.  See 4388.
     values = np.array([1., 8., 64.])
     assert np.all(np.cbrt(values * u.m**3) ==
                   np.cbrt(values) * u.m)
Example #41
0
 def test_cbrt_scalar(self):
     assert np.cbrt(8. * u.m**3) == 2. * u.m
Example #42
0
def solve_four(a,b,c,d):

	if (a == 0):
		roots = solve_three(b,c,d)
		return [roots[0],roots[1],roots[1]]

	# http://www.it.uom.gr/teaching/linearalgebra/NumericalRecipiesInC/c5-6.pdf
	# http://web.archive.org/web/20120321013251/http://linus.it.uts.edu.au/~don/pubs/solving.html
	p = b/a
	q = c/a
	r = d/a
	u = q - np.square(p)/3
	v = r - p*q/3 + 2*p*p*p/27
	j = 4*u*u*u/27 + v*v  # discriminant of the depressed cubic y**3 + u*y + v

	if (b == 0 and c == 0):
		return [np.cbrt(-r),np.cbrt(-r),np.cbrt(-r)]
	elif (abs(p) > 10e100):
		return [-p,-p,-p]
	elif (abs(q) > 10e100):
		return [-np.cbrt(v),-np.cbrt(v),-np.cbrt(v)]
	elif (abs(u) > 10e100): #some big number
		return [np.cbrt(4)*u/3,np.cbrt(4)*u/3,np.cbrt(4)*u/3]

	if (j > 0):
		# one real root
		w = np.sqrt(j)
		if (v > 0):
			y = (u / 3)*np.cbrt(2 / (w + v)) - np.cbrt((w + v) / 2) - p / 3
		else:
			y = np.cbrt((w - v) / 2) - (u / 3)*np.cbrt(2 / (w - v)) - p / 3
		return [y, y, y]  # repeat the single real root to keep the 3-element return shape
	else:
		s = np.sqrt(-u/3)
		t = -v/ (2*s*s*s)
		k = np.arccos(t)/3

		y1 = 2 * s*np.cos(k) - p / 3;
		y2 = s*(-np.cos(k) + np.sqrt(3.)*np.sin(k)) - p / 3;
		y3 = s*(-np.cos(k) - np.sqrt(3.)*np.sin(k)) - p / 3;
		return [float(y1[0]),float(y2[0]),float(y3[0])]
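A hedged sanity check of the cubic solver against numpy's companion-matrix solver; the test polynomial is ours and was chosen to have three real roots so the trigonometric branch is exercised:

import numpy as np

# (x - 1)(x - 2)(x - 3) = x**3 - 6*x**2 + 11*x - 6.  solve_four indexes y1[0]
# before returning, so it appears to expect length-1 arrays rather than scalars.
a, b, c, d = (np.array([v]) for v in (1., -6., 11., -6.))
print(sorted(solve_four(a, b, c, d)))               # ~[1.0, 2.0, 3.0]
print(sorted(np.roots([1., -6., 11., -6.]).real))   # reference: [1.0, 2.0, 3.0]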
Example #43
0
    def forward(self, pos, index_voxels):
        print("-------------------- In RVS --------------------")
        B = len(index_voxels)  # batch_size
        #print(B)
        device = pos.device
        vs = int(np.cbrt(len(index_voxels[0])))  # 64 -> 4, voxel_size
        centroids = torch.zeros(B, self.npoints, dtype=torch.long).to(device)
        centroids_index = []
        #print(index_voxels[0])
        #print('-------------------------------------------------------------')

        for batch in range(B):
            #print(batch)
            voxels_per_batch = index_voxels[batch]

            indexes = []

            dict_keys = voxels_per_batch.keys()
            len_key = len(dict_keys)

            #print("npoints")
            #print(self.npoints)
            #print("len key")
            #print(len_key)
            if self.npoints <= len_key:
                #print(list(voxels_per_batch.items()))
                #print("npoints: "+str(self.npoints)+" "+"length: "+str(len_key))
                selected_keys = random.sample(dict_keys, self.npoints)
                #print(selected_keys)
                i = 0
                for per_key in selected_keys:
                    #int_index = int(per_key)
                    #indexes.append([batch, int_index//10000, int_index//100, int_index%100])
                    indexes.append([batch, per_key[0], per_key[1], per_key[2]])
                    val = voxels_per_batch.get(per_key)
                    #print(val)
                    length = len(val)
                    #print(str(length)+'====================')
                    #print(val.shape)
                    if (length == 1):
                        tem = 0
                    else:
                        tem = random.randint(0, len(val) - 1)
                    #index = int(random.sample(val, 1)[0])
                    index = int(val[tem])
                    centroids[batch, i] = index
                    i = i + 1
                #print(centroids[batch])
            else:
                #self.npoints > len(voxels_per_batch):
                #print(list(voxels_per_batch.items()))
                selected_keys = dict_keys
                i = 0
                added = []
                for per_key in selected_keys:
                    #int_index = int(per_key)
                    #indexes.append([batch, int_index//10000, int_index//100, int_index%100])
                    indexes.append([batch, per_key[0], per_key[1], per_key[2]])
                    val = voxels_per_batch.get(per_key)
                    #print(val)
                    #index = int(random.sample(val, 1)[0])
                    #print("perkey")
                    #print(per_key)
                    #print("val")
                    #print(val)
                    length = len(val)
                    #print("length")
                    #print(length)
                    if (length == 1):
                        tem = 0
                    else:
                        tem = random.randint(0, len(val) - 1)
                    index = int(val[tem])
                    centroids[batch, i] = index
                    added.append(index)
                    #print("index")
                    #print(index)
                    i = i + 1

                add_num = 0
                while add_num < (self.npoints - len_key):
                    index = int(random.sample(range(pos.shape[1]), 1)[0])
                    #print(index)
                    if index not in added:
                        centroids[batch, len_key + add_num] = index
                        indexes.append(index)
                        add_num += 1
                        added.append(index)
                #print(index)
                #print(centroids[batch])

            centroids_index.append(indexes)
            i = 0

        return centroids, centroids_index  # centroid_index is not used
Example #44
0
 def cal_nL(self,theta,rho):
     self.L_k = np.cbrt(self.particle_num/(1.0*rho))
     self.n_k = np.arange(-1*self.N_max,self.N_max+1)
     self.k_step = (2*np.pi/self.L_k)
     self.k_vec = self.n_k * self.k_step
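cal_nL sets the box edge from L = (N / rho)**(1/3) and builds a symmetric k-grid with spacing 2*pi/L; the standalone sketch below redoes the same arithmetic with illustrative numbers (variable names are ours):

import numpy as np

particle_num, rho, N_max = 256, 0.8, 3
L = np.cbrt(particle_num / rho)                     # box edge for the requested density
k = np.arange(-N_max, N_max + 1) * (2 * np.pi / L)  # commensurate wave vectors
print(L, k[0], k[-1])                               # symmetric grid, spacing 2*pi/L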
Example #45
0
 def test_cbrt_array(self):
     assert np.all(np.cbrt(np.array([1., 8., 64.]) * u.m**3)
                   == np.array([1., 2., 4.]) * u.m)
Example #46
0
def test_flattened_hernquist():
    """
    This test compares the coefficients against some computed in the mathematica
    notebook 'flattened-hernquist.nb'. nmax and lmax here must match nmax and lmax
    in that notebook.
    """

    coeff_path = os.path.abspath(get_pkg_data_filename('data/Snlm-mathematica.csv'))

    G = 1.
    M = 1
    a = 1.
    q = 0.9

    # Note: this must be the same as in the mathematica notebook
    nmax = 8
    lmax = 8

    (Snlm,Serr),(Tnlm,Terr) = compute_coeffs(flattened_hernquist_density,
                                             nmax=nmax, lmax=lmax, skip_odd=True, skip_m=True,
                                             M=M, r_s=a, args=(M,a,q))

    for l in range(1, lmax+1, 2):
        for m in range(lmax+1):
            assert Snlm[0,l,m] == 0.

    m_Snl0 = np.loadtxt(coeff_path, delimiter=',')
    m_Snl0 = m_Snl0[:,::2] # every other l

    assert np.allclose(Snlm[0,::2,0], m_Snl0[0])

    # check that random points match in gradient and density
    np.random.seed(42)
    n_test = 1024
    r = 10.*np.cbrt(np.random.uniform(0.1**3,1,size=n_test)) # 1 to 10
    t = np.arccos(2*np.random.uniform(size=n_test) - 1)
    ph = np.random.uniform(0, 2*np.pi, size=n_test)
    x = r*np.cos(ph)*np.sin(t)
    y = r*np.sin(ph)*np.sin(t)
    z = r*np.cos(t)
    xyz = np.vstack((x, y, z))

    # confirmed by testing...
    tru_dens = flattened_hernquist_density(xyz[0], xyz[1], xyz[2], M, a, q)
    bfe_dens = density(np.ascontiguousarray(xyz.T), Snlm, Tnlm, M, a)
    assert np.all((np.abs(bfe_dens - tru_dens) / tru_dens) < 0.05) # <5%

    tru_grad = np.array([flattened_hernquist_gradient(xyz[0,i], xyz[1,i], xyz[2,i], G, M, a, q)
                        for i in range(xyz.shape[1])]).T
    bfe_grad = gradient(np.ascontiguousarray(xyz.T), Snlm, Tnlm, G, M, a).T

    # check what typical errors are
    # for j in range(3):
    #     pl.hist(np.abs((bfe_grad[j]-tru_grad[j])/tru_grad[j]))

    for j in range(3):
        assert np.all(np.abs((bfe_grad[j]-tru_grad[j])/tru_grad[j]) < 0.005) # 0.5%

    return

    # ------------------------------------------------------------------------
    # plots:

    # coefficients
    fig,ax = pl.subplots(1,1,figsize=(10,8))
    n,l = np.mgrid[:nmax+1, :lmax+1]
    c = ax.scatter(n.ravel(), l.ravel(), c=Snlm[:,:,0].ravel(), s=64,
                   norm=mpl.colors.SymLogNorm(1E-5), cmap='RdBu_r',
                   vmin=-100, vmax=100, linewidths=1., edgecolors='#666666')

    ax.xaxis.set_ticks(np.arange(0,nmax+1,1))
    ax.yaxis.set_ticks(np.arange(0,lmax+1,1))

    ax.set_xlim(-0.5, nmax+0.5)
    ax.set_ylim(-0.5, lmax+0.5)

    ax.set_xlabel('$n$')
    ax.set_ylabel('$l$')

    tickloc = np.concatenate((-10.**np.arange(2,-5-1,-1),
                              10.**np.arange(-5,2+1,1)))
    fig.colorbar(c, ticks=tickloc, format='%.0e')
    fig.tight_layout()

    # contour plot in r,t at ph=0

    rgrid = np.logspace(-1, 1., 128)
    tgrid = np.linspace(0, np.pi, 128)

    r,t = np.meshgrid(rgrid,tgrid)
    x = r*np.sin(t)
    z = r*np.cos(t)

    _xyz = np.vstack((x.ravel(),np.zeros_like(x.ravel()),z.ravel()))
    bfe_dens = density(np.ascontiguousarray(_xyz.T), Snlm, Tnlm, M, a)
    true_dens = flattened_hernquist_density(_xyz[0], _xyz[1], _xyz[2], M, a, q)

    fig,ax = pl.subplots(1, 1, figsize=(8,8))

    levels = 10**np.linspace(-4.5, 1, 16)
    ax.contour(np.log10(r), t, true_dens.reshape(x.shape),
               levels=levels, colors='k',
               locator=mpl.ticker.LogLocator(), label='True')
    ax.contour(np.log10(r), t, bfe_dens.reshape(x.shape),
               levels=levels, colors='r',
               locator=mpl.ticker.LogLocator(), label='BFE')

    ax.legend()
    fig.tight_layout()

    pl.show()
Example #47
0
ax[1].plot(dataBJ2_70['Dmax'], dataBJ2_70['CscaKa'], label='BJ_70')

ax[2].plot(dataDO['Dmax'], dataDO['Ka'], label='DO_00', linestyle='-.')
ax[2].plot(meltedDO30['Dmax'], meltedDO30['Ka'], label='DO_30', linestyle='-.')
ax[2].plot(meltedDO70['Dmax'], meltedDO70['Ka'], label='DO_70', linestyle='-.')
ax[2].plot(dataBJ2['Dmax'], dataBJ2['Ka'], label='BJ_00')
ax[2].plot(dataBJ2_30['Dmax'], dataBJ2_30['Ka'], label='BJ_30')
ax[2].plot(dataBJ2_70['Dmax'], dataBJ2_70['Ka'], label='BJ_70')
ax[2].set_title('Copol Xsec Ka   [mm$^2$]')
ax[2].set_xlabel('Dmax     [mm]')
ax[2].legend()
ax[2].set_yscale('log')
fig.savefig('DO_BJ_comp_Xpol2.png')
# %%
plt.close('all')
reff = lambda mass: np.cbrt(3.0 * mass / (4.0 * np.pi * 916.0))
xeff = lambda r, l: 2.0 * np.pi * r / l
qeff = lambda C, r: C / (np.pi * r**2.0)


def reff2rhoBr07(r, ar):
    mass = 4.0 * np.pi * r**3.0 * 916.0 / 3.0
    size = 0.001 * (mass * 1000.0 / 8.9e-5)**(1.0 / 2.1)
    #    print(mass,size)
    vol = np.pi * size**3.0 * ar / 6.0
    den = mass / vol
    for i, d in enumerate(den):
        if d > 916.0:
            den[i] = 916.0
            size[i] = np.cbrt(6.0 * (mass[i] / den[i]) / (np.pi * ar))
    return size, den
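reff maps an ice mass to the radius of the equal-mass solid-ice sphere (density 916 kg m^-3). As a hedged round-trip check (the radius and names below are ours), the mass of a 1 mm sphere should map back to 1 mm:

import numpy as np

rho_ice = 916.0
r0 = 1e-3                                    # 1 mm radius, in metres
mass = 4.0 * np.pi * r0**3 * rho_ice / 3.0   # mass of the solid-ice sphere
reff = lambda m: np.cbrt(3.0 * m / (4.0 * np.pi * rho_ice))
print(reff(mass))                            # ~1e-3: recovers the 1 mm radius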
Example #48
0
    # For a very coarse grid, we should check the voxel indices. Requires Cython implementation for efficiency.
    gid = grain_ids_coarse[ci]
    for ii, g in zip(i, gid):
        if g != grain_ids_2[ii] and np.searchsorted(voxel_indices_0[g-2], ii) < len(voxel_indices_0[g-2]):
            grain_ids_2[ii] = g
    # This might change a few voxels to a value that they shouldn't obtain, but it's barely noticeable
    #grain_ids_2[i] = grain_ids_coarse[ci]
    surface_voxels_2, gb_voxels_2, interface_voxels_2 = ccb_c.calc_surface_prop(M, grain_ids_2)
    ccb_c.make_mcp_bound(M, grain_ids_2, gb_voxels_2, overlaps_0, voxel_indices_0, int(mc_steps*M**4), kBT)

    sum_gb_voxels_2 = np.sum(gb_voxels_2)
    contiguity_2.append( sum_gb_voxels_2 / float(sum_gb_voxels_2 + np.sum(interface_voxels_2)) )
    print("Contiguity is at", contiguity_2[-1])

    phases_2, good_voxels_2, euler_angles_2, phase_volumes_0, grain_volumes_2 = ccb_c.calc_grain_prop(M, grain_ids_2, trunc_triangles)
    d_eq_mean.append(np.mean(np.cbrt(6./np.pi * grain_volumes_2 * ((L/M)**3))))
    d_eq_std.append(np.std(np.cbrt(6./np.pi * grain_volumes_2 * ((L/M)**3))))

if False:
    import matplotlib.pyplot as plt
    plt.plot(np.array(mc_stepss), contiguity_2)
    plt.show()

fname = 'stat_mc_steps'
if use_potential:
    fname += '_U'
elif nr_tries == 0:
    fname += '_random'
else:
    fname += '_transl'
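The d_eq lines above turn each grain volume into an equivalent-sphere diameter, d_eq = cbrt(6 V / pi), with (L/M)**3 the volume of a single voxel. A tiny check of the formula with a known sphere (numbers ours):

import numpy as np

d = 2.0
V = np.pi * d**3 / 6.0           # volume of a sphere with diameter 2
print(np.cbrt(6.0 / np.pi * V))  # 2.0: recovers the diameter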
Example #49
0
 def test_cbrt_scalar(self):
     assert np.cbrt(8. * u.m**3) == 2. * u.m
Example #50
0
 def test_cbrt_array(self):
     # Calculate cbrt on both sides since on Windows the cube root of 64
     # does not exactly equal 4.  See 4388.
     values = np.array([1., 8., 64.])
     assert np.all(np.cbrt(values * u.m**3) == np.cbrt(values) * u.m)
Example #51
0
                       [-np.sin(th), 0., np.cos(th)]])
    return matrix.dot(vec)


def rotate_z(th, vec):
    matrix = np.array([[np.cos(th), -np.sin(th), 0.],
                       [np.sin(th), np.cos(th), 0.], [0., 0., 1.]])
    return matrix.dot(vec)


def angle_between(v1, v2):
    cs = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.arccos(cs)


a = np.cbrt(G * M * T**2 / 4. / np.pi**2)
c = a * e
b = np.sqrt(a**2 - c**2)
J = m * np.sqrt(G * M * a * (1. - e**2))
perihelion = a * (1. - e)
aphelion = a * (1. + e)
vPerihelion = np.sqrt(G * M * (1. + e) / perihelion)
vAphelion = np.sqrt(G * M * (1. - e) / aphelion)
omegaEarth = 2 * np.pi * (T / Day + 1) / T

print("earth orbit semimajor axis: a={}".format(a))
print("earth rotation angular speed: omegaEarth={}".format(omegaEarth))

# Axis configuration:
#   Set theta(th) coordinate of perihelion to 0.
#   Set theta(th) axis direction the same as earth's orbital direction.
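The semimajor-axis line is Kepler's third law solved for a, i.e. a = cbrt(G M T**2 / (4 pi**2)). With standard solar-system constants (supplied below because the snippet's own G, M, T definitions are not shown) it should reproduce roughly one astronomical unit:

import numpy as np

G = 6.674e-11           # m^3 kg^-1 s^-2
M = 1.989e30            # kg, solar mass
T = 365.256 * 86400.0   # s, sidereal year

a = np.cbrt(G * M * T**2 / (4. * np.pi**2))
print(a / 1.496e11)     # ~1.0 astronomical unit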
Example #52
0
def predict():
    net = unet_model_3d((1, 64, 64, 64))
    net.load_weights("./data/logs/network_weights_loss.h5")
    global_tp = 0
    global_fn = 0
    global_fp = 0

    for patient in os.listdir(path):

        f = h5py.File(path + patient, "r")
        amount_of_subvolumes = len(f["images/images"])

        tp = 0
        fp = 0
        fn = 0

        for i in range(amount_of_subvolumes):
            images = np.array(np.reshape(f["images/images"][i], (1, 1, 64, 64, 64)))
            labels = np.array(np.reshape(f["labels/labels"][i], (1, 1, 64, 64, 64)))
            # if len(np.nonzero(labels)[1]) == 0:
            # continue
            prediction = net.predict(images, batch_size=1, verbose=1)

            highly_conf_predicted = len(np.where(prediction[0][0] > 0.99)[0])
            # plot(prediction, labels)

            # aneurysm in mask -> dice can be considered as measure
            if len(np.nonzero(labels)[1]) != 0:
                dc = 1 - distance.dice(
                    np.reshape(labels, (-1,)), np.reshape(prediction, (-1,))
                )

                if dc > 0.30:
                    # aneurysm detected correctly
                    tp += 1
                    visualize_mask(prediction[0][0])
                    visualize_mask(labels[0][0])
                else:
                    # aneurysm not detected correctly
                    fn += 1
                    visualize_mask(prediction[0][0])
                    visualize_mask(labels[0][0])

            # no aneurysm in mask but in prediction
            elif highly_conf_predicted > 50:
                # check whether this is predicted aneurysm or random activation (check is across one axis only)
                max_index = np.max((np.where(prediction[0][0] > 0.99)[0]))
                min_index = np.min((np.where(prediction[0][0] > 0.99)[0]))
                if max_index - min_index < np.cbrt(highly_conf_predicted) + 5:
                    fp += 1

        # compute precision and recall per patient
        precision = (tp + 0.0001) / (tp + fp + 0.0001)
        recall = (tp + 0.0001) / (tp + fn + 0.0001)
        print("precision: " + str(precision) + " recall: " + str(recall))

        global_fn += fn
        global_fp += fp
        global_tp += tp

    precision = global_tp / (global_tp + global_fp)
    recall = global_tp / (global_tp + global_fn)
    print("precision: " + str(precision) + " recall: " + str(recall))