Example no. 1
# Assumed imports for this snippet: NumPy plus the random_intel module shipped
# with the Intel Distribution for Python (see the links in Example no. 4).
import numpy as np
import numpy.random_intel as random_intel


def computeCollisions(alpha, N, rem, dt, rv_max, vel):

    # First we have to determine the maximum number of candidate collisions
    n_cols_max = (N * rv_max * dt / 2) + rem

    # Fractional remainder (< 1 collision), carried over to the next time step
    rem = n_cols_max - int(n_cols_max)
    # We only use the integer part
    n_cols_max = int(n_cols_max)

    # It is more efficient to generate all random numbers at once
    random_intel.seed(brng='MT2203')
    # We choose multiple (n_cols_max) random pairs of particles
    random_pairs = random_intel.choice(N, size=(n_cols_max, 2))
    # List of random numbers to use as collision criteria
    random_numbers = random_intel.uniform(0, 1, n_cols_max)

    # Now we generate random unit direction vectors (sigmas)
    costheta = random_intel.uniform(0, 2, size=n_cols_max) - 1
    sintheta = np.sqrt(1 - costheta**2)
    phis = random_intel.uniform(0, 2 * np.pi, size=n_cols_max)

    x_coord = sintheta * np.cos(phis)
    y_coord = sintheta * np.sin(phis)
    z_coord = costheta
    sigmas = np.stack((x_coord, y_coord, z_coord), axis=1)

    # Using those random pairs we compute the relative velocities.
    # Fancy indexing is fully vectorized, so no explicit Python loop is needed.
    rel_vs = vel[random_pairs[:, 0]] - vel[random_pairs[:, 1]]
    # Project each relative velocity onto its sigma (row-wise dot product)
    rel_vs_mod = np.sum(rel_vs * sigmas, axis=1)

    # With this information we can check which collisions are valid
    ratios = rel_vs_mod / rv_max
    valid_cols = ratios > random_numbers

    # Pairs of particles involved in each valid collision
    valid_pairs = random_pairs[valid_cols]

    # Number of collisions that take place in this step
    cols_current_step = len(valid_pairs)

    # Now, we select only those rows that correspond to valid collisions
    valid_dotProducts = rel_vs_mod[valid_cols]
    # See: https://stackoverflow.com/questions/16229823/how-to-multiply-numpy-2d-array-with-numpy-1d-array
    valid_vectors = sigmas[valid_cols] * valid_dotProducts[:, None]
    new_vel_components = 0.5 * (1 + alpha) * valid_vectors

    valid_is = valid_pairs[:, 0]
    valid_js = valid_pairs[:, 1]

    # Updating the velocities array with its new values
    vel[valid_is] -= new_vel_components
    vel[valid_js] += new_vel_components

    return vel, cols_current_step, rem
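
A minimal driver for the function above (a sketch, not part of the original code; the values of N, dt and alpha and the crude rv_max bound are illustrative assumptions):

import numpy as np

N, dt, alpha = 1000, 1e-3, 0.9                      # illustrative values
rng = np.random.default_rng(0)
vel = rng.normal(0.0, 1.0, (N, 3))                  # initial velocities
rv_max = 2 * np.max(np.linalg.norm(vel, axis=1))    # crude upper bound on relative speed
rem = 0.0                                           # fractional collisions carried between steps

for step in range(10):
    vel, n_cols, rem = computeCollisions(alpha, N, rem, dt, rv_max, vel)
    print('step', step, 'collisions this step:', n_cols)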
Example no. 2
def gen_rand_data(nopt, dtype=np.float64):
    rnd.seed(SEED)
    return (
        rnd.uniform(S0L, S0H, nopt).astype(dtype),
        rnd.uniform(XL, XH, nopt).astype(dtype),
        #np.linspace(S0L, S0H, nopt).astype(dtype),
        #np.linspace(XL, XH, nopt).astype(dtype),
        rnd.uniform(TL, TH, nopt).astype(dtype),
    )
Example no. 3
def gen_rand_data(nopt, dims=2, NUMBER_OF_CENTROIDS=10, dtype=np.float64):
    rnd.seed(SEED)
    return (
        rnd.uniform(XL, XH, (nopt, dims)).astype(dtype),
        np.ones(nopt, dtype=np.int32),
        np.ones((NUMBER_OF_CENTROIDS, 2), dtype=dtype),
        np.ones((NUMBER_OF_CENTROIDS, 2), dtype=dtype),
        np.ones(NUMBER_OF_CENTROIDS, dtype=np.int32),
    )
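
The tuple returned above is unpacked here under an assumption about its meaning (input points plus an assignment array and per-centroid buffers for a k-means-style kernel); the variable names are illustrative, not the original author's:

# XL, XH, SEED and rnd are module-level names assumed to be in scope.
points, assignments, centroids, centroid_sums, centroid_counts = gen_rand_data(
    nopt=1 << 14, dims=2, NUMBER_OF_CENTROIDS=10)
print(points.shape, centroids.shape)   # (16384, 2) (10, 2)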
Example no. 4
# Using Intel's Math Kernel Library
# https://software.intel.com/en-us/blogs/2016/06/15/faster-random-number-generation-in-intel-distribution-for-python
# 'MCG31' gives the best performance, 'MT2203' provides better randomness
# https://software.intel.com/en-us/mkl-vsnotes-basic-generators
random_intel.seed(brng='MT2203')

results = []
for alpha in (0.35, 0.45, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95,
              0.97, 0.99):

    a2_mean = []
    # Loop for each run of the same alpha
    for c in range(n_runs):
        # Initialize particle positions as a 2D numpy array (uniform distribution).
        # Since this is DSMC, some particles may overlap without affecting the outcome
        pos = random_intel.uniform(effective_radius, LX - effective_radius,
                                   (N, 3))
        # Initialize particle velocities as a 2D numpy array (normal/gaussian).
        vel = random_intel.normal(0, baseStateVelocity, (N, 3))
        # We now shift the velocities so that the center-of-mass velocity
        # starts at 0 (Pöschel, p. 203)
        vel -= np.mean(vel, axis=0)
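        # Illustrative sanity check (not in the original): after the shift the
        # center-of-mass velocity should be numerically zero along every axis
        assert np.allclose(np.mean(vel, axis=0), 0.0)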

        print()
        print('Number of particles: ', N)
        print('Time step: ', dt)
        print('Coefficient of restitution: ', alpha)
        print()

        temperatures = []
        cumulants = []
        n_collisions = 0
Example no. 5
def gen_data(nopt):
    return (
        rnd.uniform(S0L, S0H, nopt),
        rnd.uniform(XL, XH, nopt),
        rnd.uniform(TL, TH, nopt),
    )
Example no. 6
def bit_flip(x, prob=0.05):
    # Flip each entry of a binary array (0 <-> 1) with probability `prob`;
    # used in Example no. 7 to add label noise to the discriminator targets
    x = np.array(x)
    selection = rng.uniform(0, 1, x.shape) < prob
    x[selection] = 1 * np.logical_not(x[selection])
    return x
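
A small sketch of how the helper above is used in Example no. 7: flipping a few of the discriminator's real/fake target labels injects label noise, a common GAN training trick. The module-level rng (assumed here to be a NumPy RandomState or Generator) must exist before bit_flip is called:

import numpy as np

rng = np.random.RandomState(0)                 # stand-in for the module-level rng

batch_size = 8
real_labels = bit_flip(np.ones(batch_size))    # mostly 1s, a few flipped to 0
fake_labels = bit_flip(np.zeros(batch_size))   # mostly 0s, a few flipped to 1
print(real_labels, fake_labels)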
Example no. 7
        epoch_lossG = 0
        epoch_lossD = 0

        for i in range(num_batches):
            print('Doing batch [{}/{}]'.format(i, num_batches))
            noise = rng.normal(0, 1, (batch_size, latent_size))
            image_batch = X_train[i * batch_size:(i + 1) * batch_size]
            energy_batch = Y_train[i * batch_size:(i + 1) *
                                   batch_size].flatten()
            ecal_batch = ecal_train[i * batch_size:(i + 1) * batch_size]

            # num_nodes = len([n.name for n in tf.get_default_graph().as_graph_def().node])
            # print("Number of nodes in graph = {}".format(num_nodes))

            if image_batch.shape[0] > 0:
                sampled_energies = rng.uniform(.1, 5, size=(batch_size, 1))
                generator_ip = np.multiply(sampled_energies, noise)
                ecal_ip = np.multiply(2, sampled_energies)

                generated = sess.run(fake_images, feed_dict={z: generator_ip})

                (_, disc_loss) = sess.run(
                    [D_trainer, D_loss_summary],
                    feed_dict={
                        real_images: image_batch,
                        fake_images: generated,
                        z: generator_ip,
                        flipped_bits_ones: bit_flip(np.ones(batch_size)),
                        energy_batch_ph: energy_batch,
                        ecal_batch_ph: ecal_batch,
                        flipped_bits_zeroes: bit_flip(np.zeros(batch_size)),